Merge "msm: adsprpc: use same CPU address during DMA allocation and free"
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index 26e5cb8..6ea10f7 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -46,7 +46,7 @@
 ifeq ($(TARGET_KERNEL_CROSS_COMPILE_PREFIX),)
 KERNEL_CROSS_COMPILE := arm-eabi-
 else
-KERNEL_CROSS_COMPILE := $(TARGET_KERNEL_CROSS_COMPILE_PREFIX)
+KERNEL_CROSS_COMPILE := $(shell pwd)/$(TARGET_TOOLS_PREFIX)
 endif
 
 ifeq ($(TARGET_PREBUILT_KERNEL),)
@@ -210,9 +210,11 @@
 			echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
 			$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
 
+.PHONY: kerneltags
 kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG)
 	$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) tags
 
+.PHONY: kernelconfig
 kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG)
 	env KCONFIG_NOTIMESTAMP=true \
 	     $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) menuconfig
diff --git a/Documentation/ABI/obsolete/sysfs-block-zram b/Documentation/ABI/obsolete/sysfs-block-zram
deleted file mode 100644
index 720ea92..0000000
--- a/Documentation/ABI/obsolete/sysfs-block-zram
+++ /dev/null
@@ -1,119 +0,0 @@
-What:		/sys/block/zram<id>/num_reads
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The num_reads file is read-only and specifies the number of
-		reads (failed or successful) done on this device.
-		Now accessible via zram<id>/stat node.
-
-What:		/sys/block/zram<id>/num_writes
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The num_writes file is read-only and specifies the number of
-		writes (failed or successful) done on this device.
-		Now accessible via zram<id>/stat node.
-
-What:		/sys/block/zram<id>/invalid_io
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The invalid_io file is read-only and specifies the number of
-		non-page-size-aligned I/O requests issued to this device.
-		Now accessible via zram<id>/io_stat node.
-
-What:		/sys/block/zram<id>/failed_reads
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The failed_reads file is read-only and specifies the number of
-		failed reads happened on this device.
-		Now accessible via zram<id>/io_stat node.
-
-What:		/sys/block/zram<id>/failed_writes
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The failed_writes file is read-only and specifies the number of
-		failed writes happened on this device.
-		Now accessible via zram<id>/io_stat node.
-
-What:		/sys/block/zram<id>/notify_free
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The notify_free file is read-only. Depending on device usage
-		scenario it may account a) the number of pages freed because
-		of swap slot free notifications or b) the number of pages freed
-		because of REQ_DISCARD requests sent by bio. The former ones
-		are sent to a swap block device when a swap slot is freed, which
-		implies that this disk is being used as a swap disk. The latter
-		ones are sent by filesystem mounted with discard option,
-		whenever some data blocks are getting discarded.
-		Now accessible via zram<id>/io_stat node.
-
-What:		/sys/block/zram<id>/zero_pages
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The zero_pages file is read-only and specifies number of zero
-		filled pages written to this disk. No memory is allocated for
-		such pages.
-		Now accessible via zram<id>/mm_stat node.
-
-What:		/sys/block/zram<id>/orig_data_size
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The orig_data_size file is read-only and specifies uncompressed
-		size of data stored in this disk. This excludes zero-filled
-		pages (zero_pages) since no memory is allocated for them.
-		Unit: bytes
-		Now accessible via zram<id>/mm_stat node.
-
-What:		/sys/block/zram<id>/compr_data_size
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The compr_data_size file is read-only and specifies compressed
-		size of data stored in this disk. So, compression ratio can be
-		calculated using orig_data_size and this statistic.
-		Unit: bytes
-		Now accessible via zram<id>/mm_stat node.
-
-What:		/sys/block/zram<id>/mem_used_total
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The mem_used_total file is read-only and specifies the amount
-		of memory, including allocator fragmentation and metadata
-		overhead, allocated for this disk. So, allocator space
-		efficiency can be calculated using compr_data_size and this
-		statistic.
-		Unit: bytes
-		Now accessible via zram<id>/mm_stat node.
-
-What:		/sys/block/zram<id>/mem_used_max
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The mem_used_max file is read/write and specifies the amount
-		of maximum memory zram have consumed to store compressed data.
-		For resetting the value, you should write "0". Otherwise,
-		you could see -EINVAL.
-		Unit: bytes
-		Downgraded to write-only node: so it's possible to set new
-		value only; its current value is stored in zram<id>/mm_stat
-		node.
-
-What:		/sys/block/zram<id>/mem_limit
-Date:		August 2015
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The mem_limit file is read/write and specifies the maximum
-		amount of memory ZRAM can use to store the compressed data.
-		The limit could be changed in run time and "0" means disable
-		the limit.  No limit is the initial state.  Unit: bytes
-		Downgraded to write-only node: so it's possible to set new
-		value only; its current value is stored in zram<id>/mm_stat
-		node.
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 4518d30..c1513c7 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -22,41 +22,6 @@
 		device. The reset operation frees all the memory associated
 		with this device.
 
-What:		/sys/block/zram<id>/num_reads
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The num_reads file is read-only and specifies the number of
-		reads (failed or successful) done on this device.
-
-What:		/sys/block/zram<id>/num_writes
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The num_writes file is read-only and specifies the number of
-		writes (failed or successful) done on this device.
-
-What:		/sys/block/zram<id>/invalid_io
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The invalid_io file is read-only and specifies the number of
-		non-page-size-aligned I/O requests issued to this device.
-
-What:		/sys/block/zram<id>/failed_reads
-Date:		February 2014
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The failed_reads file is read-only and specifies the number of
-		failed reads happened on this device.
-
-What:		/sys/block/zram<id>/failed_writes
-Date:		February 2014
-Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
-		The failed_writes file is read-only and specifies the number of
-		failed writes happened on this device.
-
 What:		/sys/block/zram<id>/max_comp_streams
 Date:		February 2014
 Contact:	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
@@ -73,74 +38,24 @@
 		available and selected compression algorithms, change
 		compression algorithm selection.
 
-What:		/sys/block/zram<id>/notify_free
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The notify_free file is read-only. Depending on device usage
-		scenario it may account a) the number of pages freed because
-		of swap slot free notifications or b) the number of pages freed
-		because of REQ_DISCARD requests sent by bio. The former ones
-		are sent to a swap block device when a swap slot is freed, which
-		implies that this disk is being used as a swap disk. The latter
-		ones are sent by filesystem mounted with discard option,
-		whenever some data blocks are getting discarded.
-
-What:		/sys/block/zram<id>/zero_pages
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The zero_pages file is read-only and specifies number of zero
-		filled pages written to this disk. No memory is allocated for
-		such pages.
-
-What:		/sys/block/zram<id>/orig_data_size
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The orig_data_size file is read-only and specifies uncompressed
-		size of data stored in this disk. This excludes zero-filled
-		pages (zero_pages) since no memory is allocated for them.
-		Unit: bytes
-
-What:		/sys/block/zram<id>/compr_data_size
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The compr_data_size file is read-only and specifies compressed
-		size of data stored in this disk. So, compression ratio can be
-		calculated using orig_data_size and this statistic.
-		Unit: bytes
-
-What:		/sys/block/zram<id>/mem_used_total
-Date:		August 2010
-Contact:	Nitin Gupta <ngupta@vflare.org>
-Description:
-		The mem_used_total file is read-only and specifies the amount
-		of memory, including allocator fragmentation and metadata
-		overhead, allocated for this disk. So, allocator space
-		efficiency can be calculated using compr_data_size and this
-		statistic.
-		Unit: bytes
-
 What:		/sys/block/zram<id>/mem_used_max
 Date:		August 2014
 Contact:	Minchan Kim <minchan@kernel.org>
 Description:
-		The mem_used_max file is read/write and specifies the amount
-		of maximum memory zram have consumed to store compressed data.
-		For resetting the value, you should write "0". Otherwise,
-		you could see -EINVAL.
+		The mem_used_max file is write-only and is used to reset
+		the counter of the maximum memory zram has consumed to
+		store compressed data. To reset the value, write "0";
+		writing any other value returns -EINVAL.
 		Unit: bytes
 
 What:		/sys/block/zram<id>/mem_limit
 Date:		August 2014
 Contact:	Minchan Kim <minchan@kernel.org>
 Description:
-		The mem_limit file is read/write and specifies the maximum
-		amount of memory ZRAM can use to store the compressed data.  The
-		limit could be changed in run time and "0" means disable the
-		limit.  No limit is the initial state.  Unit: bytes
+		The mem_limit file is write-only and specifies the maximum
+		amount of memory ZRAM can use to store the compressed data.
+		The limit can be changed at run time and "0" means disable
+		the limit. No limit is the initial state.  Unit: bytes
 
 What:		/sys/block/zram<id>/compact
 Date:		August 2015
@@ -175,3 +90,11 @@
 		device's debugging info useful for kernel developers. Its
 		format is not documented intentionally and may change
 		anytime without any notice.
+
+What:		/sys/block/zram<id>/backing_dev
+Date:		June 2017
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The backing_dev file is read-write and sets up the backing
+		device for zram to write incompressible pages to.
+		To use it, the user should enable CONFIG_ZRAM_WRITEBACK.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 6d75a9c..069e8d5 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -356,6 +356,7 @@
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
 		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+		/sys/devices/system/cpu/vulnerabilities/l1tf
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
@@ -367,3 +368,26 @@
 		"Not affected"	  CPU is not affected by the vulnerability
 		"Vulnerable"	  CPU is affected and no mitigation in effect
 		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
+
+		Details about the l1tf file can be found in
+		Documentation/admin-guide/l1tf.rst
+
+What:		/sys/devices/system/cpu/smt
+		/sys/devices/system/cpu/smt/active
+		/sys/devices/system/cpu/smt/control
+Date:		June 2018
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Control Symmetric Multi Threading (SMT)
+
+		active:  Tells whether SMT is active (enabled and siblings online)
+
+		control: Read/write interface to control SMT. Possible
+			 values:
+
+			 "on"		SMT is enabled
+			 "off"		SMT is disabled
+			 "forceoff"	SMT is force disabled. Cannot be changed.
+			 "notsupported" SMT is not supported by the CPU
+
+			 If the control status is "forceoff" or "notsupported",
+			 writes are rejected.
diff --git a/Documentation/Changes b/Documentation/Changes
index 22797a1..76d6dc0 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -33,7 +33,7 @@
 GNU make               3.80             make --version
 binutils               2.12             ld -v
 util-linux             2.10o            fdformat --version
-module-init-tools      0.9.10           depmod -V
+kmod                   13               depmod -V
 e2fsprogs              1.41.4           e2fsck -V
 jfsutils               1.1.3            fsck.jfs -V
 reiserfsprogs          3.6.3            reiserfsck -V
@@ -143,12 +143,6 @@
 reproduce the Oops with that option, then you can still decode that Oops
 with ksymoops.
 
-Module-Init-Tools
------------------
-
-A new module loader is now in the kernel that requires ``module-init-tools``
-to use.  It is backward compatible with the 2.4.x series kernels.
-
 Mkinitrd
 --------
 
@@ -363,16 +357,17 @@
 
 - <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
 
+Kmod
+----
+
+- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
+- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
+
 Ksymoops
 --------
 
 - <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
 
-Module-Init-Tools
------------------
-
-- <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
-
 Mkinitrd
 --------
 
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 0535ae1..875b2b5 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -161,42 +161,15 @@
 disksize          RW    show and set the device's disk size
 initstate         RO    shows the initialization state of the device
 reset             WO    trigger device reset
-num_reads         RO    the number of reads
-failed_reads      RO    the number of failed reads
-num_write         RO    the number of writes
-failed_writes     RO    the number of failed writes
-invalid_io        RO    the number of non-page-size-aligned I/O requests
+mem_used_max      WO    reset the `mem_used_max' counter (see later)
+mem_limit         WO    specifies the maximum amount of memory ZRAM can use
+                        to store the compressed data
 max_comp_streams  RW    the number of possible concurrent compress operations
 comp_algorithm    RW    show and change the compression algorithm
-notify_free       RO    the number of notifications to free pages (either
-                        slot free notifications or REQ_DISCARD requests)
-zero_pages        RO    the number of zero filled pages written to this disk
-orig_data_size    RO    uncompressed size of data stored in this disk
-compr_data_size   RO    compressed size of data stored in this disk
-mem_used_total    RO    the amount of memory allocated for this disk
-mem_used_max      RW    the maximum amount of memory zram have consumed to
-                        store the data (to reset this counter to the actual
-                        current value, write 1 to this attribute)
-mem_limit         RW    the maximum amount of memory ZRAM can use to store
-                        the compressed data
-pages_compacted   RO    the number of pages freed during compaction
-                        (available only via zram<id>/mm_stat node)
 compact           WO    trigger memory compaction
 debug_stat        RO    this file is used for zram debugging purposes
+backing_dev	  RW	set up backing storage for zram to write out
+			incompressible pages
 
-WARNING
-=======
-per-stat sysfs attributes are considered to be deprecated.
-The basic strategy is:
--- the existing RW nodes will be downgraded to WO nodes (in linux 4.11)
--- deprecated RO sysfs nodes will eventually be removed (in linux 4.11)
-
-The list of deprecated attributes can be found here:
-Documentation/ABI/obsolete/sysfs-block-zram
-
-Basically, every attribute that has its own read accessible sysfs node
-(e.g. num_reads) *AND* is accessible via one of the stat files (zram<id>/stat
-or zram<id>/io_stat or zram<id>/mm_stat) is considered to be deprecated.
 
 User space is advised to use the following files to read the device statistics.
 
@@ -211,22 +184,41 @@
 layer and, thus, not available in zram<id>/stat file. It consists of a
 single line of text and contains the following stats separated by
 whitespace:
-	failed_reads
-	failed_writes
-	invalid_io
-	notify_free
+ failed_reads     the number of failed reads
+ failed_writes    the number of failed writes
+ invalid_io       the number of non-page-size-aligned I/O requests
+ notify_free      Depending on device usage scenario it may account
+                  a) the number of pages freed because of swap slot free
+                  notifications or b) the number of pages freed because of
+                  REQ_DISCARD requests sent by bio. The former ones are
+                  sent to a swap block device when a swap slot is freed,
+                  which implies that this disk is being used as a swap disk.
+                  The latter ones are sent by filesystem mounted with
+                  discard option, whenever some data blocks are getting
+                  discarded.
 
 File /sys/block/zram<id>/mm_stat
 
 The stat file represents device's mm statistics. It consists of a single
 line of text and contains the following stats separated by whitespace:
-	orig_data_size
-	compr_data_size
-	mem_used_total
-	mem_limit
-	mem_used_max
-	zero_pages
-	num_migrated
+ orig_data_size   uncompressed size of data stored in this disk.
+		  This excludes same-element-filled pages (same_pages) since
+		  no memory is allocated for them.
+                  Unit: bytes
+ compr_data_size  compressed size of data stored in this disk
+ mem_used_total   the amount of memory allocated for this disk. This
+                  includes allocator fragmentation and metadata overhead.
+                  So, allocator space efficiency
+                  can be calculated using compr_data_size and this statistic.
+                  Unit: bytes
+ mem_limit        the maximum amount of memory ZRAM can use to store
+                  the compressed data
+ mem_used_max     the maximum amount of memory zram has consumed to
+                  store the data
+ same_pages       the number of same element filled pages written to this disk.
+                  No memory is allocated for such pages.
+ pages_compacted  the number of pages freed during compaction
+ huge_pages	  the number of incompressible pages
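+
+For illustration, a small C program could read and parse mm_stat as in the
+sketch below (the zram0 device name is only an example, and the field order
+is assumed to match the list above):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long long orig, compr, used, limit, used_max;
+		unsigned long long same, compacted, huge;
+		FILE *f = fopen("/sys/block/zram0/mm_stat", "r");
+
+		if (!f)
+			return 1;
+		/* Field order follows the list above. */
+		if (fscanf(f, "%llu %llu %llu %llu %llu %llu %llu %llu",
+			   &orig, &compr, &used, &limit, &used_max,
+			   &same, &compacted, &huge) != 8) {
+			fclose(f);
+			return 1;
+		}
+		fclose(f);
+		/* compression ratio = orig_data_size / compr_data_size */
+		if (compr)
+			printf("compression ratio: %.2f\n",
+			       (double)orig / compr);
+		return 0;
+	}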
 
 9) Deactivate:
 	swapoff /dev/zram0
@@ -241,5 +233,39 @@
 	resets the disksize to zero. You must set the disksize again
 	before reusing the device.
 
+* Optional Feature
+
+= writeback
+
+With incompressible pages, there is no memory saving with zram.
+Instead, with CONFIG_ZRAM_WRITEBACK, zram can write incompressible pages
+to backing storage rather than keeping them in memory.
+The user should set up the backing device via /sys/block/zramX/backing_dev
+before setting the disksize.
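+
+For illustration, that ordering could be driven from userspace as in the
+following sketch (the zram0 device, the /dev/sdb2 backing partition and the
+1 GiB disksize are placeholders, and write_attr() is just a local helper):
+
+	#include <stdio.h>
+
+	/* Write a string to a sysfs attribute; returns 0 on success. */
+	static int write_attr(const char *path, const char *val)
+	{
+		FILE *f = fopen(path, "w");
+		int ret;
+
+		if (!f)
+			return -1;
+		ret = (fprintf(f, "%s\n", val) < 0) ? -1 : 0;
+		if (fclose(f) != 0)
+			ret = -1;
+		return ret;
+	}
+
+	int main(void)
+	{
+		/* backing_dev must be configured before disksize. */
+		if (write_attr("/sys/block/zram0/backing_dev", "/dev/sdb2"))
+			return 1;
+		if (write_attr("/sys/block/zram0/disksize", "1073741824"))
+			return 1;
+		return 0;
+	}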
+
+= memory tracking
+
+With CONFIG_ZRAM_MEMORY_TRACKING, the user can inspect the state of
+each block of a zram device. It could be useful to catch cold or
+incompressible pages of a process with pagemap.
+If you enable the feature, you can see the block state via
+/sys/kernel/debug/zram/zram0/block_state. The output is as follows:
+
+	  300    75.033841 .wh
+	  301    63.806904 s..
+	  302    63.806919 ..h
+
+The first column is zram's block index.
+The second column is the access time since the system was booted.
+The third column is the state of the block:
+(s: same page
+w: page written to backing store
+h: huge page)
+
+The first line of the above example says the 300th block was accessed at
+75.033841 sec and the block's state is huge, so it was written back to the
+backing storage. This is a debugging feature, so nobody should rely on it
+to work properly.
+
 Nitin Gupta
 ngupta@vflare.org
diff --git a/Documentation/devicetree/bindings/arm/msm/hidqvr-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/hidqvr-smp2p.txt
new file mode 100644
index 0000000..32f1d16
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/hidqvr-smp2p.txt
@@ -0,0 +1,39 @@
+Qualcomm Technologies, Inc. HID QVR (hid-qvr) driver
+
+Required properties:
+-compatible :
+	To communicate with the CDSP:
+		qcom,smp2pgpio-qvrexternal-5-out (outbound)
+		qcom,smp2pgpio-qvrexternal-5-in (inbound)
+
+Example:
+	smp2pgpio_qvrexternal_5_out: qcom,smp2pgpio-qvrexternal-5-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "qvrexternal";
+		qcom,remote-pid = <5>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	smp2pgpio_qvrexternal_5_in: qcom,smp2pgpio-qvrexternal-5-in {
+			compatible = "qcom,smp2pgpio";
+			qcom,entry-name = "qvrexternal";
+			qcom,remote-pid = <5>;
+			qcom,is-inbound;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_qvrexternal_5_out {
+			compatible = "qcom,smp2pgpio_client_qvrexternal_5_out";
+			gpios = <&smp2pgpio_qvrexternal_5_out 0 0>;
+	};
+
+	qcom,smp2pgpio_client_qvrexternal_5_in {
+			compatible = "qcom,smp2pgpio_client_qvrexternal_5_in";
+			gpios = <&smp2pgpio_qvrexternal_5_in 0 0>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 0319e2f..ab58089 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -68,6 +68,9 @@
 - MSM8917
   compatible = "qcom,msm8917"
 
+- MSM8940
+  compatible = "qcom,msm8940"
+
 - MSM8936
   compatible = "qcom,msm8936"
 
@@ -279,6 +282,7 @@
 compatible = "qcom,msm8917-rumi"
 compatible = "qcom,msm8917-qrd"
 compatible = "qcom,msm8917-qrd-sku5"
+compatible = "qcom,msm8940-mtp"
 compatible = "qcom,msm8926-cdp"
 compatible = "qcom,msm8926-mtp"
 compatible = "qcom,msm8926-qrd"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_hsic_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_usb_xprt.txt
similarity index 62%
rename from Documentation/devicetree/bindings/arm/msm/msm_ipc_router_hsic_xprt.txt
rename to Documentation/devicetree/bindings/arm/msm/msm_ipc_router_usb_xprt.txt
index 71d0c0d..e7256af 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_hsic_xprt.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_usb_xprt.txt
@@ -1,17 +1,17 @@
-Qualcomm Technologies, Inc. IPC Router HSIC Transport
+Qualcomm Technologies, Inc. IPC Router USB Transport
 
 Required properties:
--compatible:		should be "qcom,ipc_router_hsic_xprt"
--qcom,ch-name:		the HSIC channel name used by the HSIC transport
+-compatible:		should be "qcom,ipc_router_usb_xprt"
+-qcom,ch-name:		the USB channel name used by the USB transport
 -qcom,xprt-remote:	string that defines the edge of the transport (PIL Name)
 -qcom,xprt-linkid:	unique integer to identify the tier to which the link
 			belongs to in the network and is used to avoid the
 			routing loops while forwarding the broadcast messages
--qcom,xprt-version:	unique version ID used by HSIC transport header
+-qcom,xprt-version:	unique version ID used by USB transport header
 
 Example:
 	qcom,ipc_router_external_modem_xprt {
-		compatible = "qcom,ipc_router_hsic_xprt";
+		compatible = "qcom,ipc_router_usb_xprt";
 	        qcom,ch-name = "ipc_bridge";
 		qcom,xprt-remote = "external-modem";
 		qcom,xprt-linkid = <1>;
diff --git a/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
index a53eba5..b851ec0 100644
--- a/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
@@ -23,9 +23,16 @@
 	Value type: <prop-encoded-array>
 	Definition: Specifies physical address of start of profiling unit.
 
+- qcom,use-alt-unit:
+	Usage: Optional
+	Value type: <u32>
+	Definition: Specifies the designated unit number for which an
+		alternate unit is configured to capture the time stamp.
+
 Example:
 
 qcom,rpmh-master-stats {
 	compatible = "qcom,rpmh-master-stats";
 	reg = <0xb221200 0x60>;
+	qcom,use-alt-unit = <3>;
 };
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index 0c75cf6..23e4bd5 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -25,6 +25,7 @@
 Optional properties:
 - qcom,sleep-clk-en: GPIO for sleep clock used for low power modes by 11ad card
 - qcom,wigig-en: Enable GPIO connected to 11ad card
+- qcom,wigig-dc: Enable DC to DC GPIO connected to 11ad card
 - qcom,use-ext-supply: Boolean flag to indicate if 11ad SIP uses external power supply
 - vdd-supply: phandle to 11ad VDD regulator node
 - vddio-supply: phandle to 11ad VDDIO regulator node
@@ -45,6 +46,7 @@
 		qcom,smmu-mapping = <0x20000000 0xe0000000>;
 		qcom,pcie-parent = <&pcie1>;
 		qcom,wigig-en = <&tlmm 94 0>;
+		qcom,wigig-dc = <&tlmm 81 0>;
 		qcom,msm-bus,name = "wil6210";
 		qcom,msm-bus,num-cases = <2>;
 		qcom,msm-bus,num-paths = <1>;
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index c17970c..99f8c4c 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -164,10 +164,7 @@
 					"dfps_immediate_porch_mode_vfp" = FPS change request is
 					implemented immediately by changing panel vertical
 					front porch values.
-- qcom,min-refresh-rate:		Minimum refresh rate supported by the panel.
-- qcom,max-refresh-rate:		Maximum refresh rate supported by the panel. If max refresh
-					rate is not specified, then the frame rate of the panel in
-					qcom,mdss-dsi-panel-framerate is used.
+- qcom,dsi-supported-dfps-list:		List containing all the supported refresh rates.
 - qcom,mdss-dsi-bl-pmic-control-type:	A string that specifies the implementation of backlight
 					control for this panel.
 					"bl_ctrl_pwm" = Backlight controlled by PWM gpio.
@@ -187,6 +184,9 @@
 					255 = default value.
 - qcom,mdss-brightness-max-level:	Specifies the max brightness level supported.
 					255 = default value.
+- qcom,bl-update-flag:			A string that specifies how backlight updates of the panel are controlled.
+					"delay_until_first_frame" = Delay backlight update of the panel
+					until the first frame is received from the HW.
 - qcom,mdss-dsi-interleave-mode:	Specifies interleave mode.
 					0 = default value.
 - qcom,mdss-dsi-panel-type:		Specifies the panel operating mode.
@@ -524,6 +524,10 @@
 - qcom,mdss-dsi-panel-cmds-only-by-right: Boolean used to mention whether the panel support DSI1 or
 					DSI0 to send commands. If this was set, that mean the panel only support
 					DSI1 to send commands, otherwise DSI0 will send comands.
+- qcom,dsi-dyn-clk-enable:		Boolean to indicate that the dsi dynamic clock switch
+					feature is supported.
+- qcom,dsi-dyn-clk-list:		A u32 array which lists all the supported dsi bit clock
+					frequencies in Hz for the given panel.
 
 Required properties for sub-nodes:	None
 Optional properties:
@@ -599,6 +603,7 @@
 		qcom,mdss-dsi-bl-min-level = <1>;
 		qcom,mdss-dsi-bl-max-level = < 15>;
 		qcom,mdss-brightness-max-level = <255>;
+		qcom,bl-update-flag = "delay_until_first_frame";
 		qcom,mdss-dsi-interleave-mode = <0>;
 		qcom,mdss-dsi-panel-type = "dsi_video_mode";
 		qcom,mdss-dsi-te-check-enable;
@@ -645,8 +650,7 @@
 		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 		qcom,mdss-dsi-pan-enable-dynamic-fps;
 		qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
-		qcom,min-refresh-rate = <30>;
-		qcom,max-refresh-rate = <60>;
+		qcom,dsi-supported-dfps-list = <48 55 60>;
 		qcom,mdss-dsi-bl-pmic-bank-select = <0>;
 		qcom,mdss-dsi-bl-pmic-pwm-frequency = <0>;
 		qcom,mdss-dsi-pwm-gpio = <&pm8941_mpps 5 0>;
@@ -777,5 +781,7 @@
 			                <2 2 1>;
 		qcom,default-topology-index = <0>;
 		qcom,mdss-dsi-dma-schedule-line = <5>;
+		qcom,dsi-dyn-clk-enable;
+		qcom,dsi-dyn-clk-list = <798240576 801594528 804948480>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index a4e9ba7..a59a322 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -6,8 +6,9 @@
 - label:		A string used as a descriptive name for the device.
 - compatible:		Must be "qcom,kgsl-3d0" and "qcom,kgsl-3d"
 - reg:			Specifies the register base address and size, the shader memory
-			base address and size (if it exists), and the base address and size
-			of the CX_DBGC block (if it exists).
+			base address and size (if it exists), the base address and size
+			of the CX_DBGC block (if it exists), and the base address and
+			size of the CX_MISC block (if it exists).
 - reg-names:		Resource names used for the physical address of device registers
 			and shader memory. "kgsl_3d0_reg_memory" gives the physical address
 			and length of device registers while "kgsl_3d0_shader_memory" gives
@@ -15,7 +16,8 @@
 			specified, "qfprom_memory" gives the range for the efuse
 			registers used for various configuration options. If specified,
 			"kgsl_3d0_cx_dbgc_memory" gives the physical address and length
-			of the CX DBGC block.
+			of the CX DBGC block. If specified, "cx_misc" gives
+			the physical address and length of the CX_MISC block.
 - interrupts:		Interrupt mapping for GPU IRQ.
 - interrupt-names:	String property to describe the name of the interrupt.
 - qcom,id:		An integer used as an identification number for the device.
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index 21edaa0..950884c 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -17,6 +17,9 @@
 Optional property:
  - qcom,clk-freq-out : Desired I2C bus clock frequency in Hz.
    When missing default to 400000Hz.
+ - qcom,disable-autosuspend : Disable autosuspend for the I2C controller.
+   In that case, I2C clients should call pm_runtime_get_sync()/put_sync()
+   on the I2C controller themselves (see the sketch below).
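+
+   For illustration, a client might bracket its transfers roughly as in the
+   sketch below. The client_read() helper and the assumption that the
+   adapter's parent device is the GENI I2C controller are only examples,
+   not part of this binding.
+
+	#include <linux/i2c.h>
+	#include <linux/pm_runtime.h>
+
+	static int client_read(struct i2c_client *client, char *buf, int len)
+	{
+		struct device *ctrl = client->adapter->dev.parent;
+		int ret;
+
+		ret = pm_runtime_get_sync(ctrl);
+		if (ret < 0) {
+			pm_runtime_put_noidle(ctrl);
+			return ret;
+		}
+
+		ret = i2c_master_recv(client, buf, len);
+
+		pm_runtime_put_sync(ctrl);
+		return ret;
+	}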
 
 Child nodes should conform to i2c bus binding.
 
diff --git a/Documentation/devicetree/bindings/media/video/msm-csi-phy.txt b/Documentation/devicetree/bindings/media/video/msm-csi-phy.txt
index 24a3443..755ac71 100644
--- a/Documentation/devicetree/bindings/media/video/msm-csi-phy.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-csi-phy.txt
@@ -11,6 +11,7 @@
     - "qcom,csiphy-v3.1.1"
     - "qcom,csiphy-v3.2"
     - "qcom,csiphy-v3.4.2"
+    - "qcom,csiphy-v3.4.2.1"
     - "qcom,csiphy-v3.5"
     - "qcom,csiphy-v5.0"
     - "qcom,csiphy-v5.01"
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
index 9c67ee4..bbcb255 100644
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
@@ -2,7 +2,10 @@
 
 Required properties:
 
-- compatible: should be "qca,qca8337"
+- compatible: should be one of:
+    "qca,qca8334"
+    "qca,qca8337"
+
 - #size-cells: must be 0
 - #address-cells: must be 1
 
@@ -14,6 +17,20 @@
 referencing the internal PHY connected to it. The CPU port of this switch is
 always port 0.
 
+A CPU port node has the following optional node:
+
+- fixed-link            : Fixed-link subnode describing a link to a non-MDIO
+                          managed entity. See
+                          Documentation/devicetree/bindings/net/fixed-link.txt
+                          for details.
+
+For QCA8K the 'fixed-link' sub-node supports only the following properties:
+
+- 'speed' (integer, mandatory), to indicate the link speed. Accepted
+  values are 10, 100 and 1000
+- 'full-duplex' (boolean, optional), to indicate that full duplex is
+  used. When absent, half duplex is assumed.
+
 Example:
 
 
@@ -53,6 +70,10 @@
 					label = "cpu";
 					ethernet = <&gmac1>;
 					phy-mode = "rgmii";
+					fixed-link {
+						speed = 1000;
+						full-duplex;
+					};
 				};
 
 				port@1 {
diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt
index 89e62dd..da37da0 100644
--- a/Documentation/devicetree/bindings/net/meson-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt
@@ -10,6 +10,7 @@
 			- "amlogic,meson6-dwmac"
 			- "amlogic,meson8b-dwmac"
 			- "amlogic,meson-gxbb-dwmac"
+			- "amlogic,meson-axg-dwmac"
 		Additionally "snps,dwmac" and any applicable more
 		detailed version number described in net/stmmac.txt
 		should be used.
diff --git a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
index faf56c2..6026756 100644
--- a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
@@ -49,6 +49,8 @@
   - qcom,phy-status-reg: Register offset for PHY status.
   - qcom,dbi-base-reg: Register offset for DBI base address.
   - qcom,slv-space-reg: Register offset for slave address space size.
+  - qcom,pcie-vendor-id: Vendor id to be written to the Vendor ID register.
+  - qcom,pcie-device-id: Device id to be written to the Device ID register.
   - qcom,pcie-link-speed: generation of PCIe link speed. The value could be
     1, 2 or 3.
   - qcom,pcie-active-config: boolean type; active configuration of PCIe
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index fe7fe0b..1b98817 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -3,8 +3,10 @@
 Required properties for the root node:
  - compatible: one of "amlogic,meson8-cbus-pinctrl"
 		      "amlogic,meson8b-cbus-pinctrl"
+		      "amlogic,meson8m2-cbus-pinctrl"
 		      "amlogic,meson8-aobus-pinctrl"
 		      "amlogic,meson8b-aobus-pinctrl"
+		      "amlogic,meson8m2-aobus-pinctrl"
 		      "amlogic,meson-gxbb-periphs-pinctrl"
 		      "amlogic,meson-gxbb-aobus-pinctrl"
  - reg: address and size of registers controlling irq functionality
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
index 83f964d..1e49e96 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
@@ -308,6 +308,20 @@
 	Definition: Boolean property to support external-rsense based
 		    configuration.
 
+- qcom,shutdown-temp-diff
+	Usage:      optional
+	Value type: <u32>
+	Definition: The allowed battery temperature difference, in deci-degrees,
+		    between shutdown and power-on to continue with the shutdown
+		    SOC. If not specified, the default value is 6 degrees C (60).
+
+- qcom,shutdown-soc-threshold
+	Usage:      optional
+	Value type: <u32>
+	Definition: The SOC difference allowed between PON and SHUTDOWN SOC
+		    for the shutdown SOC to be used. If the difference is
+		    beyond this value, the PON SOC is used.
+
 ==========================================================
 Second Level Nodes - Peripherals managed by QGAUGE driver
 ==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 9de24c3..452fff9 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -199,6 +199,12 @@
   Definition: Boolean flag which when present disables STAT pin default software
 		override configuration.
 
+- qcom,fcc-stepping-enable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables stepwise change in FCC.
+		The default stepping rate is 100mA/sec.
+
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
index 2c26743..bba5784 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -247,6 +247,12 @@
 	Definition: Boolean flag which when present enables mositure protection
 		    feature.
 
+- qcom,fcc-stepping-enable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables stepwise change in FCC.
+		The default stepping rate is 100mA/sec.
+
 =============================================
 Second Level Nodes - SMB5 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
index 8fbf8e2..83a2e8a 100644
--- a/Documentation/devicetree/bindings/qseecom/qseecom.txt
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -27,6 +27,7 @@
   - qcom,qsee-reentrancy-support: indicates the qsee reentrancy phase supported by the target
   - qcom,commonlib64-loaded-by-uefi: indicates commonlib64 is loaded by uefi already
   - qcom,fde-key-size: indicates which FDE key size is used in device.
+  - qcom,enable-key-wrap-in-ks: enables wrapping of ICE key with KS key.
 
 Example:
 	qcom,qseecom@fe806000 {
@@ -40,6 +41,7 @@
 		qcom,hlos-ce-hw-instance = <1 2>;
 		qcom,qsee-ce-hw-instance = <0>;
 		qcom,support-fde;
+		qcom,enable-key-wrap-in-ks;
 		qcom,support-pfe;
 		qcom,msm_bus,name = "qseecom-noc";
 		qcom,msm_bus,num_cases = <4>;
@@ -64,6 +66,7 @@
 		reg = <0x7f00000 0x500000>;
 		reg-names = "secapp-region";
 		qcom,support-fde;
+		qcom,enable-key-wrap-in-ks;
 		qcom,full-disk-encrypt-info = <0 1 2>, <0 2 2>;
 		qcom,support-pfe;
 		qcom,per-file-encrypt-info = <0 1 0>, <0 2 0>;
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
index 72c4eaf..748cfbe 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
@@ -31,6 +31,14 @@
 	Value type: <phandle>
 	Definition:  Phandle to the PMIC's revid node
 
+- qcom,voltage-step-ramp
+	Usage:      optional
+	Value type: <bool>
+	Definition:  Required only if the voltage needs to be set in
+		     steps of 500 mV starting from 4500 mV. This needs
+		     to be enabled only on platforms where the voltage
+		     needs to be ramped up in multiple steps.
+
 Touch-to-wake (TTW) properties:
 
 TTW supports 2 modes of operation - HW and SW. In the HW mode the enable/disable
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index c28b05b..bcaa311 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -107,6 +107,9 @@
    microvolts or a value corresponding to voltage corner.
  - "pcs_clamp_enable_reg" : Clamps the phy data inputs and enables USB3
    autonomous mode.
+ - extcon : phandle to external connector devices which provide type-C based
+            "USB-HOST" cable events. This phandle is used to notify the driver
+            of the number of lanes used in USB+DP concurrent mode.
 
 Example:
 	ssphy0: ssphy@f9b38000 {
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 193a034..d9a0f69 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -154,6 +154,26 @@
                        enabled by default.
 data_flush             Enable data flushing before checkpoint in order to
                        persist data of regular and symlink.
+fault_injection=%d     Enable fault injection in all supported types with
+                       specified injection rate.
+fault_type=%d          Configure the fault injection type; it should be
+                       enabled together with the fault_injection option.
+                       The fault type values are shown below; a single or
+                       a combined type is supported.
+                       Type_Name		Type_Value
+                       FAULT_KMALLOC		0x000000001
+                       FAULT_KVMALLOC		0x000000002
+                       FAULT_PAGE_ALLOC		0x000000004
+                       FAULT_PAGE_GET		0x000000008
+                       FAULT_ALLOC_BIO		0x000000010
+                       FAULT_ALLOC_NID		0x000000020
+                       FAULT_ORPHAN		0x000000040
+                       FAULT_BLOCK		0x000000080
+                       FAULT_DIR_DEPTH		0x000000100
+                       FAULT_EVICT_INODE	0x000000200
+                       FAULT_TRUNCATE		0x000000400
+                       FAULT_IO			0x000000800
+                       FAULT_CHECKPOINT		0x000001000
+                       FAULT_DISCARD		0x000002000
 mode=%s                Control block allocation mode which supports "adaptive"
                        and "lfs". In "lfs" mode, there should be no random
                        writes towards main area.
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
new file mode 100644
index 0000000..48b424d
--- /dev/null
+++ b/Documentation/filesystems/fscrypt.rst
@@ -0,0 +1,626 @@
+=====================================
+Filesystem-level encryption (fscrypt)
+=====================================
+
+Introduction
+============
+
+fscrypt is a library which filesystems can hook into to support
+transparent encryption of files and directories.
+
+Note: "fscrypt" in this document refers to the kernel-level portion,
+implemented in ``fs/crypto/``, as opposed to the userspace tool
+`fscrypt <https://github.com/google/fscrypt>`_.  This document only
+covers the kernel-level portion.  For command-line examples of how to
+use encryption, see the documentation for the userspace tool `fscrypt
+<https://github.com/google/fscrypt>`_.  Also, it is recommended to use
+the fscrypt userspace tool, or other existing userspace tools such as
+`fscryptctl <https://github.com/google/fscryptctl>`_ or `Android's key
+management system
+<https://source.android.com/security/encryption/file-based>`_, over
+using the kernel's API directly.  Using existing tools reduces the
+chance of introducing your own security bugs.  (Nevertheless, for
+completeness this documentation covers the kernel's API anyway.)
+
+Unlike dm-crypt, fscrypt operates at the filesystem level rather than
+at the block device level.  This allows it to encrypt different files
+with different keys and to have unencrypted files on the same
+filesystem.  This is useful for multi-user systems where each user's
+data-at-rest needs to be cryptographically isolated from the others.
+However, except for filenames, fscrypt does not encrypt filesystem
+metadata.
+
+Unlike eCryptfs, which is a stacked filesystem, fscrypt is integrated
+directly into supported filesystems --- currently ext4, F2FS, and
+UBIFS.  This allows encrypted files to be read and written without
+caching both the decrypted and encrypted pages in the pagecache,
+thereby nearly halving the memory used and bringing it in line with
+unencrypted files.  Similarly, half as many dentries and inodes are
+needed.  eCryptfs also limits encrypted filenames to 143 bytes,
+causing application compatibility issues; fscrypt allows the full 255
+bytes (NAME_MAX).  Finally, unlike eCryptfs, the fscrypt API can be
+used by unprivileged users, with no need to mount anything.
+
+fscrypt does not support encrypting files in-place.  Instead, it
+supports marking an empty directory as encrypted.  Then, after
+userspace provides the key, all regular files, directories, and
+symbolic links created in that directory tree are transparently
+encrypted.
+
+Threat model
+============
+
+Offline attacks
+---------------
+
+Provided that userspace chooses a strong encryption key, fscrypt
+protects the confidentiality of file contents and filenames in the
+event of a single point-in-time permanent offline compromise of the
+block device content.  fscrypt does not protect the confidentiality of
+non-filename metadata, e.g. file sizes, file permissions, file
+timestamps, and extended attributes.  Also, the existence and location
+of holes (unallocated blocks which logically contain all zeroes) in
+files is not protected.
+
+fscrypt is not guaranteed to protect confidentiality or authenticity
+if an attacker is able to manipulate the filesystem offline prior to
+an authorized user later accessing the filesystem.
+
+Online attacks
+--------------
+
+fscrypt (and storage encryption in general) can only provide limited
+protection, if any at all, against online attacks.  In detail:
+
+fscrypt is only resistant to side-channel attacks, such as timing or
+electromagnetic attacks, to the extent that the underlying Linux
+Cryptographic API algorithms are.  If a vulnerable algorithm is used,
+such as a table-based implementation of AES, it may be possible for an
+attacker to mount a side channel attack against the online system.
+Side channel attacks may also be mounted against applications
+consuming decrypted data.
+
+After an encryption key has been provided, fscrypt is not designed to
+hide the plaintext file contents or filenames from other users on the
+same system, regardless of the visibility of the keyring key.
+Instead, existing access control mechanisms such as file mode bits,
+POSIX ACLs, LSMs, or mount namespaces should be used for this purpose.
+Also note that as long as the encryption keys are *anywhere* in
+memory, an online attacker can necessarily compromise them by mounting
+a physical attack or by exploiting any kernel security vulnerability
+which provides an arbitrary memory read primitive.
+
+While it is ostensibly possible to "evict" keys from the system,
+recently accessed encrypted files will remain accessible at least
+until the filesystem is unmounted or the VFS caches are dropped, e.g.
+using ``echo 2 > /proc/sys/vm/drop_caches``.  Even after that, if the
+RAM is compromised before being powered off, it will likely still be
+possible to recover portions of the plaintext file contents, if not
+some of the encryption keys as well.  (Since Linux v4.12, all
+in-kernel keys related to fscrypt are sanitized before being freed.
+However, userspace would need to do its part as well.)
+
+Currently, fscrypt does not prevent a user from maliciously providing
+an incorrect key for another user's existing encrypted files.  A
+protection against this is planned.
+
+Key hierarchy
+=============
+
+Master Keys
+-----------
+
+Each encrypted directory tree is protected by a *master key*.  Master
+keys can be up to 64 bytes long, and must be at least as long as the
+greater of the key length needed by the contents and filenames
+encryption modes being used.  For example, if AES-256-XTS is used for
+contents encryption, the master key must be 64 bytes (512 bits).  Note
+that the XTS mode is defined to require a key twice as long as that
+required by the underlying block cipher.
+
+To "unlock" an encrypted directory tree, userspace must provide the
+appropriate master key.  There can be any number of master keys, each
+of which protects any number of directory trees on any number of
+filesystems.
+
+Userspace should generate master keys either using a cryptographically
+secure random number generator, or by using a KDF (Key Derivation
+Function).  Note that whenever a KDF is used to "stretch" a
+lower-entropy secret such as a passphrase, it is critical that a KDF
+designed for this purpose be used, such as scrypt, PBKDF2, or Argon2.
+
+Per-file keys
+-------------
+
+Master keys are not used to encrypt file contents or names directly.
+Instead, a unique key is derived for each encrypted file, including
+each regular file, directory, and symbolic link.  This has several
+advantages:
+
+- In cryptosystems, the same key material should never be used for
+  different purposes.  Using the master key as both an XTS key for
+  contents encryption and as a CTS-CBC key for filenames encryption
+  would violate this rule.
+- Per-file keys simplify the choice of IVs (Initialization Vectors)
+  for contents encryption.  Without per-file keys, to ensure IV
+  uniqueness both the inode and logical block number would need to be
+  encoded in the IVs.  This would make it impossible to renumber
+  inodes, which e.g. ``resize2fs`` can do when resizing an ext4
+  filesystem.  With per-file keys, it is sufficient to encode just the
+  logical block number in the IVs.
+- Per-file keys strengthen the encryption of filenames, where IVs are
+  reused out of necessity.  With a unique key per directory, IV reuse
+  is limited to within a single directory.
+- Per-file keys allow individual files to be securely erased simply by
+  securely erasing their keys.  (Not yet implemented.)
+
+A KDF (Key Derivation Function) is used to derive per-file keys from
+the master key.  This is done instead of wrapping a randomly-generated
+key for each file because it reduces the size of the encryption xattr,
+which for some filesystems makes the xattr more likely to fit in-line
+in the filesystem's inode table.  With a KDF, only a 16-byte nonce is
+required --- long enough to make key reuse extremely unlikely.  A
+wrapped key, on the other hand, would need to be up to 64 bytes ---
+the length of an AES-256-XTS key.  Furthermore, currently there is no
+requirement to support unlocking a file with multiple alternative
+master keys or to support rotating master keys.  Instead, the master
+keys may be wrapped in userspace, e.g. as done by the `fscrypt
+<https://github.com/google/fscrypt>`_ tool.
+
+The current KDF encrypts the master key using the 16-byte nonce as an
+AES-128-ECB key.  The output is used as the derived key.  If the
+output is longer than needed, then it is truncated to the needed
+length.  Truncation is the norm for directories and symlinks, since
+those use the CTS-CBC encryption mode which requires a key half as
+long as that required by the XTS encryption mode.
+
+Note: this KDF meets the primary security requirement, which is to
+produce unique derived keys that preserve the entropy of the master
+key, assuming that the master key is already a good pseudorandom key.
+However, it is nonstandard and has some problems such as being
+reversible, so it is generally considered to be a mistake!  It may be
+replaced with HKDF or another more standard KDF in the future.
+
+Encryption modes and usage
+==========================
+
+fscrypt allows one encryption mode to be specified for file contents
+and one encryption mode to be specified for filenames.  Different
+directory trees are permitted to use different encryption modes.
+Currently, the following pairs of encryption modes are supported:
+
+- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
+- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
+- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames
+
+It is strongly recommended to use AES-256-XTS for contents encryption.
+AES-128-CBC was added only for low-powered embedded devices with
+crypto accelerators such as CAAM or CESA that do not support XTS.
+
+Similarly, Speck128/256 support was only added for older or low-end
+CPUs which cannot do AES fast enough -- especially ARM CPUs which have
+NEON instructions but not the Cryptography Extensions -- and for which
+it would not otherwise be feasible to use encryption at all.  It is
+not recommended to use Speck on CPUs that have AES instructions.
+Speck support is only available if it has been enabled in the crypto
+API via CONFIG_CRYPTO_SPECK.  Also, on ARM platforms, to get
+acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.
+
+New encryption modes can be added relatively easily, without changes
+to individual filesystems.  However, authenticated encryption (AE)
+modes are not currently supported because of the difficulty of dealing
+with ciphertext expansion.
+
+For file contents, each filesystem block is encrypted independently.
+Currently, only the case where the filesystem block size is equal to
+the system's page size (usually 4096 bytes) is supported.  With the
+XTS mode of operation (recommended), the logical block number within
+the file is used as the IV.  With the CBC mode of operation (not
+recommended), ESSIV is used; specifically, the IV for CBC is the
+logical block number encrypted with AES-256, where the AES-256 key is
+the SHA-256 hash of the inode's data encryption key.
+
+For filenames, the full filename is encrypted at once.  Because of the
+requirements to retain support for efficient directory lookups and
+filenames of up to 255 bytes, a constant initialization vector (IV) is
+used.  However, each encrypted directory uses a unique key, which
+limits IV reuse to within a single directory.  Note that IV reuse in
+the context of CTS-CBC encryption means that when the original
+filenames share a common prefix at least as long as the cipher block
+size (16 bytes for AES), the corresponding encrypted filenames will
+also share a common prefix.  This is undesirable; it may be fixed in
+the future by switching to an encryption mode that is a strong
+pseudorandom permutation on arbitrary-length messages, e.g. the HEH
+(Hash-Encrypt-Hash) mode.
+
+Since filenames are encrypted with the CTS-CBC mode of operation, the
+plaintext and ciphertext filenames need not be multiples of the AES
+block size, i.e. 16 bytes.  However, the minimum size that can be
+encrypted is 16 bytes, so shorter filenames are NUL-padded to 16 bytes
+before being encrypted.  In addition, to reduce leakage of filename
+lengths via their ciphertexts, all filenames are NUL-padded to the
+next 4, 8, 16, or 32-byte boundary (configurable).  32 is recommended
+since this provides the best confidentiality, at the cost of making
+directory entries consume slightly more space.  Note that since NUL
+(``\0``) is not otherwise a valid character in filenames, the padding
+will never produce duplicate plaintexts.
+
+Symbolic link targets are considered a type of filename and are
+encrypted in the same way as filenames in directory entries.  Each
+symlink also uses a unique key; hence, the hardcoded IV is not a
+problem for symlinks.
+
+User API
+========
+
+Setting an encryption policy
+----------------------------
+
+The FS_IOC_SET_ENCRYPTION_POLICY ioctl sets an encryption policy on an
+empty directory or verifies that a directory or regular file already
+has the specified encryption policy.  It takes in a pointer to a
+:c:type:`struct fscrypt_policy`, defined as follows::
+
+    #define FS_KEY_DESCRIPTOR_SIZE  8
+
+    struct fscrypt_policy {
+            __u8 version;
+            __u8 contents_encryption_mode;
+            __u8 filenames_encryption_mode;
+            __u8 flags;
+            __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+    };
+
+This structure must be initialized as follows:
+
+- ``version`` must be 0.
+
+- ``contents_encryption_mode`` and ``filenames_encryption_mode`` must
+  be set to constants from ``<linux/fs.h>`` which identify the
+  encryption modes to use.  If unsure, use
+  FS_ENCRYPTION_MODE_AES_256_XTS (1) for ``contents_encryption_mode``
+  and FS_ENCRYPTION_MODE_AES_256_CTS (4) for
+  ``filenames_encryption_mode``.
+
+- ``flags`` must be set to a value from ``<linux/fs.h>`` which
+  identifies the amount of NUL-padding to use when encrypting
+  filenames.  If unsure, use FS_POLICY_FLAGS_PAD_32 (0x3).
+
+- ``master_key_descriptor`` specifies how to find the master key in
+  the keyring; see `Adding keys`_.  It is up to userspace to choose a
+  unique ``master_key_descriptor`` for each master key.  The e4crypt
+  and fscrypt tools use the first 8 bytes of
+  ``SHA-512(SHA-512(master_key))``, but this particular scheme is not
+  required.  Also, the master key need not be in the keyring yet when
+  FS_IOC_SET_ENCRYPTION_POLICY is executed.  However, it must be added
+  before any files can be created in the encrypted directory.
+
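+For illustration, a policy using the recommended modes could be set on a
+directory roughly as follows (a minimal sketch; the helper name is made
+up and error handling is minimal)::
+
+    #include <fcntl.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/fs.h>
+
+    static int set_policy(const char *dir,
+                          const __u8 key_desc[FS_KEY_DESCRIPTOR_SIZE])
+    {
+            struct fscrypt_policy policy;
+            int ret, fd = open(dir, O_RDONLY | O_DIRECTORY);
+
+            if (fd < 0)
+                    return -1;
+            memset(&policy, 0, sizeof(policy));
+            policy.version = 0;
+            policy.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+            policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+            policy.flags = FS_POLICY_FLAGS_PAD_32;
+            memcpy(policy.master_key_descriptor, key_desc,
+                   FS_KEY_DESCRIPTOR_SIZE);
+            ret = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
+            close(fd);
+            return ret;
+    }
+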
+If the file is not yet encrypted, then FS_IOC_SET_ENCRYPTION_POLICY
+verifies that the file is an empty directory.  If so, the specified
+encryption policy is assigned to the directory, turning it into an
+encrypted directory.  After that, and after providing the
+corresponding master key as described in `Adding keys`_, all regular
+files, directories (recursively), and symlinks created in the
+directory will be encrypted, inheriting the same encryption policy.
+The filenames in the directory's entries will be encrypted as well.
+
+Alternatively, if the file is already encrypted, then
+FS_IOC_SET_ENCRYPTION_POLICY validates that the specified encryption
+policy exactly matches the actual one.  If they match, then the ioctl
+returns 0.  Otherwise, it fails with EEXIST.  This works on both
+regular files and directories, including nonempty directories.
+
+Note that the ext4 filesystem does not allow the root directory to be
+encrypted, even if it is empty.  Users who want to encrypt an entire
+filesystem with one key should consider using dm-crypt instead.
+
+FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors:
+
+- ``EACCES``: the file is not owned by the process's uid, nor does the
+  process have the CAP_FOWNER capability in a namespace with the file
+  owner's uid mapped
+- ``EEXIST``: the file is already encrypted with an encryption policy
+  different from the one specified
+- ``EINVAL``: an invalid encryption policy was specified (invalid
+  version, mode(s), or flags)
+- ``ENOTDIR``: the file is unencrypted and is a regular file, not a
+  directory
+- ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
+- ``ENOTTY``: this type of filesystem does not implement encryption
+- ``EOPNOTSUPP``: the kernel was not configured with encryption
+  support for this filesystem, or the filesystem superblock has not
+  had encryption enabled on it.  (For example, to use encryption on an
+  ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
+  kernel config, and the superblock must have had the "encrypt"
+  feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
+  encrypt``.)
+- ``EPERM``: this directory may not be encrypted, e.g. because it is
+  the root directory of an ext4 filesystem
+- ``EROFS``: the filesystem is readonly
+
+Getting an encryption policy
+----------------------------
+
+The FS_IOC_GET_ENCRYPTION_POLICY ioctl retrieves the :c:type:`struct
+fscrypt_policy`, if any, for a directory or regular file.  See above
+for the struct definition.  No additional permissions are required
+beyond the ability to open the file.
+
+FS_IOC_GET_ENCRYPTION_POLICY can fail with the following errors:
+
+- ``EINVAL``: the file is encrypted, but it uses an unrecognized
+  encryption context format
+- ``ENODATA``: the file is not encrypted
+- ``ENOTTY``: this type of filesystem does not implement encryption
+- ``EOPNOTSUPP``: the kernel was not configured with encryption
+  support for this filesystem
+
+Note: if you only need to know whether a file is encrypted or not, on
+most filesystems it is also possible to use the FS_IOC_GETFLAGS ioctl
+and check for FS_ENCRYPT_FL, or to use the statx() system call and
+check for STATX_ATTR_ENCRYPTED in stx_attributes.
+
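+For example, such a check might look like this (a minimal sketch; the
+helper name is illustrative)::
+
+    #include <sys/ioctl.h>
+    #include <linux/fs.h>
+
+    /* Returns 1 if the open file is encrypted, 0 if not, -1 on error. */
+    static int is_encrypted(int fd)
+    {
+            int flags = 0;
+
+            if (ioctl(fd, FS_IOC_GETFLAGS, &flags) != 0)
+                    return -1;
+            return (flags & FS_ENCRYPT_FL) ? 1 : 0;
+    }
+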
+Getting the per-filesystem salt
+-------------------------------
+
+Some filesystems, such as ext4 and F2FS, also support the deprecated
+ioctl FS_IOC_GET_ENCRYPTION_PWSALT.  This ioctl retrieves a randomly
+generated 16-byte value stored in the filesystem superblock.  This
+value is intended to be used as a salt when deriving an encryption key
+from a passphrase or other low-entropy user credential.
+
+FS_IOC_GET_ENCRYPTION_PWSALT is deprecated.  Instead, prefer to
+generate and manage any needed salt(s) in userspace.
+
+Adding keys
+-----------
+
+To provide a master key, userspace must add it to an appropriate
+keyring using the add_key() system call (see:
+``Documentation/security/keys/core.rst``).  The key type must be
+"logon"; keys of this type are kept in kernel memory and cannot be
+read back by userspace.  The key description must be "fscrypt:"
+followed by the 16-character lower case hex representation of the
+``master_key_descriptor`` that was set in the encryption policy.  The
+key payload must conform to the following structure::
+
+    #define FS_MAX_KEY_SIZE 64
+
+    struct fscrypt_key {
+            u32 mode;
+            u8 raw[FS_MAX_KEY_SIZE];
+            u32 size;
+    };
+
+``mode`` is ignored; just set it to 0.  The actual key is provided in
+``raw`` with ``size`` indicating its size in bytes.  That is, the
+bytes ``raw[0..size-1]`` (inclusive) are the actual key.
+
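+For illustration, a key could be added to the session keyring roughly as
+follows (a minimal sketch assuming the raw key bytes and the descriptor
+are already available; the helper name is made up)::
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/syscall.h>
+    #include <linux/fs.h>
+    #include <linux/keyctl.h>
+
+    static long add_fscrypt_key(const __u8 *raw, __u32 size,
+                                const __u8 desc[FS_KEY_DESCRIPTOR_SIZE])
+    {
+            struct fscrypt_key key;
+            char description[8 + 2 * FS_KEY_DESCRIPTOR_SIZE + 1];
+            int i, n;
+
+            if (size > FS_MAX_KEY_SIZE)
+                    return -1;
+            memset(&key, 0, sizeof(key));
+            key.mode = 0;                   /* ignored by the kernel */
+            key.size = size;
+            memcpy(key.raw, raw, size);
+
+            /* "fscrypt:" followed by 16 lower case hex characters */
+            n = sprintf(description, "fscrypt:");
+            for (i = 0; i < FS_KEY_DESCRIPTOR_SIZE; i++)
+                    n += sprintf(description + n, "%02x", desc[i]);
+
+            return syscall(SYS_add_key, "logon", description,
+                           &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
+    }
+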
+The key description prefix "fscrypt:" may alternatively be replaced
+with a filesystem-specific prefix such as "ext4:".  However, the
+filesystem-specific prefixes are deprecated and should not be used in
+new programs.
+
+There are several different types of keyrings in which encryption keys
+may be placed, such as a session keyring, a user session keyring, or a
+user keyring.  Each key must be placed in a keyring that is "attached"
+to all processes that might need to access files encrypted with it, in
+the sense that request_key() will find the key.  Generally, if only
+processes belonging to a specific user need to access a given
+encrypted directory and no session keyring has been installed, then
+that directory's key should be placed in that user's user session
+keyring or user keyring.  Otherwise, a session keyring should be
+installed if needed, and the key should be linked into that session
+keyring, or in a keyring linked into that session keyring.
+
+Note: introducing the complex visibility semantics of keyrings here
+was arguably a mistake --- especially given that by design, after any
+process successfully opens an encrypted file (thereby setting up the
+per-file key), possessing the keyring key is not actually required for
+any process to read/write the file until its in-memory inode is
+evicted.  In the future there probably should be a way to provide keys
+directly to the filesystem instead, which would make the intended
+semantics clearer.
+
+Access semantics
+================
+
+With the key
+------------
+
+With the encryption key, encrypted regular files, directories, and
+symlinks behave very similarly to their unencrypted counterparts ---
+after all, the encryption is intended to be transparent.  However,
+astute users may notice some differences in behavior:
+
+- Unencrypted files, or files encrypted with a different encryption
+  policy (i.e. different key, modes, or flags), cannot be renamed or
+  linked into an encrypted directory; see `Encryption policy
+  enforcement`_.  Attempts to do so will fail with EPERM.  However,
+  encrypted files can be renamed within an encrypted directory, or
+  into an unencrypted directory.
+
+- Direct I/O is not supported on encrypted files.  Attempts to use
+  direct I/O on such files will fall back to buffered I/O.
+
+- The fallocate operations FALLOC_FL_COLLAPSE_RANGE,
+  FALLOC_FL_INSERT_RANGE, and FALLOC_FL_ZERO_RANGE are not supported
+  on encrypted files and will fail with EOPNOTSUPP.
+
+- Online defragmentation of encrypted files is not supported.  The
+  EXT4_IOC_MOVE_EXT and F2FS_IOC_MOVE_RANGE ioctls will fail with
+  EOPNOTSUPP.
+
+- The ext4 filesystem does not support data journaling with encrypted
+  regular files.  It will fall back to ordered data mode instead.
+
+- DAX (Direct Access) is not supported on encrypted files.
+
+- The st_size of an encrypted symlink will not necessarily give the
+  length of the symlink target as required by POSIX.  It will actually
+  give the length of the ciphertext, which will be slightly longer
+  than the plaintext due to NUL-padding and an extra 2-byte overhead.
+
+- The maximum length of an encrypted symlink is 2 bytes shorter than
+  the maximum length of an unencrypted symlink.  For example, on an
+  EXT4 filesystem with a 4K block size, unencrypted symlinks can be up
+  to 4095 bytes long, while encrypted symlinks can only be up to 4093
+  bytes long (both lengths excluding the terminating null).
+
+Note that mmap *is* supported.  This is possible because the pagecache
+for an encrypted file contains the plaintext, not the ciphertext.
+
+Without the key
+---------------
+
+Some filesystem operations may be performed on encrypted regular
+files, directories, and symlinks even before their encryption key has
+been provided:
+
+- File metadata may be read, e.g. using stat().
+
+- Directories may be listed, in which case the filenames will be
+  listed in an encoded form derived from their ciphertext.  The
+  current encoding algorithm is described in `Filename hashing and
+  encoding`_.  The algorithm is subject to change, but it is
+  guaranteed that the presented filenames will be no longer than
+  NAME_MAX bytes, will not contain the ``/`` or ``\0`` characters, and
+  will uniquely identify directory entries.
+
+  The ``.`` and ``..`` directory entries are special.  They are always
+  present and are not encrypted or encoded.
+
+- Files may be deleted.  That is, nondirectory files may be deleted
+  with unlink() as usual, and empty directories may be deleted with
+  rmdir() as usual.  Therefore, ``rm`` and ``rm -r`` will work as
+  expected.
+
+- Symlink targets may be read and followed, but they will be presented
+  in encrypted form, similar to filenames in directories.  Hence, they
+  are unlikely to point to anywhere useful.
+
+Without the key, regular files cannot be opened or truncated.
+Attempts to do so will fail with ENOKEY.  This implies that any
+regular file operations that require a file descriptor, such as
+read(), write(), mmap(), fallocate(), and ioctl(), are also forbidden.
+
+Also without the key, files of any type (including directories) cannot
+be created or linked into an encrypted directory, nor can a name in an
+encrypted directory be the source or target of a rename, nor can an
+O_TMPFILE temporary file be created in an encrypted directory.  All
+such operations will fail with ENOKEY.
+
+It is not currently possible to backup and restore encrypted files
+without the encryption key.  This would require special APIs which
+have not yet been implemented.
+
+Encryption policy enforcement
+=============================
+
+After an encryption policy has been set on a directory, all regular
+files, directories, and symbolic links created in that directory
+(recursively) will inherit that encryption policy.  Special files ---
+that is, named pipes, device nodes, and UNIX domain sockets --- will
+not be encrypted.
+
+Except for those special files, it is forbidden to have unencrypted
+files, or files encrypted with a different encryption policy, in an
+encrypted directory tree.  Attempts to link or rename such a file into
+an encrypted directory will fail with EPERM.  This is also enforced
+during ->lookup() to provide limited protection against offline
+attacks that try to disable or downgrade encryption in known locations
+where applications may later write sensitive data.  It is recommended
+that systems implementing a form of "verified boot" take advantage of
+this by validating all top-level encryption policies prior to access.
+
+Implementation details
+======================
+
+Encryption context
+------------------
+
+An encryption policy is represented on-disk by a :c:type:`struct
+fscrypt_context`.  It is up to individual filesystems to decide where
+to store it, but normally it would be stored in a hidden extended
+attribute.  It should *not* be exposed by the xattr-related system
+calls such as getxattr() and setxattr() because of the special
+semantics of the encryption xattr.  (In particular, there would be
+much confusion if an encryption policy were to be added to or removed
+from anything other than an empty directory.)  The struct is defined
+as follows::
+
+    #define FS_KEY_DESCRIPTOR_SIZE  8
+    #define FS_KEY_DERIVATION_NONCE_SIZE 16
+
+    struct fscrypt_context {
+            u8 format;
+            u8 contents_encryption_mode;
+            u8 filenames_encryption_mode;
+            u8 flags;
+            u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+            u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+    };
+
+Note that :c:type:`struct fscrypt_context` contains the same
+information as :c:type:`struct fscrypt_policy` (see `Setting an
+encryption policy`_), except that :c:type:`struct fscrypt_context`
+also contains a nonce.  The nonce is randomly generated by the kernel
+and is used to derive the inode's encryption key as described in
+`Per-file keys`_.
+
+Data path changes
+-----------------
+
+For the read path (->readpage()) of regular files, filesystems can
+read the ciphertext into the page cache and decrypt it in-place.  The
+page lock must be held until decryption has finished, to prevent the
+page from becoming visible to userspace prematurely.
+
+For the write path (->writepage()) of regular files, filesystems
+cannot encrypt data in-place in the page cache, since the cached
+plaintext must be preserved.  Instead, filesystems must encrypt into a
+temporary buffer or "bounce page", then write out the temporary
+buffer.  Some filesystems, such as UBIFS, already use temporary
+buffers regardless of encryption.  Other filesystems, such as ext4 and
+F2FS, have to allocate bounce pages specially for encryption.
+
+Filename hashing and encoding
+-----------------------------
+
+Modern filesystems accelerate directory lookups by using indexed
+directories.  An indexed directory is organized as a tree keyed by
+filename hashes.  When a ->lookup() is requested, the filesystem
+normally hashes the filename being looked up so that it can quickly
+find the corresponding directory entry, if any.
+
+With encryption, lookups must be supported and efficient both with and
+without the encryption key.  Clearly, it would not work to hash the
+plaintext filenames, since the plaintext filenames are unavailable
+without the key.  (Hashing the plaintext filenames would also make it
+impossible for the filesystem's fsck tool to optimize encrypted
+directories.)  Instead, filesystems hash the ciphertext filenames,
+i.e. the bytes actually stored on-disk in the directory entries.  When
+asked to do a ->lookup() with the key, the filesystem just encrypts
+the user-supplied name to get the ciphertext.
+
+Lookups without the key are more complicated.  The raw ciphertext may
+contain the ``\0`` and ``/`` characters, which are illegal in
+filenames.  Therefore, readdir() must base64-encode the ciphertext for
+presentation.  For most filenames, this works fine; on ->lookup(), the
+filesystem just base64-decodes the user-supplied name to get back to
+the raw ciphertext.
+
+However, for very long filenames, base64 encoding would cause the
+filename length to exceed NAME_MAX.  To prevent this, readdir()
+actually presents long filenames in an abbreviated form which encodes
+a strong "hash" of the ciphertext filename, along with the optional
+filesystem-specific hash(es) needed for directory lookups.  This
+allows the filesystem to still, with a high degree of confidence, map
+the filename given in ->lookup() back to a particular directory entry
+that was previously listed by readdir().  See :c:type:`struct
+fscrypt_digested_name` in the source for more details.
+
+Note that the precise way that filenames are presented to userspace
+without the key is subject to change in the future.  It is only meant
+as a way to temporarily present valid filenames so that commands like
+``rm -r`` work as expected on encrypted directories.
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index cfd31d9..f8bf140 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/
 
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
 
 Description
 -----------
diff --git a/Documentation/index.rst b/Documentation/index.rst
index c53d089..213399a 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -12,6 +12,7 @@
    :maxdepth: 2
 
    kernel-documentation
+   l1tf
    development-process/index
    dev-tools/tools
    driver-api/index
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 435a509..fdc9af2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1100,12 +1100,6 @@
 	nopku		[X86] Disable Memory Protection Keys CPU feature found
 			in some Intel CPUs.
 
-	eagerfpu=	[X86]
-			on	enable eager fpu restore
-			off	disable eager fpu restore
-			auto	selects the default scheme, which automatically
-				enables eagerfpu restore for xsaveopt.
-
 	module.async_probe [KNL]
 			Enable asynchronous probe on this module.
 
@@ -2032,10 +2026,84 @@
 			(virtualized real and unpaged mode) on capable
 			Intel chips. Default is 1 (enabled)
 
+	kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
+			CVE-2018-3620.
+
+			Valid arguments: never, cond, always
+
+			always: L1D cache flush on every VMENTER.
+			cond:	Flush L1D on VMENTER only when the code between
+				VMEXIT and VMENTER can leak host memory.
+			never:	Disables the mitigation
+
+			Default is cond (do L1 cache flush in specific instances)
+
 	kvm-intel.vpid=	[KVM,Intel] Disable Virtual Processor Identification
 			feature (tagged TLBs) on capable Intel chips.
 			Default is 1 (enabled)
 
+	l1tf=           [X86] Control mitigation of the L1TF vulnerability on
+			      affected CPUs
+
+			The kernel PTE inversion protection is unconditionally
+			enabled and cannot be disabled.
+
+			full
+				Provides all available mitigations for the
+				L1TF vulnerability. Disables SMT and
+				enables all mitigations in the
+				hypervisors, i.e. unconditional L1D flush.
+
+				SMT control and L1D flush control via the
+				sysfs interface is still possible after
+				boot.  Hypervisors will issue a warning
+				when the first VM is started in a
+				potentially insecure configuration,
+				i.e. SMT enabled or L1D flush disabled.
+
+			full,force
+				Same as 'full', but disables SMT and L1D
+				flush runtime control. Implies the
+				'nosmt=force' command line option.
+				(i.e. sysfs control of SMT is disabled.)
+
+			flush
+				Leaves SMT enabled and enables the default
+				hypervisor mitigation, i.e. conditional
+				L1D flush.
+
+				SMT control and L1D flush control via the
+				sysfs interface is still possible after
+				boot.  Hypervisors will issue a warning
+				when the first VM is started in a
+				potentially insecure configuration,
+				i.e. SMT enabled or L1D flush disabled.
+
+			flush,nosmt
+
+				Disables SMT and enables the default
+				hypervisor mitigation.
+
+				SMT control and L1D flush control via the
+				sysfs interface is still possible after
+				boot.  Hypervisors will issue a warning
+				when the first VM is started in a
+				potentially insecure configuration,
+				i.e. SMT enabled or L1D flush disabled.
+
+			flush,nowarn
+				Same as 'flush', but hypervisors will not
+				warn when a VM is started in a potentially
+				insecure configuration.
+
+			off
+				Disables hypervisor mitigations and doesn't
+				emit any warnings.
+
+			Default is 'flush'.
+
+			For details see: Documentation/admin-guide/l1tf.rst
+
 	l2cr=		[PPC]
 
 	l3cr=		[PPC]
@@ -2716,6 +2784,10 @@
 	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
 			Equivalent to smt=1.
 
+			[KNL,x86] Disable symmetric multithreading (SMT).
+			nosmt=force: Force disable SMT, cannot be undone
+				     via the sysfs control file.
+
 	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
 			(indirect branch prediction) vulnerability. System may
 			allow data leaks with this option, which is equivalent
@@ -3918,6 +3990,14 @@
 			last alloc / free. For more information see
 			Documentation/vm/slub.txt.
 
+	slub_memcg_sysfs=       [MM, SLUB]
+			Determines whether to enable sysfs directories for
+			memory cgroup sub-caches. 1 to enable, 0 to disable.
+			The default is determined by CONFIG_SLUB_MEMCG_SYSFS_ON.
+			Enabling this can lead to a very high number of debug
+			directories and files being created under
+			/sys/kernel/slub.
+
 	slub_max_order= [MM, SLUB]
 			Determines the maximum allowed order for slabs.
 			A high setting may cause OOMs due to memory
@@ -4045,6 +4125,23 @@
 	spia_pedr=
 	spia_peddr=
 
+	ssbd=		[ARM64,HW]
+			Speculative Store Bypass Disable control
+
+			On CPUs that are vulnerable to the Speculative
+			Store Bypass vulnerability and offer a
+			firmware based mitigation, this parameter
+			indicates how the mitigation should be used:
+
+			force-on:  Unconditionally enable mitigation
+				   for both kernel and userspace
+			force-off: Unconditionally disable mitigation
+				   for both kernel and userspace
+			kernel:    Always enable mitigation in the
+				   kernel, and offer a prctl interface
+				   to allow userspace to register its
+				   interest in being mitigated too.
+
 	stack_guard_gap=	[MM]
 			override the default stack gap protection. The value
 			is in page units and it defines how many pages prior
diff --git a/Documentation/l1tf.rst b/Documentation/l1tf.rst
new file mode 100644
index 0000000..bae52b84
--- /dev/null
+++ b/Documentation/l1tf.rst
@@ -0,0 +1,610 @@
+L1TF - L1 Terminal Fault
+========================
+
+L1 Terminal Fault is a hardware vulnerability which allows unprivileged
+speculative access to data which is available in the Level 1 Data Cache
+when the page table entry controlling the virtual address, which is used
+for the access, has the Present bit cleared or other reserved bits set.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+   - Processors from AMD, Centaur and other non Intel vendors
+
+   - Older processor models, where the CPU family is < 6
+
+   - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
+     Penwell, Pineview, Silvermont, Airmont, Merrifield)
+
+   - The Intel XEON PHI family
+
+   - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
+     IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
+     by the Meltdown vulnerability either. These CPUs should become
+     available by end of 2018.
+
+Whether a processor is affected or not can be read out from the L1TF
+vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the L1TF vulnerability:
+
+   =============  =================  ==============================
+   CVE-2018-3615  L1 Terminal Fault  SGX related aspects
+   CVE-2018-3620  L1 Terminal Fault  OS, SMM related aspects
+   CVE-2018-3646  L1 Terminal Fault  Virtualization related aspects
+   =============  =================  ==============================
+
+Problem
+-------
+
+If an instruction accesses a virtual address for which the relevant page
+table entry (PTE) has the Present bit cleared or other reserved bits set,
+then speculative execution ignores the invalid PTE and loads the referenced
+data if it is present in the Level 1 Data Cache, as if the page referenced
+by the address bits in the PTE was still present and accessible.
+
+While this is a purely speculative mechanism and the instruction will raise
+a page fault when it is retired eventually, the pure act of loading the
+data and making it available to other speculative instructions opens up the
+opportunity for side channel attacks to unprivileged malicious code,
+similar to the Meltdown attack.
+
+While Meltdown breaks the user space to kernel space protection, L1TF
+allows attacks on any physical memory address in the system, and the attack
+works across all protection domains. It allows attacks on SGX and also
+works from inside virtual machines because the speculation bypasses the
+extended page table (EPT) protection mechanism.
+
+
+Attack scenarios
+----------------
+
+1. Malicious user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   Operating Systems store arbitrary information in the address bits of a
+   PTE which is marked non present. This allows a malicious user space
+   application to attack the physical memory to which these PTEs resolve.
+   In some cases user-space can maliciously influence the information
+   encoded in the address bits of the PTE, thus making attacks more
+   deterministic and more practical.
+
+   The Linux kernel contains a mitigation for this attack vector, PTE
+   inversion, which is permanently enabled and has no performance
+   impact. The kernel ensures that the address bits of PTEs, which are not
+   marked present, never point to cacheable physical memory space.
+
+   A system with an up to date kernel is protected against attacks from
+   malicious user space applications.
+
+2. Malicious guest in a virtual machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   The fact that L1TF breaks all domain protections allows malicious guest
+   OSes, which can control the PTEs directly, and malicious guest user
+   space applications, which run on an unprotected guest kernel lacking the
+   PTE inversion mitigation for L1TF, to attack physical host memory.
+
+   A special aspect of L1TF in the context of virtualization is symmetric
+   multi threading (SMT). The Intel implementation of SMT is called
+   HyperThreading. The fact that Hyperthreads on the affected processors
+   share the L1 Data Cache (L1D) is important for this. As the flaw only
+   allows attacking data which is present in L1D, a malicious guest running
+   on one Hyperthread can attack the data which is brought into the L1D by
+   the context which runs on the sibling Hyperthread of the same physical
+   core. This context can be host OS, host user space or a different guest.
+
+   If the processor does not support Extended Page Tables, the attack is
+   only possible when the hypervisor does not sanitize the content of the
+   effective (shadow) page tables.
+
+   While solutions exist to mitigate these attack vectors fully, these
+   mitigations are not enabled by default in the Linux kernel because they
+   can affect performance significantly. The kernel provides several
+   mechanisms which can be utilized to address the problem depending on the
+   deployment scenario. The mitigations, their protection scope and impact
+   are described in the next sections.
+
+   The default mitigations and the rationale for choosing them are explained
+   at the end of this document. See :ref:`default_mitigations`.
+
+.. _l1tf_sys_info:
+
+L1TF system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current L1TF
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/l1tf
+
+The possible values in this file are:
+
+  ===========================   ===============================
+  'Not affected'		The processor is not vulnerable
+  'Mitigation: PTE Inversion'	The host protection is active
+  ===========================   ===============================
+
+If KVM/VMX is enabled and the processor is vulnerable then the following
+information is appended to the 'Mitigation: PTE Inversion' part:
+
+  - SMT status:
+
+    =====================  ================
+    'VMX: SMT vulnerable'  SMT is enabled
+    'VMX: SMT disabled'    SMT is disabled
+    =====================  ================
+
+  - L1D Flush mode:
+
+    ================================  ====================================
+    'L1D vulnerable'		      L1D flushing is disabled
+
+    'L1D conditional cache flushes'   L1D flush is conditionally enabled
+
+    'L1D cache flushes'		      L1D flush is unconditionally enabled
+    ================================  ====================================
+
+The resulting grade of protection is discussed in the following sections.
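+
+The file can be read like any other sysfs attribute; for example, a
+minimal C sketch (path as given above, possible values as described by
+the tables)::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            char line[256];
+            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf",
+                            "r");
+
+            if (!f)
+                    return 1;
+            if (fgets(line, sizeof(line), f))
+                    printf("L1TF status: %s", line);
+            fclose(f);
+            return 0;
+    }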
+
+
+Host mitigation mechanism
+-------------------------
+
+The kernel is unconditionally protected against L1TF attacks from malicious
+user space running on the host.
+
+
+Guest mitigation mechanisms
+---------------------------
+
+.. _l1d_flush:
+
+1. L1D flush on VMENTER
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   To make sure that a guest cannot attack data which is present in the L1D
+   the hypervisor flushes the L1D before entering the guest.
+
+   Flushing the L1D evicts not only the data which should not be accessed
+   by a potentially malicious guest, but also the guest
+   data. Flushing the L1D has a performance impact as the processor has to
+   bring the flushed guest data back into the L1D. Depending on the
+   frequency of VMEXIT/VMENTER and the type of computations in the guest
+   performance degradation in the range of 1% to 50% has been observed. For
+   scenarios where guest VMEXIT/VMENTER are rare the performance impact is
+   minimal. Virtio and mechanisms like posted interrupts are designed to
+   confine the VMEXITs to a bare minimum, but specific configurations and
+   application scenarios might still suffer from a high VMEXIT rate.
+
+   The kernel provides two L1D flush modes:
+    - conditional ('cond')
+    - unconditional ('always')
+
+   The conditional mode avoids L1D flushing after VMEXITs which execute
+   only audited code paths before the corresponding VMENTER. These code
+   paths have been verified not to expose secrets or other
+   interesting data to an attacker, but they can leak information about the
+   address space layout of the hypervisor.
+
+   Unconditional mode flushes L1D on all VMENTER invocations and provides
+   maximum protection. It has a higher overhead than the conditional
+   mode. The overhead cannot be quantified correctly as it depends on the
+   workload scenario and the resulting number of VMEXITs.
+
+   The general recommendation is to enable L1D flush on VMENTER. The kernel
+   defaults to conditional mode on affected processors.
+
+   **Note** that L1D flush does not prevent the SMT problem because the
+   sibling thread will also bring back its data into the L1D which makes it
+   attackable again.
+
+   L1D flush can be controlled by the administrator via the kernel command
+   line and sysfs control files. See :ref:`mitigation_control_command_line`
+   and :ref:`mitigation_control_kvm`.
+
+.. _guest_confinement:
+
+2. Guest VCPU confinement to dedicated physical cores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   To address the SMT problem, it is possible to make a guest or a group of
+   guests affine to one or more physical cores. The proper mechanism for
+   that is to utilize exclusive cpusets to ensure that no other guest or
+   host tasks can run on these cores.
+
+   If only a single guest or related guests run on sibling SMT threads on
+   the same physical core then they can only attack their own memory and
+   restricted parts of the host memory.
+
+   Host memory is attackable when one of the sibling SMT threads runs in
+   host OS (hypervisor) context and the other in guest context. The amount
+   of valuable information from the host OS context depends on the context
+   which the host OS executes, i.e. interrupts, soft interrupts and kernel
+   threads. The amount of valuable data from these contexts cannot be
+   declared as non-interesting for an attacker without deep inspection of
+   the code.
+
+   **Note** that assigning guests to a fixed set of physical cores affects
+   the ability of the scheduler to do load balancing and might have
+   negative effects on CPU utilization depending on the hosting
+   scenario. Disabling SMT might be a viable alternative for particular
+   scenarios.
+
+   For further information about confining guests to a single or to a group
+   of cores consult the cpusets documentation:
+
+   https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
+
+.. _interrupt_isolation:
+
+3. Interrupt affinity
+^^^^^^^^^^^^^^^^^^^^^
+
+   Interrupts can be made affine to logical CPUs. This is not universally
+   true because there are types of interrupts which are truly per CPU
+   interrupts, e.g. the local timer interrupt. Aside from that, multi queue
+   devices affine their interrupts to single CPUs or groups of CPUs per
+   queue without allowing the administrator to control the affinities.
+
+   Moving the interrupts, which can be affinity controlled, away from CPUs
+   which run untrusted guests, reduces the attack vector space.
+
+   Whether the interrupts which are affine to CPUs that run untrusted
+   guests provide interesting data for an attacker depends on the system
+   configuration and the scenarios which run on the system. While for some
+   of the interrupts it can be assumed that they won't expose interesting
+   information beyond exposing hints about the host OS memory layout, there
+   is no way to make general assumptions.
+
+   Interrupt affinity can be controlled by the administrator via the
+   /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
+   available at:
+
+   https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
+
+.. _smt_control:
+
+4. SMT control
+^^^^^^^^^^^^^^
+
+   To prevent the SMT issues of L1TF it might be necessary to disable SMT
+   completely. Disabling SMT can have a significant performance impact, but
+   the impact depends on the hosting scenario and the type of workloads.
+   The impact of disabling SMT also needs to be weighed against the impact
+   of other mitigation solutions like confining guests to dedicated cores.
+
+   The kernel provides a sysfs interface to retrieve the status of SMT and
+   to control it. It also provides a kernel command line interface to
+   control SMT.
+
+   The kernel command line interface consists of the following options:
+
+     =========== ==========================================================
+     nosmt	 Affects the bring up of the secondary CPUs during boot. The
+		 kernel tries to bring all present CPUs online during the
+		 boot process. "nosmt" makes sure that from each physical
+		 core only one - the so called primary (hyper) thread is
+		 activated. Due to a design flaw of Intel processors related
+		 to Machine Check Exceptions the non primary siblings have
+		 to be brought up at least partially and are then shut down
+		 again.  "nosmt" can be undone via the sysfs interface.
+
+     nosmt=force Has the same effect as "nosmt" but it does not allow to
+		 undo the SMT disable via the sysfs interface.
+     =========== ==========================================================
+
+   The sysfs interface provides two files:
+
+   - /sys/devices/system/cpu/smt/control
+   - /sys/devices/system/cpu/smt/active
+
+   /sys/devices/system/cpu/smt/control:
+
+     This file shows the SMT control state and provides the
+     ability to disable or (re)enable SMT. The possible states are:
+
+	==============  ===================================================
+	on		SMT is supported by the CPU and enabled. All
+			logical CPUs can be onlined and offlined without
+			restrictions.
+
+	off		SMT is supported by the CPU and disabled. Only
+			the so called primary SMT threads can be onlined
+			and offlined without restrictions. An attempt to
+			online a non-primary sibling is rejected.
+
+	forceoff	Same as 'off' but the state cannot be controlled.
+			Attempts to write to the control file are rejected.
+
+	notsupported	The processor does not support SMT. It's therefore
+			not affected by the SMT implications of L1TF.
+			Attempts to write to the control file are rejected.
+	==============  ===================================================
+
+     The possible states which can be written into this file to control SMT
+     state are:
+
+     - on
+     - off
+     - forceoff
+
+   /sys/devices/system/cpu/smt/active:
+
+     This file reports whether SMT is enabled and active, i.e. if on any
+     physical core two or more sibling threads are online.
+
+   SMT control is also possible at boot time via the l1tf kernel command
+   line parameter in combination with L1D flush control. See
+   :ref:`mitigation_control_command_line`.
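+
+   As a small illustration, the two files could be used from a C program
+   roughly as follows (an assumed sketch; writing the control file
+   requires appropriate privileges and is rejected in the 'forceoff' and
+   'notsupported' states)::
+
+       #include <fcntl.h>
+       #include <stdio.h>
+       #include <unistd.h>
+
+       int main(void)
+       {
+               char buf[32] = "";
+               int fd;
+
+               /* Report whether SMT is currently active. */
+               fd = open("/sys/devices/system/cpu/smt/active", O_RDONLY);
+               if (fd < 0)
+                       return 1;
+               if (read(fd, buf, sizeof(buf) - 1) <= 0)
+                       return 1;
+               close(fd);
+               printf("SMT active: %s", buf);
+
+               /* Disable SMT at runtime; rejected when the control state
+                * is 'forceoff' or 'notsupported'. */
+               fd = open("/sys/devices/system/cpu/smt/control", O_WRONLY);
+               if (fd < 0 || write(fd, "off", 3) != 3)
+                       return 1;
+               close(fd);
+               return 0;
+       }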
+
+5. Disabling EPT
+^^^^^^^^^^^^^^^^
+
+  Disabling EPT for virtual machines provides full mitigation for L1TF even
+  with SMT enabled, because the effective page tables for guests are
+  managed and sanitized by the hypervisor. However, disabling EPT has a
+  significant performance impact, especially when the Meltdown mitigation
+  KPTI is enabled.
+
+  EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
+
+There is ongoing research and development for new mitigation mechanisms to
+address the performance impact of disabling SMT or EPT.
+
+.. _mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the L1TF mitigations at boot
+time with the option "l1tf=". The valid arguments for this option are:
+
+  ============  =============================================================
+  full		Provides all available mitigations for the L1TF
+		vulnerability. Disables SMT and enables all mitigations in
+		the hypervisors, i.e. unconditional L1D flushing
+
+		SMT control and L1D flush control via the sysfs interface
+		is still possible after boot.  Hypervisors will issue a
+		warning when the first VM is started in a potentially
+		insecure configuration, i.e. SMT enabled or L1D flush
+		disabled.
+
+  full,force	Same as 'full', but disables SMT and L1D flush runtime
+		control. Implies the 'nosmt=force' command line option.
+		(i.e. sysfs control of SMT is disabled.)
+
+  flush		Leaves SMT enabled and enables the default hypervisor
+		mitigation, i.e. conditional L1D flushing
+
+		SMT control and L1D flush control via the sysfs interface
+		is still possible after boot.  Hypervisors will issue a
+		warning when the first VM is started in a potentially
+		insecure configuration, i.e. SMT enabled or L1D flush
+		disabled.
+
+  flush,nosmt	Disables SMT and enables the default hypervisor mitigation,
+		i.e. conditional L1D flushing.
+
+		SMT control and L1D flush control via the sysfs interface
+		is still possible after boot.  Hypervisors will issue a
+		warning when the first VM is started in a potentially
+		insecure configuration, i.e. SMT enabled or L1D flush
+		disabled.
+
+  flush,nowarn	Same as 'flush', but hypervisors will not warn when a VM is
+		started in a potentially insecure configuration.
+
+  off		Disables hypervisor mitigations and doesn't emit any
+		warnings.
+  ============  =============================================================
+
+The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
+
+
+.. _mitigation_control_kvm:
+
+Mitigation control for KVM - module parameter
+-------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism, flushing the L1D cache when
+entering a guest, can be controlled with a module parameter.
+
+The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the
+following arguments:
+
+  ============  ==============================================================
+  always	L1D cache flush on every VMENTER.
+
+  cond		Flush L1D on VMENTER only when the code between VMEXIT and
+		VMENTER can leak host memory which is considered
+		interesting for an attacker. This still can leak host memory
+		which allows, e.g., determining the host's address space layout.
+
+  never		Disables the mitigation
+  ============  ==============================================================
+
+The parameter can be provided on the kernel command line, as a module
+parameter when loading the modules and at runtime modified via the sysfs
+file:
+
+/sys/module/kvm_intel/parameters/vmentry_l1d_flush
+
+The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
+line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
+module parameter is ignored and writes to the sysfs file are rejected.
+
+
+Mitigation selection guide
+--------------------------
+
+1. No virtualization in use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   The system is protected by the kernel unconditionally and no further
+   action is required.
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   If the guest comes from a trusted source and the guest OS kernel is
+   guaranteed to have the L1TF mitigations in place the system is fully
+   protected against L1TF and no further action is required.
+
+   To avoid the overhead of the default L1D flushing on VMENTER the
+   administrator can disable the flushing via the kernel command line and
+   sysfs control files. See :ref:`mitigation_control_command_line` and
+   :ref:`mitigation_control_kvm`.
+
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+3.1. SMT not supported or disabled
+""""""""""""""""""""""""""""""""""
+
+  If SMT is not supported by the processor or disabled in the BIOS or by
+  the kernel, it's only required to enforce L1D flushing on VMENTER.
+
+  Conditional L1D flushing is the default behaviour and can be tuned. See
+  :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
+
+3.2. EPT not supported or disabled
+""""""""""""""""""""""""""""""""""
+
+  If EPT is not supported by the processor or disabled in the hypervisor,
+  the system is fully protected. SMT can stay enabled and L1D flushing on
+  VMENTER is not required.
+
+  EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
+
+3.3. SMT and EPT supported and active
+"""""""""""""""""""""""""""""""""""""
+
+  If SMT and EPT are supported and active then various degrees of
+  mitigations can be employed:
+
+  - L1D flushing on VMENTER:
+
+    L1D flushing on VMENTER is the minimal protection requirement, but it
+    is only potent in combination with other mitigation methods.
+
+    Conditional L1D flushing is the default behaviour and can be tuned. See
+    :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
+
+  - Guest confinement:
+
+    Confinement of guests to a single or a group of physical cores which
+    are not running any other processes can reduce the attack surface
+    significantly, but interrupts, soft interrupts and kernel threads can
+    still expose valuable data to a potential attacker. See
+    :ref:`guest_confinement`.
+
+  - Interrupt isolation:
+
+    Isolating the guest CPUs from interrupts can reduce the attack surface
+    further, but still allows a malicious guest to explore a limited amount
+    of host physical memory. This can at least be used to gain knowledge
+    about the host address space layout. The interrupts which have a fixed
+    affinity to the CPUs which run the untrusted guests can, depending on
+    the scenario, still trigger soft interrupts and schedule kernel threads
+    which might expose valuable information. See
+    :ref:`interrupt_isolation`.
+
+The above three mitigation methods combined can provide protection to a
+certain degree, but the risk of the remaining attack surface has to be
+carefully analyzed. For full protection the following methods are
+available:
+
+  - Disabling SMT:
+
+    Disabling SMT and enforcing the L1D flushing provides the maximum
+    amount of protection. This mitigation does not depend on any of the
+    above mitigation methods.
+
+    SMT control and L1D flushing can be tuned by the command line
+    parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run
+    time with the matching sysfs control files. See :ref:`smt_control`,
+    :ref:`mitigation_control_command_line` and
+    :ref:`mitigation_control_kvm`.
+
+  - Disabling EPT:
+
+    Disabling EPT provides the maximum amount of protection as well. It
+    does not depend on any of the above mitigation methods. SMT can stay
+    enabled and L1D flushing is not required, but the performance impact is
+    significant.
+
+    EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
+    parameter.
+
+3.4. Nested virtual machines
+""""""""""""""""""""""""""""
+
+When nested virtualization is in use, three operating systems are involved:
+the bare metal hypervisor, the nested hypervisor and the nested virtual
+machine.  VMENTER operations from the nested hypervisor into the nested
+guest will always be processed by the bare metal hypervisor. If KVM is the
+bare metal hypervisor it will:
+
+ - Flush the L1D cache on every switch from the nested hypervisor to the
+   nested virtual machine, so that the nested hypervisor's secrets are not
+   exposed to the nested virtual machine;
+
+ - Flush the L1D cache on every switch from the nested virtual machine to
+   the nested hypervisor; this is a complex operation, and flushing the L1D
+   cache avoids that the bare metal hypervisor's secrets are exposed to the
+   nested virtual machine;
+
+ - Instruct the nested hypervisor to not perform any L1D cache flush. This
+   is an optimization to avoid double L1D flushing.
+
+
+.. _default_mitigations:
+
+Default mitigations
+-------------------
+
+  The kernel default mitigations for vulnerable processors are:
+
+  - PTE inversion to protect against malicious user space. This is done
+    unconditionally and cannot be controlled.
+
+  - L1D conditional flushing on VMENTER when EPT is enabled for
+    a guest.
+
+  The kernel does not by default enforce the disabling of SMT, which leaves
+  SMT systems vulnerable when running untrusted guests with EPT enabled.
+
+  The rationale for this choice is:
+
+  - Force disabling SMT can break existing setups, especially with
+    unattended updates.
+
+  - If regular users run untrusted guests on their machine, then L1TF is
+    just an add on to other malware which might be embedded in an untrusted
+    guest, e.g. spam-bots or attacks on the local network.
+
+    There is no technical way to prevent a user from running untrusted code
+    on their machines blindly.
+
+  - It's technically extremely unlikely and, from today's knowledge, even
+    impossible that L1TF can be exploited via the most popular attack
+    mechanisms like JavaScript, because these mechanisms have no way to
+    control PTEs. If that were possible and no other mitigation were
+    available, then the default might be different.
+
+  - The administrators of cloud and hosting setups have to carefully
+    analyze the risk for their scenarios and make the appropriate
+    mitigation choices, which might even vary across their deployed
+    machines and also result in other changes of their overall setup.
+    There is no way for the kernel to provide a sensible default for this
+    kind of scenario.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 3cab335..1ac0123 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -50,6 +50,7 @@
 - nr_trim_pages         (only if CONFIG_MMU=n)
 - numa_zonelist_order
 - oom_dump_tasks
+- reap_mem_on_sigkill
 - oom_kill_allocating_task
 - overcommit_kbytes
 - overcommit_memory
@@ -635,6 +636,24 @@
 
 ==============================================================
 
+reap_mem_on_sigkill
+
+This enables or disables memory reaping for a process that has received
+SIGKILL, provided the sending process has the CAP_KILL capability.
+
+If this is set to 1, when a process receives SIGKILL from a process that
+has the CAP_KILL capability, it is added to the oom_reaper queue, from
+which the oom_reaper thread can pick it up and reap its memory. This
+applies whether the SIGKILL was delivered through sys_kill from user
+space or kill_pid from the kernel.
+
+If this is set to 0, the memory of a process that received SIGKILL
+(through either sys_kill from user space or kill_pid from the kernel) is
+not reaped.
+
+The default value is 0 (disabled).
+
+==============================================================
+
 oom_kill_allocating_task
 
 This enables or disables killing the OOM-triggering task in
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index e46c14f..3ff58a8 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -122,14 +122,15 @@
 privileged user (CAP_SYS_ADMIN).
 
 
-4.3 KVM_GET_MSR_INDEX_LIST
+4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
 
-Capability: basic
+Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
 Architectures: x86
-Type: system
+Type: system ioctl
 Parameters: struct kvm_msr_list (in/out)
 Returns: 0 on success; -1 on error
 Errors:
+  EFAULT:    the msr index list cannot be read from or written to
  E2BIG:     the msr index list is too big to fit in the array specified by
              the user.
 
@@ -138,16 +139,23 @@
 	__u32 indices[0];
 };
 
-This ioctl returns the guest msrs that are supported.  The list varies
-by kvm version and host processor, but does not change otherwise.  The
-user fills in the size of the indices array in nmsrs, and in return
-kvm adjusts nmsrs to reflect the actual number of msrs and fills in
-the indices array with their numbers.
+The user fills in the size of the indices array in nmsrs, and in return
+kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
+indices array with their numbers.
+
+KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported.  The list
+varies by kvm version and host processor, but does not change otherwise.
 
 Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are
 not returned in the MSR list, as different vcpus can have a different number
 of banks, as set via the KVM_X86_SETUP_MCE ioctl.
 
+KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed
+to the KVM_GET_MSRS system ioctl.  This lets userspace probe host capabilities
+and processor features that are exposed via MSRs (e.g., VMX capabilities).
+This list also varies by kvm version and host processor, but does not change
+otherwise.
+
 
 4.4 KVM_CHECK_EXTENSION
 
@@ -474,14 +482,22 @@
 
 4.18 KVM_GET_MSRS
 
-Capability: basic
+Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
 Architectures: x86
-Type: vcpu ioctl
+Type: system ioctl, vcpu ioctl
 Parameters: struct kvm_msrs (in/out)
-Returns: 0 on success, -1 on error
+Returns: number of msrs successfully returned;
+        -1 on error
 
+When used as a system ioctl:
+Reads the values of MSR-based features that are available for the VM.  This
+is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values.
+The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST
+in a system ioctl.
+
+When used as a vcpu ioctl:
 Reads model-specific registers from the vcpu.  Supported msr indices can
-be obtained using KVM_GET_MSR_INDEX_LIST.
+be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
 
 struct kvm_msrs {
 	__u32 nmsrs; /* number of msrs in entries */
diff --git a/Makefile b/Makefile
index 98ce41f..3655d3c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 112
+SUBLEVEL = 133
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -366,7 +366,7 @@
 
 # Use the wrapper for the compiler.  This wrapper scans for new
 # warnings and causes the build to stop upon encountering them
-CC		= $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
+CC		= $(PYTHON) $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
 
 CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
 		  -Wbitwise -Wno-return-void $(CF)
@@ -422,7 +422,8 @@
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -654,6 +655,7 @@
 KBUILD_CFLAGS	+= $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS	+= $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS	+= $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS	+= $(call cc-disable-warning, attribute-alias)
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 KBUILD_CFLAGS	+= $(call cc-option,-ffunction-sections,)
diff --git a/arch/Kconfig b/arch/Kconfig
index 4fa799b..0ecbd6d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -5,6 +5,9 @@
 config KEXEC_CORE
 	bool
 
+config HOTPLUG_SMT
+	bool
+
 config OPROFILE
 	tristate "OProfile system profiling"
 	depends on PROFILING
@@ -527,6 +530,7 @@
 	bool "Use clang Link Time Optimization (LTO) (EXPERIMENTAL)"
 	depends on ARCH_SUPPORTS_LTO_CLANG
 	depends on !FTRACE_MCOUNT_RECORD || HAVE_C_RECORDMCOUNT
+	depends on !KASAN
 	select LTO
 	select THIN_ARCHIVES
 	select LD_DEAD_CODE_DATA_ELIMINATION
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 4f95577..6e0d549 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -526,24 +526,19 @@
 SYSCALL_DEFINE1(osf_utsname, char __user *, name)
 {
 	int error;
+	char tmp[5 * 32];
 
 	down_read(&uts_sem);
-	error = -EFAULT;
-	if (copy_to_user(name + 0, utsname()->sysname, 32))
-		goto out;
-	if (copy_to_user(name + 32, utsname()->nodename, 32))
-		goto out;
-	if (copy_to_user(name + 64, utsname()->release, 32))
-		goto out;
-	if (copy_to_user(name + 96, utsname()->version, 32))
-		goto out;
-	if (copy_to_user(name + 128, utsname()->machine, 32))
-		goto out;
+	memcpy(tmp + 0 * 32, utsname()->sysname, 32);
+	memcpy(tmp + 1 * 32, utsname()->nodename, 32);
+	memcpy(tmp + 2 * 32, utsname()->release, 32);
+	memcpy(tmp + 3 * 32, utsname()->version, 32);
+	memcpy(tmp + 4 * 32, utsname()->machine, 32);
+	up_read(&uts_sem);
 
-	error = 0;
- out:
-	up_read(&uts_sem);	
-	return error;
+	if (copy_to_user(name, tmp, sizeof(tmp)))
+		return -EFAULT;
+	return 0;
 }
 
 SYSCALL_DEFINE0(getpagesize)
@@ -561,24 +556,22 @@
  */
 SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
 {
-	unsigned len;
-	int i;
+	int len, err = 0;
+	char *kname;
+	char tmp[32];
 
-	if (!access_ok(VERIFY_WRITE, name, namelen))
-		return -EFAULT;
-
-	len = namelen;
-	if (len > 32)
-		len = 32;
+	if (namelen < 0 || namelen > 32)
+		namelen = 32;
 
 	down_read(&uts_sem);
-	for (i = 0; i < len; ++i) {
-		__put_user(utsname()->domainname[i], name + i);
-		if (utsname()->domainname[i] == '\0')
-			break;
-	}
+	kname = utsname()->domainname;
+	len = strnlen(kname, namelen);
+	len = min(len + 1, namelen);
+	memcpy(tmp, kname, len);
 	up_read(&uts_sem);
 
+	if (copy_to_user(name, tmp, len))
+		return -EFAULT;
 	return 0;
 }
 
@@ -741,13 +734,14 @@
 	};
 	unsigned long offset;
 	const char *res;
-	long len, err = -EINVAL;
+	long len;
+	char tmp[__NEW_UTS_LEN + 1];
 
 	offset = command-1;
 	if (offset >= ARRAY_SIZE(sysinfo_table)) {
 		/* Digital UNIX has a few unpublished interfaces here */
 		printk("sysinfo(%d)", command);
-		goto out;
+		return -EINVAL;
 	}
 
 	down_read(&uts_sem);
@@ -755,13 +749,11 @@
 	len = strlen(res)+1;
 	if ((unsigned long)len > (unsigned long)count)
 		len = count;
-	if (copy_to_user(buf, res, len))
-		err = -EFAULT;
-	else
-		err = 0;
+	memcpy(tmp, res, len);
 	up_read(&uts_sem);
- out:
-	return err;
+	if (copy_to_user(buf, tmp, len))
+		return -EFAULT;
+	return 0;
 }
 
 SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 19cce22..8447eed 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -18,7 +18,7 @@
 
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
-cflags-y	+= -fno-common -pipe -fno-builtin -D__linux__
+cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT)	+= -mA7
 cflags-$(CONFIG_ISA_ARCV2)	+= -mcpu=archs
 
@@ -141,16 +141,3 @@
 
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 0a0eaf0..dd62319 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,5 +1,4 @@
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -11,7 +10,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 2233f57..2e0d7d7 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,5 +1,4 @@
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -11,7 +10,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 1108747..ec188fc 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,5 +1,4 @@
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -11,7 +10,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index b0066a7..df609fc 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -11,7 +11,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index ebe9ebb..1dbb661 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -11,7 +11,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 4bde432..cb36a69 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -9,7 +9,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index f6fb3d26..5680daa6 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -11,7 +11,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index b9f0fe0..87decc4 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -11,7 +11,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 6da71ba..4d14684 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -9,7 +9,6 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 54b54da..49112f7 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -84,7 +84,7 @@
 	"1:	llock   %[orig], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val),						\
 	  [orig] "=&r" (orig)						\
 	: [ctr]	"r"	(&v->counter),					\
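The one-line change above restores the branch that turns the LLOCK/SCOND sequence back into a loop: the restored bnz 1b retries the read-modify-write whenever scond reports that the exclusive store failed, instead of silently dropping the update. The same retry structure written as a portable C11 compare-and-swap loop (a userspace analogue, not the kernel macro):

    /* Userspace analogue of the llock / asm_op / scond / bnz loop: retry the
     * read-modify-write until the conditional store succeeds. */
    #include <stdatomic.h>

    static void atomic_add_loop(atomic_int *ctr, int i)
    {
        int orig, val;

        do {
            orig = atomic_load_explicit(ctr, memory_order_relaxed);   /* llock  */
            val  = orig + i;                                          /* asm_op */
        } while (!atomic_compare_exchange_weak_explicit(ctr, &orig, val,
                        memory_order_relaxed, memory_order_relaxed)); /* scond + bnz 1b */
    }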
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index d5da211..03d6bb0 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>		/* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index c28e6c3..871f3cb 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -34,9 +34,7 @@
 	const char		*name;
 	const char		**dt_compat;
 	void			(*init_early)(void);
-#ifdef CONFIG_SMP
 	void			(*init_per_cpu)(unsigned int);
-#endif
 	void			(*init_machine)(void);
 	void			(*init_late)(void);
 
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 296c342..ffb5f33 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -105,7 +105,7 @@
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define WANT_PAGE_VIRTUAL   1
 
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index e94ca72..c10f5cb 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -378,7 +378,7 @@
 
 /* Decode a PTE containing swap "identifier "into constituents */
 #define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike)	((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
 
 /* NOPs, to keep generic kernel happy */
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
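The decode macro above has to be the inverse of the encode side; assuming the companion __swp_entry() in this header packs the swap type into bits 0..4 and the offset from bit 13 upward, decoding the offset with a left shift could never recover it. A small standalone round-trip check of that assumed layout (plain C mirroring the macros, not including the kernel header):

    /* Round-trip check: decode must invert encode for the assumed layout
     * (type in bits 0..4, offset from bit 13 upward). */
    #include <assert.h>

    #define SWP_ENTRY(type, off)  ((((unsigned long)(off)) << 13) | ((type) & 0x1f))
    #define SWP_TYPE(val)         ((val) & 0x1f)
    #define SWP_OFFSET(val)       ((val) >> 13)   /* the corrected direction */

    int main(void)
    {
        unsigned long e = SWP_ENTRY(3, 42);

        assert(SWP_TYPE(e) == 3);
        assert(SWP_OFFSET(e) == 42);              /* fails if decoded with '<< 13' */
        return 0;
    }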
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 538b36a..62b1850 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -31,10 +31,10 @@
 	/* a SMP H/w block could do IPI IRQ request here */
 	if (plat_smp_ops.init_per_cpu)
 		plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
 
 	if (machine_desc->init_per_cpu)
 		machine_desc->init_per_cpu(smp_processor_id());
-#endif
 }
 
 /*
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index a41a79a..3ce1213 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,7 +44,8 @@
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
 	struct pt_regs *regs = current_pt_regs();
-	int uval = -EFAULT;
+	u32 uval;
+	int ret;
 
 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -57,23 +58,47 @@
 	/* Z indicates to userspace if operation succeded */
 	regs->status32 &= ~STATUS_Z_MASK;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
+	ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
+	if (!ret)
+		 goto fail;
 
+again:
 	preempt_disable();
 
-	if (__get_user(uval, uaddr))
-		goto done;
+	ret = __get_user(uval, uaddr);
+	if (ret)
+		 goto fault;
 
-	if (uval == expected) {
-		if (!__put_user(new, uaddr))
-			regs->status32 |= STATUS_Z_MASK;
-	}
+	if (uval != expected)
+		 goto out;
 
-done:
+	ret = __put_user(new, uaddr);
+	if (ret)
+		 goto fault;
+
+	regs->status32 |= STATUS_Z_MASK;
+
+out:
+	preempt_enable();
+	return uval;
+
+fault:
 	preempt_enable();
 
-	return uval;
+	if (unlikely(ret != -EFAULT))
+		 goto fail;
+
+	down_read(&current->mm->mmap_sem);
+	ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+			       FAULT_FLAG_WRITE, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (likely(!ret))
+		 goto again;
+
+fail:
+	force_sig(SIGSEGV, current);
+	return ret;
 }
 
 void arch_cpu_idle(void)
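The rewritten arc_usr_cmpxchg() above no longer gives up on the first faulting access: it resolves the fault with fixup_user_fault() under mmap_sem and retries the compare-exchange, and only an unresolvable fault ends in SIGSEGV. The control flow, condensed into a kernel-style sketch; try_cmpxchg_user() is a hypothetical stand-in for the preempt-disabled __get_user/__put_user step in the hunk:

    /* Condensed fault-in-and-retry pattern from the hunk above.
     * try_cmpxchg_user() is hypothetical; fixup_user_fault() is the call the
     * patch actually uses (tsk, mm, address, fault_flags, unlocked). */
    static long cmpxchg_with_faultin(u32 __user *uaddr, u32 expected, u32 new)
    {
        long ret;

        for (;;) {
            ret = try_cmpxchg_user(uaddr, expected, new);   /* -EFAULT if unmapped */
            if (ret != -EFAULT)
                return ret;

            down_read(&current->mm->mmap_sem);
            ret = fixup_user_fault(current, current->mm, (unsigned long)uaddr,
                                   FAULT_FLAG_WRITE, NULL);
            up_read(&current->mm->mmap_sem);
            if (ret)
                return ret;                                 /* caller raises SIGSEGV */
        }
    }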
@@ -188,6 +213,26 @@
 		task_thread_info(current)->thr_ptr;
 	}
 
+
+	/*
+	 * setup usermode thread pointer #1:
+	 * when child is picked by scheduler, __switch_to() uses @c_callee to
+	 * populate usermode callee regs: this works (despite being in a kernel
+	 * function) since special return path for child @ret_from_fork()
+	 * ensures those regs are not clobbered all the way to RTIE to usermode
+	 */
+	c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/*
+	 * setup usermode thread pointer #2:
+	 * however for this special use of r25 in kernel, __switch_to() sets
+	 * r25 for kernel needs and only in the final return path is usermode
+	 * r25 setup, from pt_regs->user_r25. So set that up as well
+	 */
+	c_regs->user_r25 = c_callee->r25;
+#endif
+
 	return 0;
 }
 
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index bbdfeb3..fefe357 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -840,7 +840,7 @@
 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
 		      unsigned long pfn)
 {
-	unsigned int paddr = pfn << PAGE_SHIFT;
+	phys_addr_t paddr = pfn << PAGE_SHIFT;
 
 	u_vaddr &= PAGE_MASK;
 
@@ -860,8 +860,9 @@
 		     unsigned long u_vaddr)
 {
 	/* TBD: do we really need to clear the kernel mapping */
-	__flush_dcache_page(page_address(page), u_vaddr);
-	__flush_dcache_page(page_address(page), page_address(page));
+	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+	__flush_dcache_page((phys_addr_t)page_address(page),
+			    (phys_addr_t)page_address(page));
 
 }
 
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index 9d6718c..3c401ce 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -21,6 +21,7 @@
 #error "Incorrect ctop.h include"
 #endif
 
+#include <linux/types.h>
 #include <soc/nps/common.h>
 
 /* core auxiliary registers */
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 0db19d3..d022b6b 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -74,6 +74,11 @@
 	};
 };
 
+/* Table 5-79 of the TRM shows 480ab000 is reserved */

+&usb_otg_hs {
+	status = "disabled";
+};
+
 &iva {
 	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 319d942..6482ada 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -533,6 +533,8 @@
 
 		touchscreen-size-x = <480>;
 		touchscreen-size-y = <272>;
+
+		wakeup-source;
 	};
 
 	tlv320aic3106: tlv320aic3106@1b {
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index fabc9f3..5ad6153 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -128,7 +128,7 @@
 			reg = <0x18008000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -157,7 +157,7 @@
 			reg = <0x1800b000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -168,7 +168,7 @@
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <0>;
 
@@ -190,10 +190,10 @@
 				compatible = "brcm,iproc-msi";
 				msi-controller;
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
-					     <GIC_SPI 97 IRQ_TYPE_NONE>,
-					     <GIC_SPI 98 IRQ_TYPE_NONE>,
-					     <GIC_SPI 99 IRQ_TYPE_NONE>;
+				interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
@@ -203,7 +203,7 @@
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <1>;
 
@@ -225,10 +225,10 @@
 				compatible = "brcm,iproc-msi";
 				msi-controller;
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
-					     <GIC_SPI 103 IRQ_TYPE_NONE>,
-					     <GIC_SPI 104 IRQ_TYPE_NONE>,
-					     <GIC_SPI 105 IRQ_TYPE_NONE>;
+				interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 65e0db1..6e3d3b5 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -288,7 +288,7 @@
 			reg = <0x38000 0x50>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 		};
 
@@ -375,7 +375,7 @@
 
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0>;
-		interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 
 		linux,pci-domain = <0>;
 
@@ -397,10 +397,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
-				     <GIC_SPI 128 IRQ_TYPE_NONE>,
-				     <GIC_SPI 129 IRQ_TYPE_NONE>,
-				     <GIC_SPI 130 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
 			brcm,pcie-msi-inten;
 		};
 	};
@@ -411,7 +411,7 @@
 
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0>;
-		interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 
 		linux,pci-domain = <1>;
 
@@ -433,10 +433,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
-				     <GIC_SPI 134 IRQ_TYPE_NONE>,
-				     <GIC_SPI 135 IRQ_TYPE_NONE>,
-				     <GIC_SPI 136 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
 			brcm,pcie-msi-inten;
 		};
 	};
@@ -447,7 +447,7 @@
 
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0>;
-		interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 
 		linux,pci-domain = <2>;
 
@@ -469,10 +469,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
-				     <GIC_SPI 140 IRQ_TYPE_NONE>,
-				     <GIC_SPI 141 IRQ_TYPE_NONE>,
-				     <GIC_SPI 142 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
 			brcm,pcie-msi-inten;
 		};
 	};
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index f79e1b9..51ab92f 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -377,11 +377,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			reg = <0x226000 0x1000>;
-			interrupts = <42 IRQ_TYPE_EDGE_BOTH
-				43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
-				45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
-				47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
-				49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+			interrupts = <42 43 44 45 46 47 48 49 50>;
 			ti,ngpio = <144>;
 			ti,davinci-gpio-unbanked = <0>;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index ce54a70..a1a9280 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1770,7 +1770,7 @@
 			};
 		};
 
-		dcan1: can@481cc000 {
+		dcan1: can@4ae3c000 {
 			compatible = "ti,dra7-d_can";
 			ti,hwmods = "dcan1";
 			reg = <0x4ae3c000 0x2000>;
@@ -1780,7 +1780,7 @@
 			status = "disabled";
 		};
 
-		dcan2: can@481d0000 {
+		dcan2: can@48480000 {
 			compatible = "ti,dra7-d_can";
 			ti,hwmods = "dcan2";
 			reg = <0x48480000 0x2000>;
diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
index cd11940..fd6f9ce 100644
--- a/arch/arm/boot/dts/emev2.dtsi
+++ b/arch/arm/boot/dts/emev2.dtsi
@@ -30,13 +30,13 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
 			clock-frequency = <533000000>;
 		};
-		cpu@1 {
+		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
@@ -56,6 +56,7 @@
 		compatible = "arm,cortex-a9-pmu";
 		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>;
 	};
 
 	clocks@e0110000 {
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 1a473e8..a885052 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -1280,7 +1280,7 @@
 				  /* non-prefetchable memory */
 				  0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
 			num-lanes = <1>;
-			interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
 				 <&clks IMX6SX_CLK_PCIE_AXI>,
 				 <&clks IMX6SX_CLK_LVDS1_OUT>,
diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
index c0fb4a6..386b93c 100644
--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -188,6 +188,8 @@
 						regulator-max-microvolt = <2950000>;
 
 						regulator-boot-on;
+						regulator-system-load = <200000>;
+						regulator-allow-set-load;
 					};
 
 					l21 {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi
index 9da25abd..8e2a8ea 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-coresight.dtsi
@@ -342,15 +342,6 @@
 			};
 
 			port@3 {
-				reg = <2>;
-				tpda_in_tpdm_dcc: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&tpdm_dcc_out_tpda>;
-				};
-			};
-
-			port@4 {
 				reg = <5>;
 				tpda_in_tpdm_center: endpoint {
 					slave-mode;
@@ -396,24 +387,6 @@
 		};
 	};
 
-	tpdm_dcc: tpdm@6870280 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b968>;
-		reg = <0x6870280 0x1000>;
-		reg-names = "tpdm-base";
-
-		coresight-name = "coresight-tpdm-dcc";
-
-		clocks = <&clock_aop QDSS_CLK>;
-		clock-names = "apb_pclk";
-
-		port{
-			tpdm_dcc_out_tpda: endpoint {
-				remote-endpoint = <&tpda_in_tpdm_dcc>;
-			};
-		};
-	};
-
 	tpdm_vsense: tpdm@6840000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b968>;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-memory-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-memory-256.dtsi
index 0c21814..6dc8354 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-memory-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-memory-256.dtsi
@@ -15,5 +15,5 @@
 };
 
 &mss_mem {
-	reg = <0x87000000 0x8300000>;
+	reg = <0x86c00000 0x8300000>;
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
index a383f3e..de5922c 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
@@ -25,6 +25,14 @@
 	status = "okay";
 };
 
+&qseecom_mem {
+	status = "okay";
+};
+
+&qseecom_ta_mem {
+	status = "okay";
+};
+
 &blsp1_uart2b_hs {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
index 65467f9..0f2fe90 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -339,6 +339,35 @@
 		};
 	};
 
+	mdm-core-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&tsens0 4>;
+		trips {
+			mdm_step_trip0: mdm-step-trip-0 {
+				temperature = <95000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+			mdm_step_trip1: mdm-step-trip-1 {
+				temperature = <105000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			modem_proc-lv11 {
+				trip = <&mdm_step_trip0>;
+				cooling-device = <&modem_proc 1 1>;
+			};
+			modem_proc_lvl3 {
+				trip = <&mdm_step_trip1>;
+				cooling-device = <&modem_proc 3 3>;
+			};
+		};
+	};
+
 	xo-therm-adc {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
index a26aa71..1d055d8 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi
@@ -160,99 +160,67 @@
 		qcom,vbus-valid-override;
 		qcom,qmp-phy-init-seq =
 		/* <reg_offset, value, delay> */
-			<0x058 0x07 0x00 /* QSERDES_COM_PLL_IVCO */
-			 0x094 0x1a 0x00 /* QSERDES_COM_SYSCLK_EN_SEL */
-			 0x044 0x14 0x00 /* QSERDES_COM_BIAS_EN_CLKBUFLR_EN */
-			 0x154 0x31 0x00 /* QSERDES_COM_CLK_SELECT */
-			 0x04c 0x02 0x00 /* QSERDES_COM_SYS_CLK_CTRL */
-			 0x0a0 0x08 0x00 /* QSERDES_COM_RESETSM_CNTRL2 */
-			 0x17c 0x06 0x00 /* QSERDES_COM_CMN_CONFIG */
-			 0x184 0x05 0x00 /* QSERDES_COM_SVS_MODE_CLK_SEL */
-			 0x1bc 0x11 0x00 /* QSERDES_COM_BIN_VCOCAL_HSCLK_SEL*/
+			<0x094 0x1a 0x00 /* QSERDES_COM_SYSCLK_EN_SEL */
+			 0x1bc 0x11 0x00 /* QSERDES_COM_BIN_VCOCAL_HSCLK_SEL */
 			 0x158 0x01 0x00 /* QSERDES_COM_HSCLK_SEL */
 			 0x0bc 0x82 0x00 /* QSERDES_COM_DEC_START_MODE0 */
 			 0x0cc 0xab 0x00 /* QSERDES_COM_DIV_FRAC_START1_MODE0 */
 			 0x0d0 0xea 0x00 /* QSERDES_COM_DIV_FRAC_START2_MODE0 */
-			 0x0d4 0x02 0x00 /* COM_DIV_FRAC_START3_MODE0 */
-			 0x1ac 0xca 0x00 /* COM_BIN_VCOCAL_CMP_CODE1_MODE0 */
-			 0x1b0 0x1e 0x00 /* COM_BIN_VCOCAL_CMP_CODE2_MODE0 */
+			 0x0d4 0x02 0x00 /* QSERDES_COM_DIV_FRAC_START3_MODE0 */
+			 0x1ac 0xca 0x00 /* QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 */
+			 0x1b0 0x1e 0x00 /* QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 */
 			 0x074 0x06 0x00 /* QSERDES_COM_CP_CTRL_MODE0 */
 			 0x07c 0x16 0x00 /* QSERDES_COM_PLL_RCTRL_MODE0 */
 			 0x084 0x36 0x00 /* QSERDES_COM_PLL_CCTRL_MODE0 */
-			 0x0f0 0x00 0x00 /* QSERDES_COM_INTEGLOOP_GAIN1_MODE0 */
-			 0x0ec 0x3f 0x00 /* QSERDES_COM_INTEGLOOP_GAIN0_MODE0 */
-			 0x114 0x02 0x00 /* QSERDES_COM_VCO_TUNE2_MODE0 */
 			 0x110 0x24 0x00 /* QSERDES_COM_VCO_TUNE1_MODE0 */
-			 0x168 0x0a 0x00 /* QSERDES_COM_CORECLK_DIV_MODE0 */
 			 0x0b0 0x34 0x00 /* QSERDES_COM_LOCK_CMP2_MODE0 */
 			 0x0ac 0x14 0x00 /* QSERDES_COM_LOCK_CMP1_MODE0 */
 			 0x0a4 0x04 0x00 /* QSERDES_COM_LOCK_CMP_EN */
-			 0x174 0x00 0x00 /* QSERDES_COM_CORE_CLK_EN */
-			 0x0a8 0x00 0x00 /* QSERDES_COM_LOCK_CMP_CFG */
-			 0x10c 0x00 0x00 /* QSERDES_COM_VCO_TUNE_MAP */
 			 0x050 0x0a 0x00 /* QSERDES_COM_SYSCLK_BUF_ENABLE */
-			 0x00c 0x0a 0x00 /* QSERDES_COM_BG_TIMER */
 			 0x010 0x01 0x00 /* QSERDES_COM_SSC_EN_CENTER */
 			 0x01c 0x31 0x00 /* QSERDES_COM_SSC_PER1 */
 			 0x020 0x01 0x00 /* QSERDES_COM_SSC_PER2 */
-			 0x014 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER1 */
-			 0x018 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER2 */
 			 0x030 0xde 0x00 /* QSERDES_COM_SSC_STEP_SIZE1_MODE1 */
 			 0x034 0x07 0x00 /* QSERDES_COM_SSC_STEP_SIZE2_MODE1 */
 			 0x024 0xde 0x00 /* QSERDES_COM_SSC_STEP_SIZE1_MODE0 */
-			 0x028 0x07 0x00 /* QSERDES_COM_SSC_STEP_SIZE1_MODE0 */
-			 0x4a4 0x3f 0x00 /* QSERDES_RX_RX_IDAC_ENABLES */
-			 0x594 0xbf 0x00 /* QSERDES_RX_RX_MODE_01_HIGH4 */
-			 0x590 0x09 0x00 /* QSERDES_RX_RX_MODE_01_HIGH3 */
-			 0x58c 0xc8 0x00 /* QSERDES_RX_RX_MODE_01_HIGH2 */
-			 0x588 0xc8 0x00 /* QSERDES_RX_RX_MODE_01_HIGH */
-			 0x584 0xe0 0x00 /* QSERDES_RX_RX_MODE_01_LOW */
+			 0x028 0x07 0x00 /* QSERDES_COM_SSC_STEP_SIZE2_MODE0 */
+			 0x594 0x31 0x00 /* QSERDES_RX_RX_MODE_01_HIGH4 */
+			 0x590 0x39 0x00 /* QSERDES_RX_RX_MODE_01_HIGH3 */
+			 0x58c 0xdb 0x00 /* QSERDES_RX_RX_MODE_01_HIGH2 */
+			 0x588 0x54 0x00 /* QSERDES_RX_RX_MODE_01_HIGH */
+			 0x584 0xd4 0x00 /* QSERDES_RX_RX_MODE_01_LOW */
 			 0x444 0x01 0x00 /* QSERDES_RX_UCDR_PI_CONTROLS */
-			 0x408 0x0a 0x00 /* QSERDES_RX_UCDR_FO_GAIN */
-			 0x414 0x06 0x00 /* QSERDES_RX_UCDR_SO_GAIN */
 			 0x430 0x2f 0x00 /* QSERDES_RX_UCDR_FASTLOCK_FO_GAIN */
-			 0x43c 0xff 0x00 /* RX_UCDR_FASTLOCK_COUNT_LOW */
-			 0x440 0x0f 0x00 /* RX_UCDR_FASTLOCK_COUNT_HIGH */
-			 0x420 0x0a 0x00 /* QSERDES_RX_UCDR_SVS_FO_GAIN */
-			 0x42c 0x06 0x00 /* QSERDES_RX_UCDR_SVS_SO_GAIN */
-			 0x434 0x7f 0x00 /* RX_UCDR_SO_SATURATION_AND_ENABLE */
-			 0x4d8 0x0c 0x00 /* QSERDES_RX_VGA_CAL_CNTRL2 */
-			 0x4ec 0x0e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 */
-			 0x4f0 0x4e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 */
-			 0x4f4 0x18 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 */
+			 0x43c 0xff 0x00 /* QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW */
+			 0x440 0x0f 0x00 /* QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH */
+			 0x434 0x7f 0x00 /* QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE */
+			 0x4d8 0x03 0x00 /* QSERDES_RX_VGA_CAL_CNTRL2 */
+			 0x4ec 0x0f 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 */
+			 0x4f0 0x4a 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 */
+			 0x4f4 0x08 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 */
 			 0x5b4 0x04 0x00 /* QSERDES_RX_DFE_EN_TIMER */
-			 0x510 0x77 0x00 /* RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
-			 0x514 0x80 0x00 /* RX_RX_OFFSET_ADAPTOR_CNTRL2 */
+			 0x510 0x77 0x00 /* QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+			 0x514 0x80 0x00 /* QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 */
 			 0x51c 0x04 0x00 /* QSERDES_RX_SIGDET_CNTRL */
-			 0x524 0x1a 0x00 /* QSERDES_RX_SIGDET_DEGLITCH_CNTRL */
+			 0x524 0x0e 0x00 /* QSERDES_RX_SIGDET_DEGLITCH_CNTRL */
 			 0x4fc 0x00 0x00 /* QSERDES_RX_RX_IDAC_TSETTLE_HIGH */
 			 0x4f8 0xc0 0x00 /* QSERDES_RX_RX_IDAC_TSETTLE_LOW */
-			 0x258 0x10 0x00 /* QSERDES_TX_HIGHZ_DRVR_EN */
+			 0x5b8 0x30 0x00 /* QSERDES_RX_DFE_CTLE_POST_CAL_OFFSET */
+			 0x414 0x04 0x00 /* QSERDES_RX_UCDR_SO_GAIN */
 			 0x29c 0x12 0x00 /* QSERDES_TX_RCV_DETECT_LVL_2 */
 			 0x284 0x05 0x00 /* QSERDES_TX_LANE_MODE_1 */
-			 0x288 0x02 0x00 /* QSERDES_TX_LANE_MODE_2 */
-			 0x28c 0x00 0x00 /* QSERDES_TX_LANE_MODE_3*/
-			 0x89c 0x83 0x00 /* USB3_UNI_PCS_FLL_CNTRL2 */
-			 0x8a0 0x09 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_L */
-			 0x8a4 0xa2 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_H_TOL */
-			 0x8a8 0x40 0x00 /* USB3_UNI_PCS_FLL_MAN_CODE */
-			 0x898 0x02 0x00 /* USB3_UNI_PCS_FLL_CNTRL1 */
-			 0x8c4 0xd0 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG1 */
-			 0x8c8 0x17 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG2 */
-			 0x8cc 0x20 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG3 */
-			 0x890 0x4f 0x00 /* USB3_UNI_PCS_POWER_STATE_CONFIG1 */
-			 0x990 0xe7 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L */
-			 0x994 0x03 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H */
-			 0x988 0xba 0x00 /* USB3_UNI_PCS_RX_SIGDET_LVL */
-			 0xe2c 0x75 0x00 /* USB3_RXEQTRAINING_WAIT_TIME */
-			 0xe38 0x07 0x00 /* USB3_RXEQTRAINING_DFE_TIME_S2 */
-			 0xe18 0x64 0x00 /* USB3_LFPS_DET_HIGH_COUNT_VAL */
-			 0x9c0 0x88 0x00 /* USB3_UNI_PCS_ALIGN_DETECT_CONFIG1 */
-			 0x9c4 0x13 0x00 /* USB3_UNI_PCS_ALIGN_DETECT_CONFIG2 */
-			 0x9dc 0x0d 0x00 /* USB3_UNI_PCS_EQ_CONFIG1 */
-			 0x9e0 0x0d 0x00 /* USB3_UNI_PCS_EQ_CONFIG2 */
-			 0x8dc 0x21 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG1 */
-			 0x8e0 0x60 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG2 */
+			 0x8c4 0xd0 0x00 /* PCIE_USB3_UNI_PCS_LOCK_DETECT_CONFIG1 */
+			 0x8c8 0x07 0x00 /* PCIE_USB3_UNI_PCS_LOCK_DETECT_CONFIG2 */
+			 0x8cc 0x20 0x00 /* PCIE_USB3_UNI_PCS_LOCK_DETECT_CONFIG3 */
+			 0x990 0xe7 0x00 /* PCIE_USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L */
+			 0x994 0x03 0x00 /* PCIE_USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H */
+			 0x988 0x88 0x00 /* PCIE_USB3_UNI_PCS_RX_SIGDET_LVL */
+			 0xe38 0x07 0x00 /* PCIE_USB3_UNI_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 */
+			 0xe18 0x64 0x00 /* PCIE_USB3_UNI_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL */
+			 0x9c0 0x88 0x00 /* PCIE_USB3_UNI_PCS_ALIGN_DETECT_CONFIG1 */
+			 0x9c4 0x13 0x00 /* PCIE_USB3_UNI_PCS_ALIGN_DETECT_CONFIG2 */
+			 0x9dc 0x0d 0x00 /* PCIE_USB3_UNI_PCS_EQ_CONFIG1 */
+			 0x8dc 0x21 0x00 /* PCIE_USB3_UNI_PCS_REFGEN_REQ_CONFIG1 */
 			 0xffffffff 0xffffffff 0x00>;
 
 		qcom,qmp-phy-reg-offset =
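The qcom,qmp-phy-init-seq rework above keeps the property's <reg_offset, value, delay> triplet format and its 0xffffffff terminator while trimming and correcting individual register writes. A hedged sketch of how a driver typically walks such a table (placeholder iteration only, not the actual QMP PHY driver code; 'seq', 'len' and 'base' are assumed to come from the DT property and ioremap()):

    /* Walk a <reg_offset, value, delay_us> sequence terminated by 0xffffffff.
     * writel() and udelay() are standard kernel primitives. */
    static void qmp_phy_write_seq(void __iomem *base, const u32 *seq, int len)
    {
        int i;

        for (i = 0; i + 2 < len; i += 3) {
            if (seq[i] == 0xffffffff)          /* sentinel row ends the table */
                break;
            writel(seq[i + 1], base + seq[i]); /* value -> reg_offset */
            if (seq[i + 2])
                udelay(seq[i + 2]);
        }
    }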
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 7be23ee..f6768d8 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -65,7 +65,7 @@
 		mss_mem: mss_region@87400000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x87000000 0x8700000>;
+			reg = <0x86c00000 0x8b00000>;
 			label = "mss_mem";
 		};
 
@@ -85,14 +85,16 @@
 			compatible = "shared-dma-pool";
 			reusable;
 			alignment = <0x400000>;
-			size = <0x1400000>;
+			size = <0x400000>;
+			status = "disabled";
 		};
 
 		qseecom_ta_mem: qseecom_ta_region@0 {
 			compatible = "shared-dma-pool";
 			reusable;
 			alignment = <0x400000>;
-			size = <0x1000000>;
+			size = <0x400000>;
+			status = "disabled";
 		};
 	};
 
@@ -416,6 +418,8 @@
 				<45 512 0 0>,
 				<45 512 500 800>;
 
+		qcom,pcie-vendor-id = /bits/ 16 <0x17cb>;
+		qcom,pcie-device-id = /bits/ 16 <0x0304>;
 		qcom,pcie-link-speed = <2>;
 		qcom,pcie-phy-ver = <6>;
 		qcom,pcie-active-config;
@@ -908,10 +912,10 @@
 	dcc: dcc_v2@10a2000 {
 		compatible = "qcom,dcc-v2";
 		reg = <0x10a2000 0x1000>,
-		      <0x10ae000 0x2000>;
+		      <0x10ae400 0x1c00>;
 		reg-names = "dcc-base", "dcc-ram-base";
 
-		dcc-ram-offset = <0x6000>;
+		dcc-ram-offset = <0x400>;
 	};
 
 	system_pm {
@@ -932,7 +936,7 @@
 		qcom,ipa-hw-mode = <0>;
 		qcom,ee = <0>;
 		qcom,use-ipa-tethering-bridge;
-		qcom,mhi-event-ring-id-limits = <9 10>; /* start and end */
+		qcom,mhi-event-ring-id-limits = <9 11>; /* start and end */
 		qcom,modem-cfg-emb-pipe-flt;
 		qcom,use-ipa-pm;
 		qcom,wlan-ce-db-over-pcie;
@@ -1387,7 +1391,7 @@
 			compatible = "qcom,ess-switch-qca83xx";
 			qcom,switch-access-mode = "mdio";
 			qcom,ar8327-initvals = <
-				0x00004 0x7600000   /* PAD0_MODE */
+				0x00004 0x4200000   /* PAD0_MODE */
 				0x00008 0x0         /* PAD5_MODE */
 				0x000e4 0xaa545     /* MAC_POWER_SEL */
 				0x000e0 0xc74164de  /* SGMII_CTRL */
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 032fe2f..6b0cc22 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -22,7 +22,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
@@ -30,7 +30,7 @@
 			power-domains = <&pd_a2sl>;
 			next-level-cache = <&L2>;
 		};
-		cpu@1 {
+		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
@@ -89,6 +89,7 @@
 		compatible = "arm,cortex-a9-pmu";
 		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>;
 	};
 
 	cmt1: timer@e6138000 {
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index f11012b..cfcf5dc 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -205,6 +205,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x70>;
+			reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
 		};
 	};
 
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 5f013c9f..8290c9a 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -145,9 +145,11 @@
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index b64fade..b641f37 100644
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -25,6 +25,8 @@
 CONFIG_SCHED_TUNE=y
 CONFIG_DEFAULT_USE_ENERGY_AWARE=y
 CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
@@ -263,6 +265,7 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_DIAG_USES_SMD=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_MSM_SMD_PKT=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
@@ -279,6 +282,8 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_QPNP_VM_BMS=y
@@ -298,7 +303,6 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
-CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -335,19 +339,14 @@
 CONFIG_SND_SOC=y
 CONFIG_UHID=y
 CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
 CONFIG_HID_CHERRY=y
 CONFIG_HID_CHICONY=y
 CONFIG_HID_CYPRESS=y
-CONFIG_HID_ELECOM=y
 CONFIG_HID_EZKEY=y
 CONFIG_HID_KENSINGTON=y
 CONFIG_HID_LOGITECH=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_MON=y
@@ -454,13 +453,15 @@
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_FRAME_WARN=2048
+CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
 # CONFIG_DEBUG_PREEMPT is not set
-CONFIG_FUNCTION_TRACER=y
+CONFIG_STACKTRACE=y
+# CONFIG_FTRACE is not set
 CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index 17640c9..ccf4b59 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -304,6 +304,8 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_QPNP_VM_BMS=y
@@ -324,7 +326,6 @@
 CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
-CONFIG_MFD_SYSCON=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index 6ef7915..59f882d 100644
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -241,6 +241,7 @@
 CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
+CONFIG_IFB=y
 CONFIG_TUN=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
@@ -457,6 +458,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index 986a65c..04a60b0 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -17,6 +17,7 @@
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
@@ -230,6 +231,7 @@
 CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
+CONFIG_IFB=y
 CONFIG_TUN=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
@@ -450,6 +452,8 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index f1f9cf0..c47203f 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -58,6 +58,8 @@
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8937=y
 CONFIG_ARCH_MSM8917=y
+CONFIG_ARCH_QM215=y
+CONFIG_ARCH_MSM8940=y
 CONFIG_ARCH_SDM439=y
 CONFIG_ARCH_SDM429=y
 # CONFIG_VDSO is not set
@@ -249,6 +251,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -264,6 +267,7 @@
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -346,6 +350,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
+CONFIG_PINCTRL_MSM8940=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
@@ -355,10 +360,13 @@
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_QPNP_TYPEC=y
 CONFIG_QPNP_QG=y
 CONFIG_MSM_APM=y
@@ -514,6 +522,7 @@
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index 4a85408..e7017ec 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -60,6 +60,8 @@
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8937=y
 CONFIG_ARCH_MSM8917=y
+CONFIG_ARCH_QM215=y
+CONFIG_ARCH_MSM8940=y
 CONFIG_ARCH_SDM439=y
 CONFIG_ARCH_SDM429=y
 # CONFIG_VDSO is not set
@@ -253,6 +255,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -268,6 +271,7 @@
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -352,6 +356,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
+CONFIG_PINCTRL_MSM8940=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_QPNP_PIN=y
@@ -361,10 +366,13 @@
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_QPNP_TYPEC=y
 CONFIG_QPNP_QG=y
 CONFIG_MSM_APM=y
@@ -524,6 +532,7 @@
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
diff --git a/arch/arm/configs/msm8937go-perf_defconfig b/arch/arm/configs/msm8937go-perf_defconfig
index 7e934c2..3a38886 100644
--- a/arch/arm/configs/msm8937go-perf_defconfig
+++ b/arch/arm/configs/msm8937go-perf_defconfig
@@ -38,6 +38,7 @@
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
+# CONFIG_BASE_FULL is not set
 CONFIG_BPF_SYSCALL=y
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
@@ -58,6 +59,7 @@
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8937=y
 CONFIG_ARCH_MSM8917=y
+CONFIG_ARCH_QM215=y
 CONFIG_ARCH_SDM439=y
 CONFIG_ARCH_SDM429=y
 # CONFIG_VDSO is not set
@@ -356,10 +358,15 @@
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_QPNP_TYPEC=y
+CONFIG_QPNP_QG=y
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
@@ -385,6 +392,7 @@
 CONFIG_REGULATOR_CPR=y
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
 CONFIG_REGULATOR_RPM_SMD=y
 CONFIG_REGULATOR_SPM=y
@@ -467,6 +475,7 @@
 CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_SERIAL=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -504,11 +513,16 @@
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_CQ_HCI=y
+CONFIG_LEDS_QTI_TRI_LED=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_FLASH=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_RTC_CLASS=y
@@ -580,6 +594,7 @@
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
+CONFIG_PWM_QTI_LPG=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_QTI_MPM=y
 CONFIG_ANDROID=y
@@ -617,6 +632,7 @@
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
 CONFIG_PFK=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
diff --git a/arch/arm/configs/msm8937go_defconfig b/arch/arm/configs/msm8937go_defconfig
index 9d6a683..0f7c823 100644
--- a/arch/arm/configs/msm8937go_defconfig
+++ b/arch/arm/configs/msm8937go_defconfig
@@ -39,6 +39,7 @@
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
+# CONFIG_BASE_FULL is not set
 CONFIG_BPF_SYSCALL=y
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
@@ -60,6 +61,7 @@
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8937=y
 CONFIG_ARCH_MSM8917=y
+CONFIG_ARCH_QM215=y
 CONFIG_ARCH_SDM439=y
 CONFIG_ARCH_SDM429=y
 # CONFIG_VDSO is not set
@@ -362,10 +364,15 @@
 CONFIG_POWER_SUPPLY=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_QPNP_TYPEC=y
+CONFIG_QPNP_QG=y
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
@@ -391,6 +398,7 @@
 CONFIG_REGULATOR_CPR=y
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP=y
 CONFIG_REGULATOR_RPM_SMD=y
 CONFIG_REGULATOR_SPM=y
@@ -474,6 +482,7 @@
 CONFIG_USB_STORAGE_CYPRESS_ATACB=y
 CONFIG_USB_SERIAL=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -512,11 +521,16 @@
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SDHCI_MSM_ICE=y
 CONFIG_MMC_CQ_HCI=y
+CONFIG_LEDS_QTI_TRI_LED=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_FLASH=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_RTC_CLASS=y
@@ -596,6 +610,7 @@
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
+CONFIG_PWM_QTI_LPG=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_QTI_MPM=y
 CONFIG_ANDROID=y
diff --git a/arch/arm/configs/sdm670-perf_defconfig b/arch/arm/configs/sdm670-perf_defconfig
index e367824..2273f4f 100644
--- a/arch/arm/configs/sdm670-perf_defconfig
+++ b/arch/arm/configs/sdm670-perf_defconfig
@@ -348,6 +348,7 @@
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
 CONFIG_MSM_VIDC_V4L2=y
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
@@ -400,8 +401,10 @@
 CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
@@ -505,7 +508,10 @@
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
diff --git a/arch/arm/configs/sdm670_defconfig b/arch/arm/configs/sdm670_defconfig
index d905ae5..9059037 100644
--- a/arch/arm/configs/sdm670_defconfig
+++ b/arch/arm/configs/sdm670_defconfig
@@ -357,6 +357,7 @@
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
 CONFIG_MSM_VIDC_V4L2=y
 CONFIG_MSM_VIDC_GOVERNORS=y
 CONFIG_MSM_SDE_ROTATOR=y
@@ -410,8 +411,10 @@
 CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
@@ -527,7 +530,10 @@
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
 CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
diff --git a/arch/arm/configs/sdxpoorwills-auto-perf_defconfig b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
new file mode 100644
index 0000000..1fe7178
--- /dev/null
+++ b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
@@ -0,0 +1,442 @@
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDXPOORWILLS=y
+CONFIG_PCI_MSM=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMA=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_DEBUG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_IP_SET=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_ARP=y
+CONFIG_BRIDGE_EBT_IP=y
+CONFIG_BRIDGE_EBT_IP6=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_MSM_QPIC_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+CONFIG_KS8851=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_AT803X_PHY=y
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CNSS=y
+CONFIG_CNSS_SDIO=y
+CONFIG_CNSS_PCI=y
+CONFIG_CLD_HL_SDIO_CORE=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_LOGGER=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PINCTRL_SDXPOORWILLS=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_EP_PCIE=y
+CONFIG_EP_PCIE_HW=y
+CONFIG_QPNP_REVID=y
+CONFIG_MSM_MHI_DEV=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_MDM_GCC_SDXPOORWILLS=y
+CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
+CONFIG_MDM_DEBUGCC_SDXPOORWILLS=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_SCM=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_PM=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_QCOM_SPMI_MISC=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
+CONFIG_ANDROID=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_ON_RECURSIVE_FAULT=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+# CONFIG_SECURITY_SELINUX_AVC_STATS is not set
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/sdxpoorwills-auto_defconfig b/arch/arm/configs/sdxpoorwills-auto_defconfig
new file mode 100644
index 0000000..9a62d53
--- /dev/null
+++ b/arch/arm/configs/sdxpoorwills-auto_defconfig
@@ -0,0 +1,469 @@
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDXPOORWILLS=y
+# CONFIG_VDSO is not set
+CONFIG_PCI_MSM=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMA=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_DEBUG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_IP_SET=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_ARP=y
+CONFIG_BRIDGE_EBT_IP=y
+CONFIG_BRIDGE_EBT_IP6=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_MSM_QPIC_NAND=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+CONFIG_KS8851=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_AT803X_PHY=y
+CONFIG_PPP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CNSS=y
+CONFIG_CNSS_SDIO=y
+CONFIG_CNSS_PCI=y
+CONFIG_CLD_HL_SDIO_CORE=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_LOGGER=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PINCTRL_SDXPOORWILLS=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
+CONFIG_MSM_CDC_PINCTRL=y
+CONFIG_MSM_CDC_SUPPLY=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_FB=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDP_NONE=y
+CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_EP_PCIE=y
+CONFIG_EP_PCIE_HW=y
+CONFIG_QPNP_REVID=y
+CONFIG_MSM_MHI_DEV=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_MDM_GCC_SDXPOORWILLS=y
+CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
+CONFIG_MDM_DEBUGCC_SDXPOORWILLS=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_SCM=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_PM=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_QCOM_SPMI_MISC=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
+CONFIG_ANDROID=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_ON_RECURSIVE_FAULT=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+# CONFIG_SECURITY_SELINUX_AVC_STATS is not set
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 5ac5966..fd6868c 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -32,6 +32,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_CMA=y
+CONFIG_NO_VM_RECLAIM=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -177,6 +178,7 @@
 CONFIG_RFKILL=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=12
 CONFIG_MTD=y
@@ -313,6 +315,7 @@
 CONFIG_USB_GADGET_VBUS_DRAW=500
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
@@ -321,6 +324,7 @@
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_IPC=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_PARANOID_SD_INIT=y
@@ -374,6 +378,7 @@
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_GLINK=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
@@ -381,6 +386,7 @@
 CONFIG_TRACER_PKT=y
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_USB_XPRT=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 137a3d5..2b02a48 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -34,6 +34,7 @@
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_CMA=y
+CONFIG_NO_VM_RECLAIM=y
 CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -171,6 +172,7 @@
 CONFIG_RFKILL=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=12
 CONFIG_MTD=y
@@ -314,6 +316,7 @@
 CONFIG_USB_GADGET_VBUS_DRAW=500
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_UEVENT=y
@@ -322,6 +325,7 @@
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_IPC=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_RING_BUFFER=y
@@ -379,6 +383,7 @@
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_GLINK=y
 CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
@@ -386,6 +391,7 @@
 CONFIG_TRACER_PKT=y
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_USB_XPRT=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f4dab20..0833d8a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -327,4 +327,16 @@
 	return false;
 }
 
+#define KVM_SSBD_UNKNOWN		-1
+#define KVM_SSBD_FORCE_DISABLE		0
+#define KVM_SSBD_KERNEL		1
+#define KVM_SSBD_FORCE_ENABLE		2
+#define KVM_SSBD_MITIGATED		3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+	/* No way to detect it yet, pretend it is not there. */
+	return KVM_SSBD_UNKNOWN;
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 7f66b1b..e2f05ce 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -28,6 +28,13 @@
  */
 #define kern_hyp_va(kva)	(kva)
 
+/* Contrary to arm64, there is no need to generate a PC-relative address */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr = &(s);					\
+		addr;							\
+	})
+
 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
  */
@@ -249,6 +256,11 @@
 	return 0;
 }
 
+static inline int hyp_map_aux_data(void)
+{
+	return 0;
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 10c3283..0dcd9e7 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -12,6 +12,7 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
+#include <asm/memory.h>
 
 #ifdef CONFIG_NEED_RET_TO_USER
 #include <mach/entry-macro.S>
@@ -35,6 +36,9 @@
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)
 	disable_irq_notrace			@ disable interrupts
+	ldr	r2, [tsk, #TI_ADDR_LIMIT]
+	cmp	r2, #TASK_SIZE
+	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	bne	fast_work_pending
@@ -61,6 +65,9 @@
  UNWIND(.cantunwind	)
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
 	disable_irq_notrace			@ disable interrupts
+	ldr	r2, [tsk, #TI_ADDR_LIMIT]
+	cmp	r2, #TASK_SIZE
+	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	beq	no_work_pending
@@ -93,6 +100,9 @@
 ret_slow_syscall:
 	disable_irq_notrace			@ disable interrupts
 ENTRY(ret_to_user_from_irq)
+	ldr	r2, [tsk, #TI_ADDR_LIMIT]
+	cmp	r2, #TASK_SIZE
+	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	slow_work_pending
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7b8f214..304e684 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/tracehook.h>
 #include <linux/uprobes.h>
+#include <linux/syscalls.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -631,3 +632,9 @@
 
 	return page;
 }
+
+/* Defer to generic check */
+asmlinkage void addr_limit_check_failed(void)
+{
+	addr_limit_user_check();
+}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ef6595c..2043697 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -51,8 +51,8 @@
 __asm__(".arch_extension	virt");
 #endif
 
+DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
 static unsigned long hyp_default_vectors;
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -338,7 +338,7 @@
 	}
 
 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
+	vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
 
 	kvm_arm_set_running_vcpu(vcpu);
 }
@@ -1199,19 +1199,8 @@
 }
 #endif
 
-static void teardown_common_resources(void)
-{
-	free_percpu(kvm_host_cpu_state);
-}
-
 static int init_common_resources(void)
 {
-	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
-	if (!kvm_host_cpu_state) {
-		kvm_err("Cannot allocate host CPU state\n");
-		return -ENOMEM;
-	}
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1369,7 +1358,7 @@
 	for_each_possible_cpu(cpu) {
 		kvm_cpu_context_t *cpu_ctxt;
 
-		cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
+		cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
 		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
 
 		if (err) {
@@ -1378,6 +1367,12 @@
 		}
 	}
 
+	err = hyp_map_aux_data();
+	if (err) {
+		kvm_err("Cannot map host auxiliary data: %d\n", err);
+		goto out_err;
+	}
+
 	kvm_info("Hyp mode initialized successfully\n");
 
 	return 0;
@@ -1447,7 +1442,6 @@
 out_hyp:
 	teardown_hyp_mode();
 out_err:
-	teardown_common_resources();
 	return err;
 }
 
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7f868d9..b3d268a 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -894,19 +894,35 @@
 	pmd = stage2_get_pmd(kvm, cache, addr);
 	VM_BUG_ON(!pmd);
 
-	/*
-	 * Mapping in huge pages should only happen through a fault.  If a
-	 * page is merged into a transparent huge page, the individual
-	 * subpages of that huge page should be unmapped through MMU
-	 * notifiers before we get here.
-	 *
-	 * Merging of CompoundPages is not supported; they should become
-	 * splitting first, unmapped, merged, and mapped back in on-demand.
-	 */
-	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
-
 	old_pmd = *pmd;
 	if (pmd_present(old_pmd)) {
+		/*
+		 * Multiple vcpus faulting on the same PMD entry, can
+		 * Multiple vcpus faulting on the same PMD entry can
+		 * same value. Following the break-before-make
+		 * (pmd_clear() followed by tlb_flush()) process can
+		 * hinder forward progress due to refaults generated
+		 * on missing translations.
+		 *
+		 * Skip updating the page table if the entry is
+		 * unchanged.
+		 */
+		if (pmd_val(old_pmd) == pmd_val(*new_pmd))
+			return 0;
+
+		/*
+		 * Mapping in huge pages should only happen through a
+		 * fault.  If a page is merged into a transparent huge
+		 * page, the individual subpages of that huge page
+		 * should be unmapped through MMU notifiers before we
+		 * get here.
+		 *
+		 * Merging of CompoundPages is not supported; they
+		 * Merging of CompoundPages is not supported; they
+		 * should first be split, unmapped, merged, and
+		 * mapped back in on-demand.
+		VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
+
 		pmd_clear(pmd);
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	} else {
@@ -962,6 +978,10 @@
 	/* Create 2nd stage page table mapping - Level 3 */
 	old_pte = *pte;
 	if (pte_present(old_pte)) {
+		/* Skip page table update if there is no change */
+		if (pte_val(old_pte) == pte_val(*new_pte))
+			return 0;
+
 		kvm_set_pte(pte, __pte(0));
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	} else {
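
The two hunks above apply the same policy to PMD and PTE updates: if the entry already holds the value being installed, return without touching it; otherwise follow break-before-make (clear the entry, flush the TLB, then write the new value). Below is a stand-alone sketch of just that control flow; entry_t, clear_and_flush() and set_entry() are illustrative stand-ins, not kernel APIs, and 0 stands in for "not present".

/*
 * User-space model of "skip identical updates, otherwise
 * break-before-make".  Not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t entry_t;            /* stand-in for a pmd/pte value */

static bool tlb_flushed;

static void clear_and_flush(entry_t *slot)
{
	*slot = 0;                   /* models pmd_clear()/kvm_set_pte(.., 0) */
	tlb_flushed = true;          /* models kvm_tlb_flush_vmid_ipa()       */
}

/* Returns true if the slot was rewritten, false if left untouched. */
static bool set_entry(entry_t *slot, entry_t new_val)
{
	if (*slot != 0) {
		/* Same translation already installed: nothing to do. */
		if (*slot == new_val)
			return false;
		/* Different translation: break before make. */
		clear_and_flush(slot);
	}
	*slot = new_val;
	return true;
}

int main(void)
{
	entry_t slot = 0;

	set_entry(&slot, 0x1000);    /* first fault installs the entry */
	tlb_flushed = false;
	set_entry(&slot, 0x1000);    /* concurrent refault: no flush   */
	printf("flushed on identical update: %s\n",
	       tlb_flushed ? "yes" : "no");
	return 0;
}

A vcpu refaulting on an already-installed translation therefore no longer triggers a clear-plus-flush cycle, which is the forward-progress problem the comment in the hunk describes.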
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 8a9c654..83365be 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -403,7 +403,7 @@
 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 {
 	u32 func_id = smccc_get_function(vcpu);
-	u32 val = PSCI_RET_NOT_SUPPORTED;
+	u32 val = SMCCC_RET_NOT_SUPPORTED;
 	u32 feature;
 
 	switch (func_id) {
@@ -415,7 +415,21 @@
 		switch(feature) {
 		case ARM_SMCCC_ARCH_WORKAROUND_1:
 			if (kvm_arm_harden_branch_predictor())
-				val = 0;
+				val = SMCCC_RET_SUCCESS;
+			break;
+		case ARM_SMCCC_ARCH_WORKAROUND_2:
+			switch (kvm_arm_have_ssbd()) {
+			case KVM_SSBD_FORCE_DISABLE:
+			case KVM_SSBD_UNKNOWN:
+				break;
+			case KVM_SSBD_KERNEL:
+				val = SMCCC_RET_SUCCESS;
+				break;
+			case KVM_SSBD_FORCE_ENABLE:
+			case KVM_SSBD_MITIGATED:
+				val = SMCCC_RET_NOT_REQUIRED;
+				break;
+			}
 			break;
 		}
 		break;
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 06332f6..3e1430a 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -252,6 +252,7 @@
 					  NULL);
 	if (!domain) {
 		iounmap(pmu_base_addr);
+		pmu_base_addr = NULL;
 		return -ENOMEM;
 	}
 
diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c
index a129aae..909bb24 100644
--- a/arch/arm/mach-hisi/hotplug.c
+++ b/arch/arm/mach-hisi/hotplug.c
@@ -148,13 +148,20 @@
 	struct device_node *node;
 
 	node = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
-	if (node) {
-		ctrl_base = of_iomap(node, 0);
-		id = HI3620_CTRL;
-		return 0;
+	if (!node) {
+		id = ERROR_CTRL;
+		return -ENOENT;
 	}
-	id = ERROR_CTRL;
-	return -ENOENT;
+
+	ctrl_base = of_iomap(node, 0);
+	of_node_put(node);
+	if (!ctrl_base) {
+		id = ERROR_CTRL;
+		return -ENOMEM;
+	}
+
+	id = HI3620_CTRL;
+	return 0;
 }
 
 void hi3xxx_set_cpu(int cpu, bool enable)
@@ -173,11 +180,15 @@
 	struct device_node *np;
 
 	np = of_find_compatible_node(NULL, NULL, "hisilicon,cpuctrl");
-	if (np) {
-		ctrl_base = of_iomap(np, 0);
-		return true;
-	}
-	return false;
+	if (!np)
+		return false;
+
+	ctrl_base = of_iomap(np, 0);
+	of_node_put(np);
+	if (!ctrl_base)
+		return false;
+
+	return true;
 }
 
 void hix5hd2_set_cpu(int cpu, bool enable)
@@ -219,10 +230,10 @@
 
 	if (!ctrl_base) {
 		np = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
-		if (np)
-			ctrl_base = of_iomap(np, 0);
-		else
-			BUG();
+		BUG_ON(!np);
+		ctrl_base = of_iomap(np, 0);
+		of_node_put(np);
+		BUG_ON(!ctrl_base);
 	}
 
 	if (enable) {
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index f39bd51..faaf7c3 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -116,8 +116,8 @@
 		PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
 }
 
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
 
 /*
  * This function sets up the boot address workaround needed for SMP
@@ -130,7 +130,7 @@
 			     phys_addr_t resume_addr_reg)
 {
 	void __iomem *sram_virt_base;
-	u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+	u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
 
 	mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
 	mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index b4de3da..f7f36da 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -104,6 +104,45 @@
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose(GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High security(HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+	u32 acr, acr_mask;
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+	/*
+	 * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+	 */
+	acr_mask = BIT(0);
+
+	/* If the bit is already set, skip the expensive smc */
+	if ((acr & acr_mask) == acr_mask)
+		return;
+
+	acr |= acr_mask;
+	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+	pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+		 __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
 	/*
@@ -126,6 +165,8 @@
 		set_cntfreq();
 		/* Configure ACR to disable streaming WA for 801819 */
 		omap5_erratum_workaround_801819();
+		/* Enable ACR to allow for ICIALLU workaround */
+		omap5_secondary_harden_predictor();
 	}
 
 	/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
index b68f9c0..d5ddba0 100644
--- a/arch/arm/mach-omap2/omap_hwmod_reset.c
+++ b/arch/arm/mach-omap2/omap_hwmod_reset.c
@@ -92,11 +92,13 @@
  */
 void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
 {
-	local_irq_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	omap_rtc_wait_not_busy(oh);
 	omap_hwmod_write(OMAP_RTC_KICK0_VALUE, oh, OMAP_RTC_KICK0_REG);
 	omap_hwmod_write(OMAP_RTC_KICK1_VALUE, oh, OMAP_RTC_KICK1_REG);
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 /**
@@ -110,9 +112,11 @@
  */
 void omap_hwmod_rtc_lock(struct omap_hwmod *oh)
 {
-	local_irq_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	omap_rtc_wait_not_busy(oh);
 	omap_hwmod_write(0x0, oh, OMAP_RTC_KICK0_REG);
 	omap_hwmod_write(0x0, oh, OMAP_RTC_KICK1_REG);
-	local_irq_enable();
+	local_irq_restore(flags);
 }
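
The switch from local_irq_disable()/local_irq_enable() to local_irq_save()/local_irq_restore() matters because the save/restore pair preserves whatever interrupt state the caller already had, so these helpers no longer re-enable interrupts behind a caller that runs with them disabled. A small user-space model of that difference follows; the flag and helper names are stand-ins, not the kernel primitives.

/* Model of irq-state nesting.  Not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static void irq_save(bool *flags) { *flags = irqs_enabled; irqs_enabled = false; }
static void irq_restore(bool flags) { irqs_enabled = flags; }
static void irq_disable(void) { irqs_enabled = false; }
static void irq_enable(void)  { irqs_enabled = true; }

static void unlock_broken(void)
{
	irq_disable();
	/* ... register writes ... */
	irq_enable();            /* unconditionally re-enables */
}

static void unlock_fixed(void)
{
	bool flags;

	irq_save(&flags);
	/* ... register writes ... */
	irq_restore(flags);      /* restores the caller's state */
}

int main(void)
{
	irq_disable();           /* caller already runs with irqs off */
	unlock_broken();
	printf("after broken helper, irqs enabled: %d\n", irqs_enabled);

	irq_disable();
	unlock_fixed();
	printf("after fixed helper, irqs enabled: %d\n", irqs_enabled);
	return 0;
}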
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 9c10248..4e8c211 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -185,7 +185,7 @@
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		__raw_writel(saved_icmr[i], base + ICMR);
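
The change from plain division to DIV_ROUND_UP() matters whenever the interrupt count is not a multiple of 32: with truncating division the last, partially used register bank is never saved or restored across suspend. A minimal sketch, using 54 interrupts purely as an illustrative count; the macro mirrors the usual kernel definition.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int irq_nr = 54;                 /* hypothetical interrupt count */

	printf("banks with plain division: %d\n", irq_nr / 32);              /* 1 */
	printf("banks with DIV_ROUND_UP:   %d\n", DIV_ROUND_UP(irq_nr, 32)); /* 2 */
	return 0;
}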
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 4e95a7b..19c5020 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -126,6 +126,30 @@
 	select HAVE_CLK_PREPARE
 	select COMMON_CLK_MSM
 
+config ARCH_QM215
+	bool "Enable support for QM215"
+	select CPU_V7
+	select HAVE_ARM_ARCH_TIMER
+	select PINCTRL
+	select QCOM_SCM if SMP
+	select PM_DEVFREQ
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select COMMON_CLK_MSM
+
+config ARCH_MSM8940
+	bool "Enable support for MSM8940"
+	select CPU_V7
+	select HAVE_ARM_ARCH_TIMER
+	select PINCTRL
+	select QCOM_SCM if SMP
+	select PM_DEVFREQ
+	select CLKDEV_LOOKUP
+	select HAVE_CLK
+	select HAVE_CLK_PREPARE
+	select COMMON_CLK_MSM
+
 config ARCH_SDM439
 	bool "Enable support for SDM439"
 	select CPU_V7
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index 3ef169f..f757ce0 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -6,6 +6,8 @@
 obj-$(CONFIG_ARCH_MSM8937) += board-msm8937.o
 obj-$(CONFIG_ARCH_MSM8909) += board-msm8909.o
 obj-$(CONFIG_ARCH_MSM8917) += board-msm8917.o
+obj-$(CONFIG_ARCH_QM215) += board-qm215.o
+obj-$(CONFIG_ARCH_MSM8940) += board-msm8940.o
 obj-$(CONFIG_ARCH_SDM429) += board-sdm429.o
 obj-$(CONFIG_ARCH_SDM439) += board-sdm439.o
 obj-$(CONFIG_ARCH_SDM450) += board-sdm450.o
diff --git a/arch/arm/mach-qcom/board-msm8940.c b/arch/arm/mach-qcom/board-msm8940.c
new file mode 100644
index 0000000..2c5f371
--- /dev/null
+++ b/arch/arm/mach-qcom/board-msm8940.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <asm/mach/arch.h>
+#include "board-dt.h"
+
+static const char *msm8940_dt_match[] __initconst = {
+	"qcom,msm8940",
+	NULL
+};
+
+static void __init msm8940_init(void)
+{
+	board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(MSM8940_DT,
+	"Qualcomm Technologies, Inc. MSM8940 MTP")
+	.init_machine = msm8940_init,
+	.dt_compat = msm8940_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-qcom/board-qm215.c b/arch/arm/mach-qcom/board-qm215.c
new file mode 100644
index 0000000..62f9175
--- /dev/null
+++ b/arch/arm/mach-qcom/board-qm215.c
@@ -0,0 +1,32 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "board-dt.h"
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+
+static const char *qm215_dt_match[] __initconst = {
+	"qcom,qm215",
+	NULL
+};
+
+static void __init qm215_init(void)
+{
+	board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(QM215_DT,
+	"Qualcomm Technologies, Inc. QM215")
+	.init_machine		= qm215_init,
+	.dt_compat		= qm215_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-qcom/board-sdm450.c b/arch/arm/mach-qcom/board-sdm450.c
index 5f68ede..1c0b135 100644
--- a/arch/arm/mach-qcom/board-sdm450.c
+++ b/arch/arm/mach-qcom/board-sdm450.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
 
 static const char *sdm450_dt_match[] __initconst = {
 	"qcom,sdm450",
+	"qcom,sda450",
 	NULL
 };
 
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index 9ad84cd..5ed8fa5 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -16,6 +16,7 @@
 	select ROCKCHIP_TIMER
 	select ARM_GLOBAL_TIMER
 	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+	select PM
 	help
 	  Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
 	  containing the RK2928, RK30xx and RK31xx series.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e5a8a57..9e97962 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -30,6 +30,8 @@
 #include <linux/cma.h>
 #include <linux/msm_dma_iommu_mapping.h>
 #include <linux/dma-mapping-fast.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -122,7 +124,8 @@
 
 static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn);
 
-static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot);
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
+					bool coherent);
 
 static void *arm_dma_remap(struct device *dev, void *cpu_addr,
 			dma_addr_t handle, size_t size,
@@ -131,6 +134,30 @@
 static void arm_dma_unremap(struct device *dev, void *remapped_addr,
 				size_t size);
 
+
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
+				 bool coherent)
+{
+	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		return pgprot_stronglyordered(prot);
+	else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
+		return pgprot_writecombine(prot);
+	return prot;
+}
+
+static bool is_dma_coherent(struct device *dev, unsigned long attrs,
+			    bool is_coherent)
+{
+	if (attrs & DMA_ATTR_FORCE_COHERENT)
+		is_coherent = true;
+	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+
+	return is_coherent;
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -709,19 +736,6 @@
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
-static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
-{
-	if (attrs & DMA_ATTR_WRITE_COMBINE)
-		prot = pgprot_writecombine(prot);
-	else if (attrs & DMA_ATTR_STRONGLY_ORDERED)
-		prot = pgprot_stronglyordered(prot);
-	/* if non-consistent just pass back what was given */
-	else if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-		prot = pgprot_dmacoherent(prot);
-
-	return prot;
-}
-
 #define nommu() 0
 
 #else	/* !CONFIG_MMU */
@@ -915,7 +929,7 @@
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		    gfp_t gfp, unsigned long attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
 			   attrs, __builtin_return_address(0));
@@ -959,7 +973,7 @@
 {
 	void *ptr;
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 	unsigned long offset = handle & ~PAGE_MASK;
 
 	size = PAGE_ALIGN(size + offset);
@@ -1003,7 +1017,8 @@
 		 unsigned long attrs)
 {
 #ifdef CONFIG_MMU
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+						false);
 #endif	/* CONFIG_MMU */
 	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -1529,16 +1544,19 @@
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+			int coherent_flag)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
 	int i;
+	int prot = IOMMU_READ | IOMMU_WRITE;
 
 	dma_addr = __alloc_iova(mapping, size);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
+	prot |= coherent_flag ? IOMMU_CACHE : 0;
 
 	iova = dma_addr;
 	for (i = 0; i < count; ) {
@@ -1553,8 +1571,7 @@
 				break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len,
-				IOMMU_READ|IOMMU_WRITE);
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1623,7 +1640,7 @@
 	if (!addr)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, &page, size);
+	*handle = __iommu_create_mapping(dev, &page, size, coherent_flag);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
 
@@ -1648,17 +1665,19 @@
 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
 	    int coherent_flag)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	struct page **pages;
 	void *addr = NULL;
+	pgprot_t prot;
 
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
+	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
 		return __iommu_alloc_simple(dev, size, gfp, handle,
 					    coherent_flag);
 
+	coherent_flag = is_dma_coherent(dev, attrs, coherent_flag);
+	prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent_flag);
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
 	 * with __GFP_COMP being passed to split_page() which cannot
@@ -1672,7 +1691,7 @@
 	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, pages, size);
+	*handle = __iommu_create_mapping(dev, pages, size, coherent_flag);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
 
@@ -1739,7 +1758,8 @@
 		struct vm_area_struct *vma, void *cpu_addr,
 		dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					is_dma_coherent(dev, attrs, NORMAL));
 
 	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -1784,7 +1804,8 @@
 void arm_iommu_free_attrs(struct device *dev, size_t size,
 		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
-	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs,
+				is_dma_coherent(dev, attrs, NORMAL));
 }
 
 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
@@ -1841,7 +1862,7 @@
 	int ret = 0;
 	unsigned int count;
 	struct scatterlist *s;
-	int prot;
+	int prot = 0;
 
 	size = PAGE_ALIGN(size);
 	*handle = DMA_ERROR_CODE;
@@ -1850,6 +1871,11 @@
 	if (iova == DMA_ERROR_CODE)
 		return -ENOMEM;
 
+	/*
+	 * Check for coherency.
+	 */
+	prot |= is_coherent ? IOMMU_CACHE : 0;
+
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
@@ -1857,7 +1883,7 @@
 		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-		prot = __dma_direction_to_prot(dir);
+		prot |= __dma_direction_to_prot(dir);
 
 		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
@@ -1959,14 +1985,45 @@
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	unsigned int total_length = 0, current_offset = 0;
 	dma_addr_t iova;
+	bool coherent;
 	int prot = __dma_direction_to_prot(dir);
+	int upstream_hint = 0;
+	/*
+	 * This is used to check whether any unaligned offset or size is
+	 * given in the scatter list.
+	 */
+	bool unaligned_offset_size = false;
 
-	for_each_sg(sg, s, nents, i)
+	for_each_sg(sg, s, nents, i) {
 		total_length += s->length;
+		if ((s->offset & ~PAGE_MASK) || (s->length & ~PAGE_MASK)) {
+			unaligned_offset_size = true;
+			break;
+		}
+	}
+
+	/*
+	 * Check for the upstream domain attribute just to catch
+	 * any abusive clients who expect the unaligned offset/size
+	 * support without setting this attribute.
+	 * NOTE: on future kernels, we may not have this domain
+	 * attribute set, in which case the check will be based on
+	 * just the offset/size.
+	 */
+	iommu_domain_get_attr(mapping->domain,
+			      DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
+			      &upstream_hint);
+	if (upstream_hint && unaligned_offset_size)
+		return __iommu_map_sg(dev, sg, nents, dir, attrs,
+				      is_dma_coherent(dev, attrs, false));
 
 	iova = __alloc_iova(mapping, total_length);
 	if (iova == DMA_ERROR_CODE)
 		return 0;
+
+	coherent = of_dma_is_coherent(dev->of_node);
+	prot |= is_dma_coherent(dev, attrs, coherent) ? IOMMU_CACHE : 0;
+
 	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
 	if (ret != total_length) {
 		__free_iova(mapping, iova, total_length);
@@ -2053,6 +2110,12 @@
 {
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
 
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -2072,6 +2135,13 @@
 	struct scatterlist *s;
 	int i;
 
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
+
+	if (iova_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
@@ -2130,7 +2200,8 @@
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     unsigned long attrs)
 {
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!is_dma_coherent(dev, attrs, false) &&
+	      !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
@@ -2178,7 +2249,8 @@
 	if (!iova)
 		return;
 
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!(is_dma_coherent(dev, attrs, false) ||
+	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -2249,8 +2321,10 @@
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -2260,10 +2334,16 @@
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!iova_coherent)
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+static int arm_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == DMA_ERROR_CODE;
+}
 const struct dma_map_ops iommu_ops = {
 	.alloc		= arm_iommu_alloc_attrs,
 	.free		= arm_iommu_free_attrs,
@@ -2282,6 +2362,8 @@
 
 	.map_resource		= arm_iommu_map_resource,
 	.unmap_resource		= arm_iommu_unmap_resource,
+
+	.mapping_error          = arm_iommu_mapping_error,
 };
 
 const struct dma_map_ops iommu_coherent_ops = {
@@ -2346,16 +2428,12 @@
 iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
 {
 	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
-	u64 size = mapping->bits << PAGE_SHIFT;
 	int extensions = 1;
 	int err = -ENOMEM;
 
 	if (!bitmap_size)
 		return -EINVAL;
 
-	WARN(!IS_ALIGNED(size, SZ_128M),
-			"size is not aligned to 128M, alignment enforced");
-
 	if (bitmap_size > PAGE_SIZE) {
 		extensions = bitmap_size / PAGE_SIZE;
 		bitmap_size = PAGE_SIZE;
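
The new is_dma_coherent() helper added earlier in this file resolves coherency from the mapping attributes with a fixed precedence: DMA_ATTR_FORCE_COHERENT wins, DMA_ATTR_FORCE_NON_COHERENT comes next, then the device's own coherency, and the caller-supplied default applies only when none of those are set. The sketch below models that precedence outside the kernel; the attribute masks, struct and function names are illustrative stand-ins, not the kernel definitions.

/* Stand-alone model of the coherency-attribute precedence. */
#include <stdbool.h>
#include <stdio.h>

#define ATTR_FORCE_COHERENT      (1UL << 0)
#define ATTR_FORCE_NON_COHERENT  (1UL << 1)

struct fake_device { bool dma_coherent; };

static bool resolve_coherent(const struct fake_device *dev,
			     unsigned long attrs, bool is_coherent)
{
	if (attrs & ATTR_FORCE_COHERENT)
		is_coherent = true;
	else if (attrs & ATTR_FORCE_NON_COHERENT)
		is_coherent = false;
	else if (dev->dma_coherent)
		is_coherent = true;

	return is_coherent;
}

int main(void)
{
	struct fake_device dev = { .dma_coherent = true };

	printf("%d\n", resolve_coherent(&dev, 0, false));                      /* 1 */
	printf("%d\n", resolve_coherent(&dev, ATTR_FORCE_NON_COHERENT, false)); /* 0 */
	printf("%d\n", resolve_coherent(&dev, ATTR_FORCE_COHERENT, false));     /* 1 */
	return 0;
}

The resolved value then feeds both __get_dma_pgprot() (page attributes for the CPU mapping) and the IOMMU_CACHE bit on the IOMMU side, which is why the hunks thread it through the alloc, mmap, free and map_sg paths.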
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cae69148a..ebee899 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -716,11 +716,14 @@
 				  pteval_t prot, struct mm_struct *mm)
 {
 	struct pte_data data;
+	struct mm_struct *apply_mm = mm;
 
 	data.mask = mask;
 	data.val = prot;
 
-	apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
+	if (addr >= PAGE_OFFSET)
+		apply_mm = &init_mm;
+	apply_to_page_range(apply_mm, addr, SECTION_SIZE, __pte_update, &data);
 	flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
 }
 
@@ -822,19 +825,28 @@
 	return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+	kernel_set_to_readonly = 1;
 	stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
 				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
 				current->active_mm);
 }
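
The kernel_set_to_readonly flag turns set_kernel_text_rw()/set_kernel_text_ro() into no-ops until mark_rodata_ro() has actually applied the protections once, so a caller that runs before rodata is marked cannot flip permissions that were never set. A minimal user-space model of that guard; the flag and helpers here are illustrative, not the arm implementation.

/* Model of the "only toggle after first applied" guard. */
#include <stdbool.h>
#include <stdio.h>

static bool kernel_set_to_readonly;
static bool text_ro;

static void mark_rodata_ro(void)
{
	kernel_set_to_readonly = true;
	text_ro = true;
}

static void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;              /* nothing has been protected yet */
	text_ro = false;
}

static void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;
	text_ro = true;
}

int main(void)
{
	set_kernel_text_rw();        /* early caller: harmless no-op */
	printf("ro before init: %d\n", text_ro);

	mark_rodata_ro();
	set_kernel_text_rw();
	printf("ro while patching: %d\n", text_ro);
	set_kernel_text_ro();
	printf("ro after patching: %d\n", text_ro);
	return 0;
}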
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9557c63..5d06273 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1158,9 +1158,6 @@
 	u64 vmalloc_limit;
 	struct memblock_region *reg;
 	phys_addr_t lowmem_limit = 0;
-#ifdef CONFIG_ENABLE_VMALLOC_SAVING
-	struct memblock_region *prev_reg = NULL;
-#endif
 
 	/*
 	 * Let's use our own (unoptimized) equivalent of __pa() that is
@@ -1171,17 +1168,6 @@
 	 */
 	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
 
-#ifdef CONFIG_ENABLE_VMALLOC_SAVING
-	for_each_memblock(memory, reg) {
-		if (prev_reg == NULL) {
-			prev_reg = reg;
-			continue;
-		}
-		vmalloc_limit += reg->base - (prev_reg->base + prev_reg->size);
-		prev_reg = reg;
-	}
-#endif
-
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
 		phys_addr_t block_end = reg->base + reg->size;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ac71d39..2d53e26 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -802,6 +802,13 @@
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
 
+config OKL4_GUEST
+	bool "OKL4 Hypervisor guest support"
+	depends on ARM64 && OF
+	default n
+	help
+	  Say Y if you want to run Linux as a guest of the OKL4 hypervisor.
+
 config FORCE_MAX_ZONEORDER
 	int
 	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
@@ -865,6 +872,15 @@
 
 	  If unsure, say N.
 
+config ARM64_SSBD
+	bool "Speculative Store Bypass Disable" if EXPERT
+	default y
+	help
+	  This enables mitigation of the bypassing of previous stores
+	  by speculative loads.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 4c013ad..e685767 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -166,6 +166,15 @@
 	  This enables support for the MSM8937 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
+config ARCH_MSM8940
+	bool "Enable Support for Qualcomm Technologies Inc. MSM8940"
+	depends on ARCH_QCOM
+	select CPU_FREQ_QCOM
+	select COMMON_CLK_MSM
+	help
+	  This enables support for the MSM8940 chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_MSM8917
 	bool "Enable Support for Qualcomm Technologies Inc. MSM8917"
 	depends on ARCH_QCOM
@@ -175,6 +184,15 @@
 	  This enables support for the MSM8917 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
+config ARCH_QM215
+	bool "Enable Support for Qualcomm Technologies Inc. QM215"
+	depends on ARCH_QCOM
+	select CPU_FREQ_QCOM
+	select COMMON_CLK_MSM
+	help
+	  This enables support for the QM215 chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_SDM450
 	bool "Enable Support for Qualcomm Technologies Inc. SDM450"
 	depends on ARCH_QCOM
@@ -219,6 +237,7 @@
 	select GPIOLIB
 	select PINCTRL
 	select PINCTRL_ROCKCHIP
+	select PM
 	select ROCKCHIP_TIMER
 	help
 	  This enables support for the ARMv8 based Rockchip chipsets,
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2b265a7..826f47b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -81,7 +81,7 @@
 KBUILD_CFLAGS_MODULE	+= -mcmodel=large
 ifeq ($(CONFIG_LTO_CLANG), y)
 # Code model is not stored in LLVM IR, so we need to pass it also to LLVMgold
-LDFLAGS		+= -plugin-opt=-code-model=large
+KBUILD_LDFLAGS_MODULE	+= -plugin-opt=-code-model=large
 endif
 endif
 
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index a16b1b3..8a94ec8 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -393,7 +393,7 @@
 			reg = <0x66080000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -421,7 +421,7 @@
 			reg = <0x660b0000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index ef3b161..fb236fc 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -336,6 +336,8 @@
 endif
 
 ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+dtbo-$(CONFIG_ARCH_MSM8940) += msm8940-mtp-overlay.dtbo
+
 dtbo-$(CONFIG_ARCH_MSM8917) += msm8917-mtp-overlay.dtbo \
 	msm8917-qrd-overlay.dtbo \
 	msm8917-cdp-overlay.dtbo \
@@ -344,8 +346,9 @@
 	msm8917-rcm-overlay.dtbo \
 	apq8017-mtp-overlay.dtbo \
 	apq8017-cdp-overlay.dtbo \
-	apq8017-cdp-wcd-rome-overlay.dtbo \
-	qm215-qrd-overlay.dtbo
+	apq8017-cdp-wcd-rome-overlay.dtbo
+
+dtbo-$(CONFIG_ARCH_QM215) += qm215-qrd-overlay.dtbo
 
 dtbo-$(CONFIG_ARCH_MSM8953) += msm8953-mtp-overlay.dtbo \
 	msm8953-cdp-overlay.dtbo \
@@ -385,6 +388,10 @@
 	sdm429-cdp-overlay.dtbo \
 	sdm429-qrd-overlay.dtbo
 
+msm8940-mtp-overlay.dtbo-base := msm8940-pmi8950.dtb \
+	msm8940-pmi8937.dtb \
+	msm8940-pmi8940.dtb
+
 msm8917-mtp-overlay.dtbo-base := msm8917-pmi8950.dtb \
 	msm8917-pmi8937.dtb \
 	msm8917-pmi8940.dtb
@@ -529,6 +536,10 @@
 	msm8937-interposer-sdm429-cdp.dtb \
 	msm8937-interposer-sdm429-mtp.dtb
 
+dtb-$(CONFIG_ARCH_MSM8940) += msm8940-pmi8937-mtp.dtb \
+	msm8940-pmi8950-mtp.dtb \
+	msm8940-pmi8940-mtp.dtb
+
 dtb-$(CONFIG_ARCH_MSM8917) += msm8917-pmi8950-mtp.dtb \
 	msm8917-pmi8950-cdp.dtb \
 	msm8917-pmi8950-rcm.dtb \
@@ -546,10 +557,12 @@
 	apq8017-pmi8937-cdp-wcd-rome.dtb \
 	msm8917-pmi8940-mtp.dtb \
 	msm8917-pmi8940-cdp.dtb \
-	msm8917-pmi8940-rcm.dtb \
-	qm215-qrd.dtb
+	msm8917-pmi8940-rcm.dtb
 
-dtb-$(CONFIG_ARCH_MSM8909) += sdw3100-msm8909w-wtp.dtb \
+dtb-$(CONFIG_ARCH_QM215) += qm215-qrd.dtb
+
+dtb-$(CONFIG_ARCH_MSM8909) += msm8909-pm8916-mtp.dtb \
+	sdw3100-msm8909w-wtp.dtb \
 	sdw3100-apq8009w-wtp.dtb \
 	sdw3100-apq8009w-alpha.dtb \
 	apq8009-mtp-wcd9326-refboard.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009-dragon.dts b/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
index 314af11..1ac603e 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-dragon.dts
@@ -361,3 +361,47 @@
 &blsp1_uart1 {
 	status = "disabled";
 };
+
+&i2c_5 {
+	status = "okay";
+	goodix_ts@5d {
+		compatible = "goodix,gt9xx";
+		status = "okay";
+		reg = <0x5d>;
+		vdd_ana-supply = <&pm8916_l17>;
+		vcc_i2c-supply = <&pm8916_l6>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <13 0x2008>;
+		pinctrl-names = "gdix_ts_int_default", "gdix_ts_int_output_low",
+			"gdix_ts_int_output_high", "gdix_ts_int_input",
+			"gdix_ts_rst_default", "gdix_ts_rst_output_low",
+			"gdix_ts_rst_output_high", "gdix_ts_rst_input";
+		pinctrl-0 = <&ts_int_default>;
+		pinctrl-1 = <&ts_int_output_low>;
+		pinctrl-2 = <&ts_int_output_high>;
+		pinctrl-3 = <&ts_int_input>;
+		pinctrl-4 = <&ts_rst_default>;
+		pinctrl-5 = <&ts_rst_output_low>;
+		pinctrl-6 = <&ts_rst_output_high>;
+		pinctrl-7 = <&ts_rst_input>;
+		reset-gpios = <&msm_gpio 16 0x00>;
+		irq-gpios = <&msm_gpio 13 0x2008>;
+		irq-flags = <2>;
+		touchscreen-max-id = <5>;
+		touchscreen-size-x = <479>;
+		touchscreen-size-y = <853>;
+		touchscreen-max-w = <1024>;
+		touchscreen-max-p = <1024>;
+		goodix,type-a-report = <0>;
+		goodix,driver-send-cfg = <1>;
+		goodix,wakeup-with-reset = <0>;
+		goodix,resume-in-workqueue = <1>;
+		goodix,int-sync = <1>;
+		goodix,swap-x2y = <0>;
+		goodix,esd-protect = <1>;
+		goodix,pen-suppress-finger = <0>;
+		goodix,auto-update = <1>;
+		goodix,auto-update-cfg = <0>;
+		goodix,power-off-sleep = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-pronto-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-pronto-refboard.dts
index 3a421bf..e0e45c5 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-pronto-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-pronto-refboard.dts
@@ -18,7 +18,7 @@
 #include "apq8009-memory.dtsi"
 #include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 #include "msm8909-pm8916-camera.dtsi"
-#include "msm8909-pm8916-camera-sensor-robot.dtsi"
+#include "msm8909-pm8916-camera-sensor-robot-pronto.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8009 Robot-pronto RefBoard";
@@ -330,18 +330,7 @@
 		};
 
 		pa-therm0-adc {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&pm8916_vadc 0x36>;
-			thermal-governor = "user_space";
-
-			trips {
-				active-config0 {
-					temperature = <65000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
+			status = "disabled";
 		};
 		mdm-lowf {
 			cooling-maps {
@@ -381,6 +370,19 @@
 	};
 };
 
+&pm8916_vadc {
+	chan@36 {
+		label = "pa_therm0";
+		reg = <0x36>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		/delete-property/ qcom,vadc-thermal-node;
+	};
+};
 &msm_gpio {
 	sdc2_wlan_gpio_on: sdc2_wlan_gpio_on {
 		mux {
@@ -409,36 +411,6 @@
 	};
 };
 
-&sdhc_2 {
-	/delete-property/cd-gpios;
-	#address-cells = <0>;
-	interrupt-parent = <&sdhc_2>;
-	interrupts = <0 1 2>;
-	#interrupt-cells = <1>;
-	interrupt-map-mask = <0xffffffff>;
-	interrupt-map = <0 &intc 0 125 0
-			1 &intc 0 221 0
-			2 &msm_gpio 38 0>;
-	interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq";
-
-	qcom,vdd-voltage-level = <1800000 2950000>;
-	qcom,vdd-current-level = <15000 400000>;
-
-	qcom,vdd-io-voltage-level = <1800000 1800000>;
-	qcom,vdd-io-current-level = <200 50000>;
-	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
-	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
-	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
-	&sdc2_wlan_gpio_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
-	&sdc2_wlan_gpio_off>;
-	qcom,nonremovable;
-	qcom,core_3_0v_support;
-	status = "disabled";
-};
-
 &i2c_4 {
 	status= "okay";
 	smb1360_otg_supply: smb1360-chg-fg@14 {
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-rome-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-rome-refboard.dts
index e00eb01..f1c130f 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-rome-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-rome-refboard.dts
@@ -18,7 +18,7 @@
 #include "apq8009-memory.dtsi"
 #include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 #include "msm8909-pm8916-camera.dtsi"
-#include "msm8909-pm8916-camera-sensor-robot.dtsi"
+#include "msm8909-pm8916-camera-sensor-robot-rome.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. APQ8009 Robot-rome RefBoard";
@@ -325,18 +325,7 @@
 		};
 
 		pa-therm0-adc {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&pm8916_vadc 0x36>;
-			thermal-governor = "user_space";
-
-			trips {
-				active-config0 {
-					temperature = <65000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
+			status = "disabled";
 		};
 		mdm-lowf {
 			cooling-maps {
@@ -380,6 +369,20 @@
 	status = "disabled";
 };
 
+&pm8916_vadc {
+	chan@36 {
+		label = "pa_therm0";
+		reg = <0x36>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		/delete-property/ qcom,vadc-thermal-node;
+	};
+};
+
 &msm_gpio {
 	sdc2_wlan_gpio_on: sdc2_wlan_gpio_on {
 		mux {
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index bb062b5..601be61 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -170,7 +170,7 @@
 			led@6 {
 				label = "apq8016-sbc:blue:bt";
 				gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>;
-				linux,default-trigger = "bt";
+				linux,default-trigger = "bluetooth-power";
 				default-state = "off";
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi
index 41152d2..3f0bc28 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-dragon.dtsi
@@ -104,22 +104,6 @@
 		pinctrl-1 = <&sec_tlmm_lines_sus &ext_amp_ctrl_sleep>;
 	};
 
-	gpio_keys {
-		compatible = "gpio-keys";
-		input-name = "gpio-keys";
-		pinctrl-names = "default";
-		pinctrl-0 = <&gpio_key_active>;
-		vol_up {
-			label = "volume_up";
-			gpios = <&tlmm 85 0x1>;
-			linux,input-type = <1>;
-			linux,code = <115>;
-			debounce-interval = <15>;
-			linux,can-disable;
-			gpio-key,wakeup;
-		};
-	};
-
 	qcom,rmnet-ipa {
 		status = "disabled";
 	};
@@ -408,6 +392,14 @@
 };
 
 &spmi_bus {
+	qcom,pm8953@0 {
+		qcom,power-on@800 {
+			qcom,resin-gpiobase = <1019>;
+			qcom,pon_2 {
+				/delete-property/ linux,code;
+			};
+		};
+	};
 	qcom,pmi8950@2 {
 		qcom,leds@a100 {
 			compatible = "qcom,leds-qpnp";
diff --git a/arch/arm64/boot/dts/qcom/apq8053-lite-som.dts b/arch/arm64/boot/dts/qcom/apq8053-lite-som.dts
index fa51fd2..684833c 100644
--- a/arch/arm64/boot/dts/qcom/apq8053-lite-som.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-lite-som.dts
@@ -108,6 +108,23 @@
 		pinctrl-0 = <&sec_tlmm_lines_act &ext_amp_ctrl_active>;
 		pinctrl-1 = <&sec_tlmm_lines_sus &ext_amp_ctrl_sleep>;
 	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_key_active>;
+		vol_up {
+			label = "volume_up";
+			gpios = <&tlmm 85 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+	};
+
 };
 
 &cdc_pdm_comp_lines_act {
@@ -304,14 +321,6 @@
 };
 
 &spmi_bus {
-	qcom,pm8953@0 {
-		qcom,power-on@800 {
-			qcom,resin-gpiobase = <1019>;
-			qcom,pon_2 {
-				/delete-property/ linux,code;
-			};
-		};
-	};
 	qcom,pmi8950@2 {
 		qcom,leds@a100 {
 			compatible = "qcom,leds-qpnp";
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
index 06fc5a4..87a5253 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-390p-auo-cmd.dtsi
@@ -37,6 +37,7 @@
 		qcom,mdss-dsi-underflow-color = <0xff>;
 		qcom,mdss-dsi-border-color = <0>;
 		qcom,mdss-tear-check-frame-rate = <4500>;
+		qcom,mdss-dsi-idle-fps = <10>;
 		qcom,mdss-dsi-on-command = [
 			15 01 00 00 00 00 02 fe 01
 			15 01 00 00 00 00 02 0a f0
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-osd-disp-fwvga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-osd-disp-fwvga-video.dtsi
new file mode 100644
index 0000000..0967a50
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-osd-disp-fwvga-video.dtsi
@@ -0,0 +1,97 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_osd_disp_fwvga_video: qcom,mdss_dsi_osd_disp_fwvga_video {
+		qcom,mdss-dsi-panel-name =
+			"OSD Displays fwvga video mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-destination = "display_1";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <480>;
+		qcom,mdss-dsi-panel-height = <854>;
+		qcom,mdss-dsi-h-front-porch = <70>;
+		qcom,mdss-dsi-h-back-porch = <70>;
+		qcom,mdss-dsi-h-pulse-width = <70>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <10>;
+		qcom,mdss-dsi-v-front-porch = <10>;
+		qcom,mdss-dsi-v-pulse-width = <20>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = <0>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,ulps-enabled;
+		qcom,mdss-dsi-on-command = [
+			39 01 00 00 01 00 04 BF 91 61 F2
+			39 01 00 00 01 00 03 B3 00 9B
+			39 01 00 00 01 00 03 B4 00 9B
+			39 01 00 00 01 00 02 C3 04
+			39 01 00 00 01 00 07 B8 00 6F 01 00 6F 01
+			39 01 00 00 01 00 04 BA 34 23 00
+			39 01 00 00 01 00 03 C4 30 6A
+			39 01 00 00 01 00 0A C7 00 01 32 05 65 2A 12 A5 A5
+			39 01 00 00 01 00 27 C8 7F 6A 5A 4E 49 39 3B 23 37
+				32 2F 49 35 3B 31 2B 1E 0F 00 7F 6A 5A 4E
+				49 39 3B 23 37 32 2F 49 35 3B 31 2B 1E 0F 00
+			39 01 00 00 01 00 11 D4 1E 1F 1F 1F 06 04 0A 08 00
+				02 1F 1F 1F 1F 1F 1F
+			39 01 00 00 01 00 11 D5 1E 1F 1F 1F 07 05 0B 09 01
+				03 1F 1F 1F 1F 1F 1F
+			39 01 00 00 01 00 11 D6 1F 1E 1F 1F 07 09 0B 05 03
+				01 1F 1F 1F 1F 1F 1F
+			39 01 00 00 01 00 11 D7 1F 1E 1F 1F 06 08 0A 04 02
+				00 1F 1F 1F 1F 1F 1F
+			39 01 00 00 01 00 15 D8 20 00 00 30 08 20 01 02 00
+				01 02 06 7B 00 00 72 0A 0E 49 08
+			39 01 00 00 01 00 14 D9 00 0A 0A 89 00 00 06 7B 00
+				00 00 3B 33 1F 00 00 00 03 7B
+			05 01 00 00 01 00 02 35 00
+			39 01 00 00 01 00 02 BE 01
+			39 01 00 00 01 00 02 C1 10
+			39 01 00 00 01 00 0B CC 34 20 38 60 11 91 00 40 00 00
+			39 01 00 00 01 00 02 BE 00
+			05 01 00 00 01 00 02 11 00
+			05 01 00 00 01 00 02 29 00];
+		qcom,mdss-dsi-off-command = [
+				05 01 00 00 01 00 02 28 00
+				05 01 00 00 01 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <1>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-panel-timings =
+			[7F 1C 12 00 40 44 16 1E 17 03 04 00];
+		qcom,mdss-dsi-t-clk-post = <0x20>;
+		qcom,mdss-dsi-t-clk-pre = <0x2C>;
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+		qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+		qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+		qcom,mdss-dsi-pwm-gpio = <&pm8916_mpps 4 0>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-8909.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8909.dtsi
index b506fb4..a6c7266 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-8909.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8909.dtsi
@@ -36,13 +36,10 @@
 			<GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>,
 			<GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>,
 			<GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
-
-		vdd-supply = <&gdsc_oxili_gx>;
-		qcom,regulator-names = "vdd";
 		clocks =
-			<&clock_gcc clk_gcc_oxili_ahb_clk>,
-			<&clock_gcc clk_gcc_bimc_gfx_clk>;
-		clock-names = "gpu_ahb_clk", "gcc_bimc_gfx_clk";
+			<&clock_gcc clk_gcc_smmu_cfg_clk>,
+			<&clock_gcc clk_gcc_gfx_tcu_clk>;
+		clock-names = "iface_clk", "core_clk";
 	};
 
 	/* A test device to test the SMMU operation */
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
index 707875b..5664d3e 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
@@ -25,6 +25,7 @@
 		#global-interrupts = <2>;
 		qcom,regulator-names = "vdd";
 		vdd-supply = <&gpu_cx_gdsc>;
+		qcom,deferred-regulator-disable-delay = <80>;
 		interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 364 IRQ_TYPE_LEVEL_HIGH>,
@@ -324,17 +325,8 @@
 };
 
 &apps_smmu {
-	qcom,actlr =	<0x0880 0x8 0x103>,
-			<0x0881 0x8 0x103>,
-			<0x0c80 0x8 0x103>,
-			<0x0c81 0x8 0x103>,
-			<0x1090 0x0 0x103>,
-			<0x1091 0x0 0x103>,
-			<0x10a0 0x8 0x103>,
-			<0x10b0 0x0 0x103>,
-			<0x10a1 0x8 0x103>,
-			<0x10a3 0x8 0x103>,
-			<0x10a4 0x8 0x103>,
-			<0x10b4 0x0 0x103>,
-			<0x10a5 0x8 0x103>;
+	qcom,actlr =
+		/* HF and SF TBUs: +3 deep PF */
+			<0x0800 0x7ff 0x103>,
+			<0x1000 0x3ff 0x103>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm-qvr-external.dtsi b/arch/arm64/boot/dts/qcom/msm-qvr-external.dtsi
new file mode 100644
index 0000000..3b3fc58
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-qvr-external.dtsi
@@ -0,0 +1,44 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	smp2pgpio_qvrexternal_5_in: qcom,smp2pgpio-qvrexternal-5-in {
+			compatible = "qcom,smp2pgpio";
+			qcom,entry-name = "qvrexternal";
+			qcom,remote-pid = <5>;
+			qcom,is-inbound;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_qvrexternal_5_in {
+			compatible = "qcom,smp2pgpio_client_qvrexternal_5_in";
+			gpios = <&smp2pgpio_qvrexternal_5_in 0 0>;
+	};
+
+	smp2pgpio_qvrexternal_5_out: qcom,smp2pgpio-qvrexternal-5-out {
+			compatible = "qcom,smp2pgpio";
+			qcom,entry-name = "qvrexternal";
+			qcom,remote-pid = <5>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_client_qvrexternal_5_out {
+			compatible = "qcom,smp2pgpio_client_qvrexternal_5_out";
+			gpios = <&smp2pgpio_qvrexternal_5_out 0 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
index 96d9ea7..024833a 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-gpu.dtsi
@@ -65,6 +65,7 @@
 
 		/* Bus Scale Settings */
 		qcom,gpubw-dev = <&gpubw>;
+		qcom,bus-control;
 		qcom,msm-bus,name = "grp3d";
 		qcom,msm-bus,num-cases = <4>;
 		qcom,msm-bus,num-paths = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8909-mdss.dtsi b/arch/arm64/boot/dts/qcom/msm8909-mdss.dtsi
index 67c6d06..725ef31 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-mdss.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-mdss.dtsi
@@ -28,6 +28,21 @@
 		clock-names = "iface_clk", "bus_clk", "core_clk_src",
 				"core_clk", "vsync_clk";
 
+		qcom,regs-dump-mdp =    <0x0300 0x0358>,
+					<0x10000 0x101E0>,
+					<0x20004 0x0020044>,
+					<0x90000 0x90074>;
+		qcom,regs-dump-names-mdp =      "MDP_SYNC",
+						"PPP","PPP_FETCH",
+						"DMA_P";
+
+		qcom,regs-dump-vbif =   <0x0004 0x0010>,
+					<0x0194 0x01b0>,
+					<0x0200 0x020c>;
+
+		qcom,regs-dump-names-vbif =     "VBIF_CLK",
+						"VBIF_ERR","VBIF_XIN_HALT";
+
 		mdss_fb0: qcom,mdss_fb_primary {
 			cell-index = <0>;
 			compatible = "qcom,mdss-fb";
diff --git a/arch/arm64/boot/dts/qcom/msm8909-mtp_qseev4.dtsi b/arch/arm64/boot/dts/qcom/msm8909-mtp_qseev4.dtsi
new file mode 100644
index 0000000..420aff7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-mtp_qseev4.dtsi
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&reserved_mem {
+	secure_display_memory: secure_region {
+		compatible = "shared-dma-pool";
+		label = "secure_display_mem";
+		size = <0 0x1400000>;
+		reusable;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
index c22259b..656385d 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-pinctrl.dtsi
@@ -922,6 +922,103 @@
 			};
 		};
 
+		/* add pingrp for goodix touch */
+		ts_int_default: ts_int_default {
+			mux {
+				pins = "gpio13";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio13";
+				drive-strength = <16>;
+				/*bias-pull-up;*/
+				input-enable;
+				bias-disable;
+			};
+		};
+
+		ts_int_output_high: ts_int_output_high {
+			mux {
+				pins = "gpio13";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio13";
+				output-high;
+			};
+		};
+
+		ts_int_output_low: ts_int_output_low {
+			mux {
+				pins = "gpio13";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio13";
+				output-low;
+			};
+		};
+
+		ts_int_input: ts_int_input {
+			mux {
+				pins = "gpio13";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio13";
+				input-enable;
+				bias-disable;
+			};
+		};
+
+		ts_rst_default: ts_rst_default {
+			mux {
+				pins = "gpio16";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio16";
+				drive-strength = <16>;
+				/*bias-pull-up;*/
+				input-enable;
+				bias-disable;
+			};
+		};
+
+		ts_rst_output_high: ts_rst_output_high {
+			mux {
+				pins = "gpio16";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio16";
+				output-high;
+			};
+		};
+
+		ts_rst_output_low: ts_rst_output_low {
+			mux {
+				pins = "gpio16";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio16";
+				output-low;
+			};
+		};
+
+		ts_rst_input: ts_rst_input {
+			mux {
+				pins = "gpio16";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio16";
+				input-enable;
+				bias-disable;
+			};
+		};
+
 		/* add pingrp for touchscreen */
 		pmx_ts_int_active {
 			ts_int_active: ts_int_active {
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-pronto.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-pronto.dtsi
new file mode 100644
index 0000000..854965e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-pronto.dtsi
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+	status = "ok";
+};
+
+&i2c_3 {
+
+	qcom,camera@1 {
+		cell-index = <1>;
+		compatible = "qcom,camera";
+		reg = <0x1>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-name = "cam_vio","cam_vana";
+		qcom,cam-vreg-min-voltage = <1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1800000 2850000>;
+		qcom,cam-vreg-op-mode = <0 80000>;
+		qcom,cam-vreg-type = <0 0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 35 0>,
+			<&msm_gpio 34 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET",
+			"CAM_STANDBY";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-rome.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-rome.dtsi
new file mode 100644
index 0000000..cd720fd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-rome.dtsi
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+	status = "ok";
+};
+
+&i2c_3 {
+	otp_eeprom: qcom,msm_eeprom@6e {
+		status = "ok";
+		cell-index = <0>;
+		reg = <0x6e>;
+		compatible = "msm_eeprom";
+		qcom,eeprom-name = "sunny_imx241_otp";
+		qcom,slave-addr = <0x6e>;
+		qcom,i2c-freq-mode = <1>;
+
+		cam_vdig-supply = <&pm8916_l2>;
+		cam_vio-supply = <&pm8916_l6>;
+		cam_vana-supply = <&pm8916_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+		qcom,cam-vreg-min-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000>;
+		qcom,cam-vreg-type = <0 0 0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+					&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep
+					&cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+				<&msm_gpio 35 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-req-tbl-num = <0 1>;
+		qcom,gpio-req-tbl-flags = <1 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK", "CAM_RESET";
+
+		qcom,cam-power-seq-type = "sensor_vreg",
+						"sensor_vreg",
+						"sensor_vreg",
+						"sensor_gpio",
+						"sensor_clk",
+						"sensor_i2c_mux";
+		qcom,cam-power-seq-val =  "cam_vana",
+						"cam_vdig",
+						"cam_vio",
+						"sensor_gpio_reset",
+						"sensor_cam_mclk",
+						"none";
+		qcom,cam-power-seq-cfg-val = <2850000 1200000
+						1800000 1 24000000 0>;
+		qcom,cam-power-seq-delay =   <1 1 1 1 1 0>;
+
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+
+		qcom,num-blocks = <12>;
+		qcom,page0 = <1 0x34C5 2 0x02 1 1>;
+		qcom,poll0 = <0 0x0 2 0 1 1>;
+		qcom,mem0 = <0 0x0 2 0 1 0>;
+		qcom,page1 = <1 0x34C9 2 0x02 1 10>;
+		qcom,poll1 = <0 0x0 2 0 1 1>;
+		qcom,mem1 = <8 0x3510 2 0 1 0>;
+		qcom,page2 = <1 0x34C5 2 0x03 1 1>;
+		qcom,poll2 = <0 0x0 2 0 1 1>;
+		qcom,mem2 = <0 0x0 2 0 1 0>;
+		qcom,page3 = <1 0x34C9 2 0x03 1 10>;
+		qcom,poll3 = <0 0x0 2 0 1 1>;
+		qcom,mem3 = <8 0x3518 2 0 1 0>;
+		qcom,page4 = <1 0x34C5 2 0x06 1 1>;
+		qcom,poll4 = <0 0x0 2 0 1 1>;
+		qcom,mem4 = <0 0x0 2 0 1 0>;
+		qcom,page5 = <1 0x34C9 2 0x06 1 10>;
+		qcom,poll5 = <0 0x0 2 0 1 1>;
+		qcom,mem5 = <8 0x3530 2 0 1 0>;
+		qcom,page6 = <1 0x34C5 2 0x07 1 1>;
+		qcom,poll6 = <0 0x0 2 0 1 1>;
+		qcom,mem6 = <0 0x0 2 0 1 0>;
+		qcom,page7 = <1 0x34C9 2 0x07 1 10>;
+		qcom,poll7 = <0 0x0 2 0 1 1>;
+		qcom,mem7 = <8 0x3538 2 0 1 0>;
+		qcom,page8 = <1 0x34C5 2 0x0A 1 1>;
+		qcom,poll8 = <0 0x0 2 0 1 1>;
+		qcom,mem8 = <0 0x0 2 0 1 0>;
+		qcom,page9 = <1 0x34C9 2 0x0A 1 10>;
+		qcom,poll9 = <0 0x0 2 0 1 1>;
+		qcom,mem9 = <8 0x3550 2 0 1 0>;
+		qcom,page10 = <1 0x34C5 2 0x0B 1 1>;
+		qcom,poll10 = <0 0x0 2 0 1 1>;
+		qcom,mem10 = <0 0x0 2 0 1 0>;
+		qcom,page11 = <1 0x34C9 2 0x0B 1 10>;
+		qcom,poll11 = <0 0x0 2 0 1 1>;
+		qcom,mem11 = <8 0x3558 2 0 1 0>;
+	};
+
+	qcom,camera@0 {
+		cell-index = <0>;
+		compatible = "qcom,camera";
+		reg = <0x2>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		cam_vdig-supply = <&pm8916_l2>;
+		cam_vana-supply = <&pm8916_l17>;
+		cam_vio-supply = <&pm8916_l6>;
+		qcom,cam-vreg-type = <0 0 0>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+		qcom,cam-vreg-min-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 1800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_default>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+		gpios = <&msm_gpio 26 0>,
+			<&msm_gpio 35 0>,
+			<&msm_gpio 34 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET",
+			"CAM_STANDBY";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		qcom,eeprom-src = <&otp_eeprom>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-som.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-som.dtsi
index 9c0e539..7172e5e 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-som.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot-som.dtsi
@@ -74,12 +74,12 @@
 		pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
 		gpios = <&msm_gpio 26 0>,
 			<&msm_gpio 91 0>;
-		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <1>;
 
 		qcom,gpio-req-tbl-num = <0 1>;
 		qcom,gpio-req-tbl-flags = <1 0>;
 		qcom,gpio-req-tbl-label = "CAMIF_MCLK",
-			"CAM_RESET";
+			"CAM_STANDBY";
 		qcom,sensor-position = <0>;
 		qcom,sensor-mode = <0>;
 		status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8909-pm8916-mtp.dts
new file mode 100644
index 0000000..f1caab6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-mtp.dts
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8909-mtp.dtsi"
+#include "8909-pm8916.dtsi"
+#include "msm8909-pm8916-mtp.dtsi"
+#include "msm8909-mtp_qseev4.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8909-PM8916 1GB MTP";
+	compatible = "qcom,msm8909-mtp", "qcom,msm8909", "qcom,mtp";
+	qcom,msm-id = <245 0x20000>, <245 0x0>;
+	qcom,board-id = <0x02010008 0x102>;
+	qcom,pmic-id = <0x1000B 0x0 0x0 0x0>;
+};
+
+&pm8916_chg {
+	status = "ok";
+};
+
+&usb_otg {
+	extcon = <&pm8916_chg>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909.dtsi b/arch/arm64/boot/dts/qcom/msm8909.dtsi
index 73760a5..a4e64fb 100644
--- a/arch/arm64/boot/dts/qcom/msm8909.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909.dtsi
@@ -1158,6 +1158,11 @@
 			compatible = "qcom,msm-imem-restart_reason";
 			reg = <0x65c 4>;
 		};
+
+		diag_dload@c8 {
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 200>;
+		};
 	};
 
 	qcom,mpm2-sleep-counter@4a3000 {
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8909w-gpu.dtsi
index 7d42127..3bbd283 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w-gpu.dtsi
@@ -25,6 +25,8 @@
 	/* To disable GPU wake up on touch event */
 	qcom,disable-wake-on-touch;
 
+	qcom,initial-pwrlevel = <0>;
+
 	/* Bus Scale Settings */
 	qcom,msm-bus,num-cases = <3>;
 	qcom,msm-bus,vectors-KBps =
diff --git a/arch/arm64/boot/dts/qcom/msm8917-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8917-coresight.dtsi
index 87303c5..2ff64d6 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-coresight.dtsi
@@ -11,1029 +11,34 @@
  * GNU General Public License for more details.
  */
 
+#include "msm8937-coresight.dtsi"
+
+&funnel_apss {
+	ports {
+		/delete-node/ port@1;
+		/delete-node/ port@2;
+		/delete-node/ port@3;
+		/delete-node/ port@4;
+	};
+};
+
+&funnel_mm {
+	ports {
+		/delete-node/ port@4;
+	};
+};
+
 &soc {
-	tmc_etr: tmc@6028000 {
-		compatible = "arm,primecell";
-		reg = <0x6028000 0x1000>,
-		      <0x6044000 0x15000>;
-		reg-names = "tmc-base", "bam-base";
+	/delete-node/ etm@619c000;
+	/delete-node/ etm@619d000;
+	/delete-node/ etm@619e000;
+	/delete-node/ etm@619f000;
+	/delete-node/ cti@6198000;
+	/delete-node/ cti@6199000;
+	/delete-node/ cti@619a000;
+	/delete-node/ cti@619b000;
+};
 
-		interrupts = <0 166 0>;
-		interrupt-names = "byte-cntr-irq";
-
-		arm,buffer-size = <0x100000>;
-		arm,sg-enable;
-		qcom,force-reg-dump;
-
-		coresight-name = "coresight-tmc-etr";
-		coresight-csr = <&csr>;
-		coresight-ctis = <&cti0 &cti8>;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		port {
-			tmc_etr_in_replicator: endpoint {
-				slave-mode;
-				remote-endpoint = <&replicator_out_tmc_etr>;
-			};
-		};
-	};
-
-	replicator: replicator@6026000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b909>;
-
-		reg = <0x6026000 0x1000>;
-		reg-names = "replicator-base";
-
-		coresight-name = "coresight-replicator";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				replicator_out_tmc_etr: endpoint {
-					remote-endpoint =
-						<&tmc_etr_in_replicator>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
-				replicator_in_tmc_etf: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&tmc_etf_out_replicator>;
-				};
-			};
-		};
-	};
-
-	tmc_etf: tmc@6027000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b961>;
-
-		reg = <0x6027000 0x1000>;
-		reg-names = "tmc-base";
-
-		coresight-name = "coresight-tmc-etf";
-		coresight-csr = <&csr>;
-
-		arm,default-sink;
-		qcom,force-reg-dump;
-
-		coresight-ctis = <&cti0 &cti8>;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		 ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				tmc_etf_out_replicator:endpoint {
-					remote-endpoint =
-						<&replicator_in_tmc_etf>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
-				tmc_etf_in_funnel_in0: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&funnel_in0_out_tmc_etf>;
-				};
-			};
-		};
-	};
-
-	funnel_in0: funnel@6021000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b908>;
-
-		reg = <0x6021000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-name = "coresight-funnel-in0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		 ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				funnel_in0_out_tmc_etf: endpoint {
-					remote-endpoint =
-						<&tmc_etf_in_funnel_in0>;
-				};
-			};
-
-			port@1 {
-				reg = <7>;
-				funnel_in0_in_stm: endpoint {
-					slave-mode;
-					remote-endpoint = <&stm_out_funnel_in0>;
-				};
-			};
-
-			port@2 {
-				reg = <6>;
-				funnel_in0_in_tpda: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&tpda_out_funnel_in0>;
-				};
-			};
-
-			port@3 {
-				reg = <3>;
-				funnel_in0_in_funnel_center: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&funnel_center_out_funnel_in0>;
-				};
-			};
-
-			port@4 {
-				reg = <4>;
-				funnel_in0_in_funnel_right: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&funnel_right_out_funnel_in0>;
-				};
-			};
-
-			port@5 {
-				reg = <5>;
-				funnel_in0_in_funnel_mm: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&funnel_mm_out_funnel_in0>;
-				};
-			};
-		};
-	};
-
-	funnel_mm: funnel@6130000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b908>;
-
-		reg = <0x6130000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-name = "coresight-funnel-mm";
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				funnel_mm_out_funnel_in0: endpoint {
-					remote-endpoint =
-						<&funnel_in0_in_funnel_mm>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
-				funnel_mm_in_wcn_etm0: endpoint {
-					slave-mode;
-					remote-endpoint =
-					<&wcn_etm0_out_funnel_mm>;
-				};
-			};
-
-			port@2 {
-				reg = <4>;
-				funnel_mm_in_funnel_cam: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&funnel_cam_out_funnel_mm>;
-				};
-			};
-
-			port@3 {
-				reg = <5>;
-				funnel_mm_in_audio_etm0: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&audio_etm0_out_funnel_mm>;
-				};
-			};
-		};
-	};
-
-	funnel_center: funnel@6100000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b908>;
-
-		reg = <0x6100000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-name = "coresight-funnel-center";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				funnel_center_out_funnel_in0: endpoint {
-					remote-endpoint =
-						<&funnel_in0_in_funnel_center>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
-				funnel_center_in_rpm_etm0: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&rpm_etm0_out_funnel_center>;
-				};
-			};
-
-			port@2 {
-				reg = <2>;
-				funnel_center_in_dbgui: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&dbgui_out_funnel_center>;
-				};
-			};
-		};
-	};
-
-	funnel_right: funnel@6120000 {
-		compatible = "arm,primecell";
-
-		reg = <0x6120000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-name = "coresight-funnel-right";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				funnel_right_out_funnel_in0: endpoint {
-					remote-endpoint =
-						<&funnel_in0_in_funnel_right>;
-				};
-			};
-
-			port@1 {
-				reg = <1>;
-				funnel_right_in_modem_etm0: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&modem_etm0_out_funnel_right>;
-				};
-			};
-
-			port@2 {
-				reg = <2>;
-				funnel_right_in_funnel_apss: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&funnel_apss_out_funnel_right>;
-				};
-			};
-		};
-	};
-
-	funnel_cam: funnel@6132000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b908>;
-
-		reg = <0x6132000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-name = "coresight-funnel-cam";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		 port {
-			funnel_cam_out_funnel_mm: endpoint {
-				remote-endpoint = <&funnel_mm_in_funnel_cam>;
-			};
-		};
-	};
-
-	funnel_apss: funnel@61a1000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b908>;
-
-		reg = <0x61a1000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-name = "coresight-funnel-apss";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				funnel_apss_out_funnel_right: endpoint {
-					remote-endpoint =
-						<&funnel_right_in_funnel_apss>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
-				funnel_apss0_in_etm0: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&etm0_out_funnel_apss0>;
-				};
-			};
-
-			port@2 {
-				reg = <1>;
-				funnel_apss0_in_etm1: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&etm1_out_funnel_apss0>;
-				};
-			};
-
-			port@3 {
-				reg = <2>;
-				funnel_apss0_in_etm2: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&etm2_out_funnel_apss0>;
-				};
-			};
-
-			port@4 {
-				reg = <3>;
-				funnel_apss0_in_etm3: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&etm3_out_funnel_apss0>;
-				};
-			};
-		};
-	};
-
-	etm0: etm@61bc000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x000bb95d>;
-
-		reg = <0x61bc000 0x1000>;
-		cpu = <&CPU0>;
-		reg-names = "etm-base";
-
-		coresight-name = "coresight-etm0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-		port {
-			etm0_out_funnel_apss0: endpoint {
-				remote-endpoint = <&funnel_apss0_in_etm0>;
-			};
-		};
-	};
-
-	etm1: etm@61bd000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x000bb95d>;
-
-		reg = <0x61bd000 0x1000>;
-		cpu = <&CPU1>;
-		coresight-name = "coresight-etm1";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		port {
-			etm1_out_funnel_apss0: endpoint {
-				remote-endpoint = <&funnel_apss0_in_etm1>;
-			};
-		};
-	};
-
-	etm2: etm@61be000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x000bb95d>;
-
-		reg = <0x61be000 0x1000>;
-		cpu = <&CPU2>;
-		coresight-name = "coresight-etm2";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		port {
-			etm2_out_funnel_apss0: endpoint {
-				remote-endpoint = <&funnel_apss0_in_etm2>;
-			};
-		};
-	};
-
-	etm3: etm@61bf000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x000bb95d>;
-
-		reg = <0x61bf000 0x1000>;
-		cpu = <&CPU3>;
-		coresight-name = "coresight-etm3";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		port {
-			etm3_out_funnel_apss0: endpoint {
-				remote-endpoint = <&funnel_apss0_in_etm3>;
-			};
-		};
-	};
-
-	stm: stm@6002000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b962>;
-
-		reg = <0x6002000 0x1000>,
-		      <0x9280000 0x180000>;
-		reg-names = "stm-base", "stm-data-base";
-
-		coresight-name = "coresight-stm";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		port {
-			stm_out_funnel_in0: endpoint {
-				remote-endpoint = <&funnel_in0_in_stm>;
-			};
-		};
-	};
-
-	cti0: cti@6010000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6010000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti1: cti@6011000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6011000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti1";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti2: cti@6012000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6012000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti2";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti3: cti@6013000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6013000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti3";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti4: cti@6014000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6014000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti4";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti5: cti@6015000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6015000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti5";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti6: cti@6016000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6016000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti6";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti7: cti@6017000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6017000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti7";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti8: cti@6018000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6018000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti8";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti9: cti@6019000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6019000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti9";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti10: cti@601a000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x601a000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti10";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti11: cti@601b000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x601b000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti11";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti12: cti@601c000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x601c000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti12";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti13: cti@601d000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x601d000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti13";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti14: cti@601e000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x601e000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti14";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti15: cti@601f000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x601f000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti15";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti_cpu0: cti@61b8000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x61b8000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-cpu0";
-		cpu = <&CPU0>;
-		qcom,cti-save;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti_cpu1: cti@61b9000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x61b9000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-cpu1";
-		cpu = <&CPU1>;
-		qcom,cti-save;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti_cpu2: cti@61ba000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x61ba000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-cpu2";
-		cpu = <&CPU2>;
-		qcom,cti-save;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti_cpu3: cti@61bb000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x61bb000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-cpu3";
-		cpu = <&CPU3>;
-		qcom,cti-save;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	cti_modem_cpu0: cti@6124000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6124000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-modem-cpu0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	/* Proto CTI */
-	cti_wcn_cpu0: cti@6139000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6139000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-wcn-cpu0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	/* Venus CTI */
-	cti_video_cpu0: cti@6134000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x6134000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-video-cpu0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	/* LPASS CTI */
-	cti_audio_cpu0: cti@613c000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x613c000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-audio-cpu0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	/* RPM CTI */
-	cti_rpm_cpu0: cti@610c000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b966>;
-
-		reg = <0x610c000 0x1000>;
-		reg-names = "cti-base";
-
-		coresight-name = "coresight-cti-rpm-cpu0";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	/* Proto ETM */
-	wcn_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-		coresight-name = "coresight-wcn-etm0";
-		qcom,inst-id = <3>;
-
-		port {
-			wcn_etm0_out_funnel_mm: endpoint {
-				remote-endpoint = <&funnel_mm_in_wcn_etm0>;
-			};
-		};
-	};
-
-	rpm_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-		coresight-name = "coresight-rpm-etm0";
-		qcom,inst-id = <4>;
-
-		port {
-			rpm_etm0_out_funnel_center: endpoint {
-				remote-endpoint = <&funnel_center_in_rpm_etm0>;
-			};
-		};
-	};
-
-	/* LPASS ETM */
-	audio_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-		coresight-name = "coresight-audio-etm0";
-		qcom,inst-id = <5>;
-
-		port {
-			audio_etm0_out_funnel_mm: endpoint {
-				remote-endpoint = <&funnel_mm_in_audio_etm0>;
-			};
-		};
-	};
-
-	modem_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-		coresight-name = "coresight-modem-etm0";
-		qcom,inst-id = <11>;
-
-		port {
-			modem_etm0_out_funnel_right: endpoint {
-				remote-endpoint = <&funnel_right_in_modem_etm0>;
-			};
-		};
-	};
-
-	csr: csr@6001000 {
-		compatible = "qcom,coresight-csr";
-		reg = <0x6001000 0x1000>;
-		reg-names = "csr-base";
-
-		coresight-name = "coresight-csr";
-
-		qcom,usb-bam-support;
-		qcom,hwctrl-set-support;
-		qcom,set-byte-cntr-support;
-
-		qcom,blk-size = <1>;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
-
-	dbgui: dbgui@6108000 {
-		compatible = "qcom,coresight-dbgui";
-		reg = <0x6108000 0x1000>;
-		reg-names = "dbgui-base";
-
-		coresight-name = "coresight-dbgui";
-
-		qcom,dbgui-addr-offset = <0x30>;
-		qcom,dbgui-data-offset = <0x130>;
-		qcom,dbgui-size = <32>;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		port {
-			dbgui_out_funnel_center: endpoint {
-				remote-endpoint = <&funnel_center_in_dbgui>;
-			};
-		};
-	};
-
-	tpda: tpda@6003000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b969>;
-
-		reg = <0x6003000 0x1000>;
-		reg-names = "tpda-base";
-
-		coresight-name = "coresight-tpda";
-
-		qcom,tpda-atid = <64>;
-		qcom,cmb-elem-size = <0 32>;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				tpda_out_funnel_in0: endpoint {
-					remote-endpoint = <&funnel_in0_in_tpda>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
-				tpda_in_tpdm_dcc: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&tpdm_dcc_out_tpda>;
-				};
-			};
-		};
-	};
-
-	tpdm_dcc: tpdm@6110000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b968>;
-
-		reg = <0x6110000 0x1000>;
-		reg-names = "tpdm-base";
-
-		coresight-name = "coresight-tpdm-dcc";
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-
-		port {
-			tpdm_dcc_out_tpda: endpoint {
-				remote-endpoint = <&tpda_in_tpdm_dcc>;
-			};
-		};
-	};
-
-	hwevent: hwevent@6101000 {
-		compatible = "qcom,coresight-hwevent";
-		reg = <0x6101000 0x148>,
-		      <0x6101fb0 0x4>,
-		      <0x6121000 0x148>,
-		      <0x6121fb0 0x4>,
-		      <0x6131000 0x148>,
-		      <0x6131fb0 0x4>,
-		      <0x78c5010 0x4>,
-		      <0x7885010 0x4>;
-		reg-names = "center-wrapper-mux", "center-wrapper-lockaccess",
-			    "right-wrapper-mux", "right-wrapper-lockaccess",
-			    "mm-wrapper-mux", "mm-wrapper-lockaccess",
-			    "usbbam-mux", "blsp-mux";
-
-		coresight-name = "coresight-hwevent";
-		coresight-csr = <&csr>;
-
-		clocks = <&clock_gcc clk_qdss_clk>,
-			 <&clock_gcc clk_qdss_a_clk>;
-		clock-names = "apb_pclk";
-	};
+&dbgui {
+	qcom,dbgui-size = <32>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
index 5a242db..7eb4d38 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
@@ -47,7 +47,10 @@
 			reg = <0x100>;
 			enable-method = "psci";
 			cpu-release-addr = <0x0 0x90000000>;
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L2_1: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-level = <2>;
@@ -70,7 +73,10 @@
 			reg = <0x101>;
 			enable-method = "psci";
 			cpu-release-addr = <0x0 0x90000000>;
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_101: l1-icache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x8800>;
@@ -87,7 +93,10 @@
 			reg = <0x102>;
 			enable-method = "psci";
 			cpu-release-addr = <0x0 0x90000000>;
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_102: l1-icache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x8800>;
@@ -104,7 +113,10 @@
 			reg = <0x103>;
 			enable-method = "psci";
 			cpu-release-addr = <0x0 0x90000000>;
+			efficiency = <1024>;
+			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_103: l1-icache {
 				compatible = "arm,arch-cache";
 				qcom,dump-size = <0x8800>;
@@ -116,6 +128,35 @@
 		};
 
 	};
+
+	energy_costs: energy-costs {
+		compatible = "sched-energy";
+
+		CPU_COST_0: core-cost0 {
+			busy-cost-data = <
+				 960000 159
+				1094000 207
+				1248000 256
+				1401000 327
+				1497600 343
+			>;
+			idle-cost-data = <
+				100 80 60 40
+			>;
+		};
+		CLUSTER_COST_0: cluster-cost0 {
+			busy-cost-data = <
+				 960000 53
+				1094000 61
+				1248000 71
+				1401000 85
+				1497600 88
+			>;
+			idle-cost-data = <
+				4 3 2 1
+			>;
+		};
+	};
 };
 
 &soc {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8917-gpu.dtsi
index 3b12ba4..6b9de2a 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-gpu.dtsi
@@ -97,6 +97,9 @@
 		/* CPU latency parameter */
 		qcom,pm-qos-active-latency = <651>;
 
+		/* Enable gpu cooling device */
+		#cooling-cells = <2>;
+
 		/* Power levels */
 		qcom,gpu-pwrlevels {
 			#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
index 8238b98..4cd15de 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-pinctrl.dtsi
@@ -1195,6 +1195,49 @@
 			};
 		};
 
+		fpc_reset_int {
+			fpc_reset_low: reset_low {
+				mux {
+					pins = "gpio124";
+					function = "fpc_reset_gpio_low";
+				};
+
+				config {
+					pins = "gpio124";
+					drive-strength = <2>;
+					bias-disable;
+					output-low;
+				};
+			};
+
+			fpc_reset_high: reset_high {
+				mux {
+					pins = "gpio124";
+					function = "fpc_reset_gpio_high";
+				};
+
+				config {
+					pins = "gpio124";
+					drive-strength = <2>;
+					bias-disable;
+					output-high;
+				};
+			};
+
+			fpc_int_low: int_low {
+				mux {
+					pins = "gpio48";
+				};
+				config {
+					pins = "gpio48";
+					drive-strength = <2>;
+					bias-pull-down;
+					input-enable;
+				};
+			};
+		};
+
+
 		i2c_2 {
 			i2c_2_active: i2c_2_active {
 				/* active state */
diff --git a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
index 6a4c10e..5c63ed3 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
@@ -13,7 +13,6 @@
 #include <dt-bindings/clock/msm-clocks-8952.h>
 #include "msm8917-camera-sensor-qrd.dtsi"
 #include "msm8937-mdss-panels.dtsi"
-#include "msm8917-pmi8937.dtsi"
 
 &blsp1_uart2 {
 	status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/msm8917-thermal.dtsi b/arch/arm64/boot/dts/qcom/msm8917-thermal.dtsi
index 98ad44c..4b3e834 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-thermal.dtsi
@@ -12,6 +12,36 @@
 
 #include <dt-bindings/thermal/thermal.h>
 
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi_cooling_devices";
+
+		modem {
+			qcom,instance-id = <0x0>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_vdd: modem_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+};
+
 &thermal_zones {
 	aoss0-usr {
 		polling-delay-passive = <0>;
@@ -41,7 +71,7 @@
 		};
 	};
 
-	mdss-usr {
+	q6-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-governor = "user_space";
@@ -69,7 +99,7 @@
 		};
 	};
 
-	cpuss-0-usr {
+	cpuss-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 4>;
@@ -83,7 +113,7 @@
 		};
 	};
 
-	apc1-cpu1-usr {
+	apc1-cpu0-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 5>;
@@ -97,7 +127,7 @@
 		};
 	};
 
-	apc1-cpu2-usr {
+	apc1-cpu1-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 6>;
@@ -111,7 +141,7 @@
 		};
 	};
 
-	apc1-cpu3-usr {
+	apc1-cpu2-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 7>;
@@ -125,7 +155,7 @@
 		};
 	};
 
-	apc1-cpu4-usr {
+	apc1-cpu3-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 8>;
@@ -152,4 +182,183 @@
 			};
 		};
 	};
+
+	penta-cpu-max-step {
+		polling-delay-passive = <50>;
+		polling-delay = <100>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu_trip: cpu-trip {
+				temperature = <85000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu1_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu2_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu3_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	gpu0-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 9>;
+		thermal-governor = "step_wise";
+		trips {
+			gpu_step_trip: gpu-step-trip {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			gpu_cdev0 {
+				trip = <&gpu_step_trip>;
+				cooling-device =
+					<&msm_gpu THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 5>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu0_trip: apc1-cpu0-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&apc1_cpu0_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 6>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu1_trip: apc1-cpu1-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu1_cdev {
+				trip = <&apc1_cpu1_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 7>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu2_trip: apc1-cpu2-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu2_cdev {
+				trip = <&apc1_cpu2_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 8>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu3_trip: apc1-cpu3-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu3_cdev {
+				trip = <&apc1_cpu3_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
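+	/* Cold-temperature mitigation (tracks-low, 5 C trip): restricts CPU0 and votes CX/modem voltage floors */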
+	aoss0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 0>;
+		tracks-low;
+		trips {
+			aoss_lowf: aoss-lowf {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&aoss_lowf>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT-2)
+							(THERMAL_MAX_LIMIT-2)>;
+			};
+			cx_vdd_cdev {
+				trip = <&aoss_lowf>;
+				cooling-device = <&pm8937_cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&aoss_lowf>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index cb520bd..2118140 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -445,6 +445,7 @@
 		vdd_dig-supply = <&pm8937_s2_level>;
 		vdd_hf_dig-supply = <&pm8937_s2_level_ao>;
 		vdd_hf_pll-supply = <&pm8937_l7_ao>;
+		qcom,gfx3d_clk_src-opp-store-vcorner = <&msm_gpu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -490,6 +491,13 @@
 			< 1401000000 4>,
 			< 1497600000 5>;
 
+		qcom,speed3-bin-v0-c1 =
+			<          0 0>,
+			<  960000000 1>,
+			< 1094400000 2>,
+			< 1248000000 3>,
+			< 1305600000 4>;
+
 		#clock-cells = <1>;
 	};
 
@@ -506,6 +514,7 @@
 			 <  960000 >,
 			 < 1094400 >,
 			 < 1248000 >,
+			 < 1305600 >,
 			 < 1401000 >,
 			 < 1497600 >;
 	};
@@ -832,9 +841,9 @@
 			reg = <0x10 8>;
 		};
 
-		dload_type@18 {
+		dload_type@1c {
 			compatible = "qcom,msm-imem-dload-type";
-			reg = <0x18 4>;
+			reg = <0x1c 4>;
 		};
 
 		restart_reason@65c {
@@ -847,11 +856,20 @@
 			reg = <0x6b0 32>;
 		};
 
+		kaslr_offset@6d0 {
+			compatible = "qcom,msm-imem-kaslr_offset";
+			reg = <0x6d0 12>;
+		};
+
 		pil@94c {
 			compatible = "qcom,msm-imem-pil";
 			reg = <0x94c 200>;
 		};
 
+		diag_dload@c8 {
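+			/* IMEM region shared with the diag driver for download-mode settings */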
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 200>;
+		};
 	};
 
 	 jtag_fuse: jtagfuse@a601c {
@@ -1722,7 +1740,9 @@
 /* GPU overrides */
 &msm_gpu {
 
-	qcom,gpu-speed-bin = <0x6018 0x80000000 31>;
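+	/* Each vector is <fuse register offset, bit mask, bit shift> */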
+	qcom,gpu-speed-bin-vectors =
+		<0x6018 0x80000000 31>,
+		<0x0164 0x00000400 9>;
 	/delete-node/qcom,gpu-pwrlevels;
 
 	qcom,gpu-pwrlevel-bins {
@@ -1854,5 +1874,49 @@
 				qcom,bus-max = <0>;
 			};
 		};
+
+		qcom,gpu-pwrlevels-2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <2>;
+			qcom,initial-pwrlevel = <1>;
+
+			/* NOM */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <465000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <7>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <3>;
+				qcom,bus-max = <5>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <270000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <3>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
index e599b31..8c8f175 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-coresight.dtsi
@@ -853,11 +853,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu0: cti@6198000{
+	cti_cpu0: cti@61b8000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x6198000 0x1000>;
+		reg = <0x61b8000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu0";
 		cpu = <&CPU0>;
@@ -868,11 +868,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu1: cti@6199000{
+	cti_cpu1: cti@61b9000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x6199000 0x1000>;
+		reg = <0x61b9000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu1";
 		cpu = <&CPU1>;
@@ -883,11 +883,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu2: cti@619a000{
+	cti_cpu2: cti@61ba000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x619a000 0x1000>;
+		reg = <0x61ba000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu2";
 		cpu = <&CPU2>;
@@ -898,11 +898,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu3: cti@619b000{
+	cti_cpu3: cti@61bb000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x619b000 0x1000>;
+		reg = <0x61bb000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu3";
 		cpu = <&CPU3>;
@@ -913,11 +913,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu4: cti@61b8000{
+	cti_cpu4: cti@6198000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x61b8000 0x1000>;
+		reg = <0x6198000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu4";
 		cpu = <&CPU4>;
@@ -928,11 +928,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu5: cti@61b9000{
+	cti_cpu5: cti@6199000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x61b9000 0x1000>;
+		reg = <0x6199000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu5";
 		cpu = <&CPU5>;
@@ -943,11 +943,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu6: cti@61ba000{
+	cti_cpu6: cti@619a000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x61ba000 0x1000>;
+		reg = <0x619a000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu6";
 		cpu = <&CPU6>;
@@ -958,11 +958,11 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti_cpu7: cti@61bb000{
+	cti_cpu7: cti@619b000{
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 
-		reg = <0x61bb000 0x1000>;
+		reg = <0x619b000 0x1000>;
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu7";
 		cpu = <&CPU7>;
@@ -1195,7 +1195,7 @@
 		      <0x6121fb0 0x4>,
 		      <0x6131000 0x148>,
 		      <0x6131fb0 0x4>,
-		      <0x7105010 0x4>,
+		      <0x78c5010 0x4>,
 		      <0x7885010 0x4>;
 
 		reg-names = "center-wrapper-mux", "center-wrapper-lockaccess",
diff --git a/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm429.dtsi b/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm429.dtsi
index 433ed7c..29f008a 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm429.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-interposer-sdm429.dtsi
@@ -19,10 +19,10 @@
 	/delete-node/ etm@619d000;
 	/delete-node/ etm@619e000;
 	/delete-node/ etm@619f000;
-	/delete-node/ cti@61b8000;
-	/delete-node/ cti@61b9000;
-	/delete-node/ cti@61ba000;
-	/delete-node/ cti@61bb000;
+	/delete-node/ cti@6198000;
+	/delete-node/ cti@6199000;
+	/delete-node/ cti@619a000;
+	/delete-node/ cti@619b000;
 	/delete-node/ jtagmm@619c000;
 	/delete-node/ jtagmm@619d000;
 	/delete-node/ jtagmm@619e000;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
index 90685e9..b0cc91c 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mtp.dtsi
@@ -106,6 +106,8 @@
 &dsi_truly_1080_vid {
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
 };
 
diff --git a/arch/arm64/boot/dts/qcom/msm8937.dtsi b/arch/arm64/boot/dts/qcom/msm8937.dtsi
index b440cb60..33a4f67 100644
--- a/arch/arm64/boot/dts/qcom/msm8937.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937.dtsi
@@ -160,6 +160,7 @@
 
 };
 
+#include "msm8937-camera.dtsi"
 #include "msm8937-pinctrl.dtsi"
 #include "msm8937-cpu.dtsi"
 #include "msm8937-ion.dtsi"
@@ -1040,9 +1041,9 @@
 			reg = <0x10 8>;
 		};
 
-		dload_type@18 {
+		dload_type@1c {
 			compatible = "qcom,msm-imem-dload-type";
-			reg = <0x18 4>;
+			reg = <0x1c 4>;
 		};
 
 		restart_reason@65c {
@@ -1818,13 +1819,20 @@
 		status = "ok";
 	};
 
+	qcom,csiphy@1b34000 {
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+	};
+
+	qcom,csiphy@1b35000 {
+		compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+	};
+
 };
 
 #include "pm8937-rpm-regulator.dtsi"
 #include "msm8937-regulator.dtsi"
 #include "pm8937.dtsi"
 #include "msm8937-audio.dtsi"
-#include "msm8937-camera.dtsi"
 #include "msm-gdsc-8916.dtsi"
 #include "msm8937-coresight.dtsi"
 #include "msm8937-thermal.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8940-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8940-mtp-overlay.dts
new file mode 100644
index 0000000..21c1190
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-mtp-overlay.dts
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "msm8940-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940-MTP";
+	qcom,msm-id = <313 0x10000>;
+	qcom,board-id = <8 0>;
+};
+
+&vendor {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "batterydata-itech-3000mah.dtsi"
+		#include "batterydata-ascent-3450mAh.dtsi"
+	};
+};
+
+&qpnp_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&qpnp_smbcharger {
+	qcom,battery-data = <&mtp_batterydata>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8940-mtp.dtsi
new file mode 100644
index 0000000..60f3d79
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-mtp.dtsi
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8952.h>
+#include "msm8937-camera-sensor-mtp.dtsi"
+
+&blsp1_uart2 {
+	status = "ok";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart_console_active>;
+};
+
+&sdhc_1 {
+	/* device core power supply */
+	vdd-supply = <&pm8937_l8>;
+	qcom,vdd-voltage-level = <2900000 2900000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l5>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000 192000000
+								384000000>;
+	qcom,nonremovable;
+	qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	vdd-supply = <&pm8937_l11>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <15000 800000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&pm8937_l12>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+	cd-gpios = <&tlmm 67 0x1>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+								200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	status = "ok";
+};
+
+#include "msm8937-mdss-panels.dtsi"
+
+&mdss_mdp {
+	qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+	qcom,dsi-pref-prim-pan = <&dsi_truly_1080_vid>;
+	pinctrl-names = "mdss_default", "mdss_sleep";
+	pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+	qcom,platform-te-gpio = <&tlmm 24 0>;
+	qcom,platform-reset-gpio = <&tlmm 61 0>;
+	qcom,platform-bklight-en-gpio = <&tlmm 59 0>;
+};
+
+&mdss_dsi1 {
+	status = "disabled";
+	qcom,dsi-pref-prim-pan = <&dsi_adv7533_1080p>;
+	pinctrl-names = "mdss_default", "mdss_sleep";
+	pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+	qcom,pluggable;
+	qcom,platform-te-gpio = <&tlmm 24 0>;
+	qcom,platform-reset-gpio = <&tlmm 61 0>;
+	qcom,platform-bklight-en-gpio = <&tlmm 59 0>;
+};
+
+&dsi_truly_1080_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
+&dsi_truly_1080_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,ulps-enabled;
+	qcom,partial-update-enabled;
+	qcom,panel-roi-alignment = <2 2 4 2 1080 2>;
+};
+
+&soc {
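+	/* GPIO keys: camera focus, camera snapshot and volume-up, all wakeup-capable */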
+	gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_key_active>;
+
+		camera_focus {
+			label = "camera_focus";
+			gpios = <&tlmm 128 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x210>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+		camera_snapshot {
+			label = "camera_snapshot";
+			gpios = <&tlmm 127 0x1>;
+			linux,input-type = <1>;
+			linux,code = <0x2fe>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&tlmm 91 0x1>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			debounce-interval = <15>;
+			linux,can-disable;
+			gpio-key,wakeup;
+		};
+
+	};
+};
+
+&pm8937_gpios {
+	nfc_clk {
+		nfc_clk_default: nfc_clk_default {
+			pins = "gpio5";
+			function = "normal";
+			input-enable;
+			power-source = <1>;
+		};
+	};
+};
+
+&i2c_5 { /* BLSP2 QUP1 (NFC) */
+	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 17 0x00>;
+		qcom,nq-ven = <&tlmm 16 0x00>;
+		qcom,nq-firm = <&tlmm 130 0x00>;
+		qcom,nq-clkreq = <&pm8937_gpios 5 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK2";
+		interrupts = <17 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active
+						&nfc_clk_default>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		clocks = <&clock_gcc clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
+};
+
+&thermal_zones {
+	quiet-therm-step {
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8940-pinctrl.dtsi
new file mode 100644
index 0000000..55933b9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pinctrl.dtsi
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8937-pinctrl.dtsi"
+
+&soc {
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8937-mtp.dts b/arch/arm64/boot/dts/qcom/msm8940-pmi8937-mtp.dts
new file mode 100644
index 0000000..386badd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8937-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8940.dtsi"
+#include "msm8940-pmi8937-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940-PMI8937 MTP";
+	compatible = "qcom,msm8940-mtp", "qcom,msm8940", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x10019 0x020037 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8937-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8940-pmi8937-mtp.dtsi
new file mode 100644
index 0000000..9be1664
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8937-mtp.dtsi
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "pmi8937.dtsi"
+#include "msm8940-mtp.dtsi"
+
+&soc {
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8937_flash0 &pmi8937_flash1>;
+		qcom,torch-source = <&pmi8937_torch0 &pmi8937_torch1>;
+		qcom,switch-source = <&pmi8937_switch>;
+	};
+};
+
+&vendor {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "batterydata-itech-3000mah.dtsi"
+		#include "batterydata-ascent-3450mAh.dtsi"
+	};
+};
+
+&qpnp_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+&qpnp_smbcharger {
+	qcom,battery-data = <&mtp_batterydata>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8937.dts b/arch/arm64/boot/dts/qcom/msm8940-pmi8937.dts
new file mode 100644
index 0000000..3f7b358
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8937.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8940.dtsi"
+#include "msm8940-pmi8937.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940 + PMI8937 SOC";
+	compatible = "qcom,msm8940";
+	qcom,pmic-id = <0x10019 0x020037 0x0 0x0>;
+	qcom,pmic-name = "PMI8937";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8937.dtsi b/arch/arm64/boot/dts/qcom/msm8940-pmi8937.dtsi
new file mode 100644
index 0000000..55e8e21
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8937.dtsi
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "pmi8937.dtsi"
+
+&qpnp_smbcharger {
+	qcom,chg-led-sw-controls;
+	qcom,chg-led-support;
+	dpdm-supply = <&usb_otg>;
+};
+
+&usb_otg {
+	extcon = <&qpnp_smbcharger>;
+};
+
+&soc {
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8937_flash0 &pmi8937_flash1>;
+		qcom,torch-source = <&pmi8937_torch0 &pmi8937_torch1>;
+		qcom,switch-source = <&pmi8937_switch>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8940-mtp.dts b/arch/arm64/boot/dts/qcom/msm8940-pmi8940-mtp.dts
new file mode 100644
index 0000000..51a34d4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8940-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8940.dtsi"
+#include "msm8940-mtp.dtsi"
+#include "msm8940-pmi8940.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940-PMI8940 MTP";
+	compatible = "qcom,msm8940-mtp", "qcom,msm8940", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x10019 0x020040 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8940-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8940-pmi8940-mtp.dtsi
new file mode 100644
index 0000000..3cc12e3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8940-mtp.dtsi
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8937-pmi8940-mtp.dtsi"
+#include "msm8940-mtp.dtsi"
+
+&soc {
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8940.dts b/arch/arm64/boot/dts/qcom/msm8940-pmi8940.dts
new file mode 100644
index 0000000..7d47af5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8940.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8940.dtsi"
+#include "msm8940-pmi8940.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940 + PMI8940 SOC";
+	compatible = "qcom,msm8940";
+	qcom,pmic-id = <0x10019 0x020040 0x0 0x0>;
+	qcom,pmic-name = "PMI8940";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8940.dtsi b/arch/arm64/boot/dts/qcom/msm8940-pmi8940.dtsi
new file mode 100644
index 0000000..d9baeca
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8940.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "pmi8940.dtsi"
+
+&soc {
+	led_flash0: qcom,camera-flash {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		qcom,flash-type = <1>;
+		qcom,flash-source = <&pmi8940_flash0 &pmi8940_flash1>;
+		qcom,torch-source = <&pmi8940_torch0 &pmi8940_torch1>;
+		qcom,switch-source = <&pmi8940_switch>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8950-mtp.dts b/arch/arm64/boot/dts/qcom/msm8940-pmi8950-mtp.dts
new file mode 100644
index 0000000..ff6d208
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8950-mtp.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8940.dtsi"
+#include "msm8940-pmi8950-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940-PMI8950 MTP";
+	compatible = "qcom,msm8940-mtp", "qcom,msm8940", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x10019 0x020011 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8950-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8940-pmi8950-mtp.dtsi
new file mode 100644
index 0000000..5f3f3b7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8950-mtp.dtsi
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8937-pmi8950-mtp.dtsi"
+#include "msm8940-mtp.dtsi"
+
+&soc {
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8950.dts b/arch/arm64/boot/dts/qcom/msm8940-pmi8950.dts
new file mode 100644
index 0000000..216b675
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8950.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8940.dtsi"
+#include "msm8940-pmi8950.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940 + PMI8950 SOC";
+	compatible = "qcom,msm8940";
+	qcom,pmic-id = <0x10019 0x020011 0x0 0x0>;
+	qcom,pmic-name = "PMI8950";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940-pmi8950.dtsi b/arch/arm64/boot/dts/qcom/msm8940-pmi8950.dtsi
new file mode 100644
index 0000000..95e003b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940-pmi8950.dtsi
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "pmi8950.dtsi"
+
+&qpnp_smbcharger {
+	qcom,chg-led-sw-controls;
+	qcom,chg-led-support;
+	dpdm-supply = <&usb_otg>;
+};
+
+&usb_otg {
+	extcon = <&qpnp_smbcharger>;
+};
+
+&labibb {
+	status = "ok";
+	qpnp,qpnp-labibb-mode = "lcd";
+};
+
+&mdss_dsi0 {
+	lab-supply = <&lab_regulator>;
+	ibb-supply = <&ibb_regulator>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8940.dtsi b/arch/arm64/boot/dts/qcom/msm8940.dtsi
new file mode 100644
index 0000000..64fbaf9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8940.dtsi
@@ -0,0 +1,680 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include "msm8937.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM8940";
+	compatible = "qcom,msm8940";
+	qcom,msm-id = <313 0x0>;
+
+	soc: soc { };
+
+};
+
+&usb_otg {
+
+	/delete-property/ clocks;
+	clocks = <&clock_gcc clk_gcc_usb_hs_ahb_clk>,
+			 <&clock_gcc clk_gcc_usb_hs_system_clk>,
+			 <&clock_gcc clk_gcc_usb2a_phy_sleep_clk>,
+			 <&clock_gcc clk_bimc_usb_clk>,
+			 <&clock_gcc clk_snoc_usb_clk>,
+			 <&clock_gcc clk_pnoc_usb_clk>,
+			 <&clock_gcc clk_gcc_qusb2_phy_clk>,
+			 <&clock_gcc clk_gcc_usb2_hs_phy_only_clk>,
+			 <&clock_gcc clk_gcc_usb_hs_phy_cfg_ahb_clk>,
+			 <&clock_gcc clk_xo_otg_clk>;
+
+	qcom,usbbam@78c4000 {
+		/delete-property/ qcom,reset-bam-on-disconnect;
+		/delete-node/ qcom,pipe0;
+		qcom,pipe0 {
+			label = "hsusb-ipa-out-0";
+			qcom,usb-bam-mem-type = <1>;
+			qcom,dir = <0>;
+			qcom,pipe-num = <0>;
+			qcom,peer-bam = <1>;
+			qcom,src-bam-pipe-index = <1>;
+			qcom,data-fifo-size = <0x8000>;
+			qcom,descriptor-fifo-size = <0x2000>;
+		};
+		qcom,pipe1 {
+			label = "hsusb-ipa-in-0";
+			qcom,usb-bam-mem-type = <1>;
+			qcom,dir = <1>;
+			qcom,pipe-num = <0>;
+			qcom,peer-bam = <1>;
+			qcom,dst-bam-pipe-index = <0>;
+			qcom,data-fifo-size = <0x8000>;
+			qcom,descriptor-fifo-size = <0x2000>;
+		};
+		qcom,pipe2 {
+			label = "hsusb-qdss-in-0";
+			qcom,usb-bam-mem-type = <2>;
+			qcom,dir = <1>;
+			qcom,pipe-num = <0>;
+			qcom,peer-bam = <0>;
+			qcom,peer-bam-physical-address = <0x6044000>;
+			qcom,src-bam-pipe-index = <0>;
+			qcom,dst-bam-pipe-index = <2>;
+			qcom,data-fifo-offset = <0x0>;
+			qcom,data-fifo-size = <0xe00>;
+			qcom,descriptor-fifo-offset = <0xe00>;
+			qcom,descriptor-fifo-size = <0x200>;
+		};
+		qcom,pipe3 {
+			label = "hsusb-dpl-ipa-in-1";
+			qcom,usb-bam-mem-type = <1>;
+			qcom,dir = <1>;
+			qcom,pipe-num = <1>;
+			qcom,peer-bam = <1>;
+			qcom,dst-bam-pipe-index = <3>;
+			qcom,data-fifo-size = <0x8000>;
+			qcom,descriptor-fifo-size = <0x2000>;
+		};
+	};
+};
+
+&ad_hoc_bus {
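+	/* Add the IPA as a fixed-QoS master port on the system NOC */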
+	mas_ipa: mas-ipa {
+		cell-id = <MSM_BUS_MASTER_IPA>;
+		label = "mas-ipa";
+		qcom,buswidth = <8>;
+		qcom,agg-ports = <1>;
+		qcom,ap-owned;
+		qcom,qport = <14>;
+		qcom,qos-mode = "fixed";
+		qcom,connections = <&snoc_int_1 &slv_snoc_bimc_1>;
+		qcom,prio1 = <0>;
+		qcom,prio0 = <0>;
+		qcom,bus-dev = <&fab_snoc>;
+		qcom,mas-rpm-id = <ICBID_MASTER_IPA>;
+	};
+};
+
+&soc {
+	devfreq_spdm_cpu {
+		compatible = "qcom,devfreq_spdm";
+		qcom,bw-dwnstep = <4000>;
+		qcom,max-vote = <4000>;
+	};
+};
+
+&clock_gcc {
+	compatible = "qcom,gcc-8940";
+};
+
+&clock_debug {
+	compatible = "qcom,cc-debug-8940";
+};
+
+&clock_gcc_mdss {
+	compatible = "qcom,gcc-mdss-8940";
+};
+
+&bam_dmux {
+	status = "disabled";
+};
+
+&soc {
+	ipa_hw: qcom,ipa@07900000 {
+		compatible = "qcom,ipa";
+		reg = <0x07900000 0x4effc>, <0x07904000 0x26934>;
+		reg-names = "ipa-base", "bam-base";
+		interrupts = <0 228 0>,
+				<0 230 0>;
+		interrupt-names = "ipa-irq", "bam-irq";
+		qcom,ipa-hw-ver = <6>; /* IPA core version = IPAv2.6L */
+		qcom,ipa-hw-mode = <0>; /* IPA hw type = Normal */
+		qcom,wan-rx-ring-size = <192>; /* IPA WAN-rx-ring-size*/
+		qcom,lan-rx-ring-size = <192>; /* IPA LAN-rx-ring-size*/
+		clock-names = "core_clk";
+		clocks = <&clock_gcc clk_ipa_clk>;
+		qcom,ee = <0>;
+		qcom,use-ipa-tethering-bridge;
+		qcom,modem-cfg-emb-pipe-flt;
+		qcom,msm-bus,name = "ipa";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+		<90 512 0 0>, /* No BIMC vote (ab=0 Mbps, ib=0 Mbps ~ 0MHZ) */
+		<90 512 100000 800000>, /* SVS (ab=100, ib=800 ~ 50MHz) */
+		<90 512 100000 1200000>; /* PERF (ab=100, ib=1200 ~ 75MHz) */
+		qcom,bus-vector-names = "MIN", "SVS", "PERF";
+		qcom,rx-polling-sleep-ms = <2>; /* Polling sleep interval */
+		qcom,ipa-polling-iteration = <5>;	/* Polling Iteration */
+	};
+
+	qcom,rmnet-ipa {
+		compatible = "qcom,rmnet-ipa";
+		qcom,rmnet-ipa-ssr;
+		qcom,ipa-loaduC;
+		qcom,ipa-advertise-sg-support;
+	};
+
+	qcom,rmtfs_sharedmem@00000000 {
+		reg = <0x00000000 0x00180000>;
+	};
+
+	/* remove 8937 MEM ACC node */
+	/delete-node/ regulator@01946004;
+
+	mem_acc_vreg_corner: regulator@01946004 {
+		compatible = "qcom,mem-acc-regulator";
+		reg = <0xa4000 0x1000>;
+		reg-names = "efuse_addr";
+		regulator-name = "mem_acc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <3>;
+
+		qcom,acc-reg-addr-list =
+			<0x01942138 0x01942130 0x01942120
+			 0x01942124 0x01942128>;
+
+		qcom,acc-init-reg-config = <1 0xfff>;
+
+		qcom,num-acc-corners = <3>;
+		qcom,boot-acc-corner = <2>;
+		qcom,corner1-reg-config =
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)      (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  2 0x555555 >,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>,  <  3        0x0>, <  4        0x0>,
+			<  5       0x0>;
+
+		qcom,corner2-reg-config =
+			/* NOM => SVS+ */
+			<  2 0x555555>,  <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  2 0x555555>,       <  3  0x0>,    <  4  0x0>,
+			<  5	  0x0>;
+
+		qcom,corner3-reg-config =
+			/* TURBO/NOM+ => SVS+ */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* TURBO/NOM+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)	    (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1)  (-1)>;
+
+		qcom,override-acc-range-fuse-list =
+			<37 40 3 0>,	/* foundry id */
+			<36 30 8 0>,	/* iddq apc on */
+			<67  0 6 0>;	/* turbo targ volt */
+
+		qcom,override-fuse-range-map =
+			<0 0>, <  0   0>, <49 63>,
+			<1 1>, <  0   0>, <50 63>,
+			<5 5>, <  0   0>, <51 63>,
+			<0 1>, < 95 255>, < 0 63>,
+			<5 5>, <100 255>, < 0 63>;
+
+		qcom,override-corner1-addr-val-map =
+			/* 1st fuse version tuple matched */
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)      (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  2 0x555555 >,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>,  <  3        0x1>, <  4     0x1000>,
+			<  5       0x0>,
+
+			/* 2nd fuse version tuple matched */
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)      (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  2 0x555555 >,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>,  <  3        0x1>, <  4     0x1000>,
+			<  5       0x0>,
+
+			/* 3rd fuse version tuple matched */
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)      (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  2 0x555555 >,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>,  <  3        0x1>, <  4     0x1000>,
+			<  5       0x0>,
+
+			/* 4th fuse version tuple matched */
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)      (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  2 0x555555 >,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>,  <  3        0x1>, <  4     0x1000>,
+			<  5       0x0>,
+
+			/* 5th fuse version tuple matched */
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)      (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  2 0x555555 >,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>,  <  3        0x1>, <  4     0x1000>,
+			<  5       0x0>;
+
+		qcom,override-corner2-addr-val-map =
+			/* 1st fuse version tuple matched */
+			/* NOM => SVS+ */
+			<  2 0x555555>,  <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  2 0x555555>,       <  3  0x1>,    <  4  0x1000>,
+			<  5	  0x0>,
+
+			/* 2nd fuse version tuple matched */
+			/* NOM => SVS+ */
+			<  2 0x555555>,  <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  2 0x555555>,       <  3  0x1>,    <  4  0x1000>,
+			<  5	  0x0>,
+
+			/* 3rd fuse version tuple matched */
+			/* NOM => SVS+ */
+			<  2 0x555555>,  <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  2 0x555555>,       <  3  0x1>,    <  4  0x1000>,
+			<  5	  0x0>,
+
+			/* 4th fuse version tuple matched */
+			/* NOM => SVS+ */
+			<  2 0x555555>,  <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  2 0x555555>,       <  3  0x1>,    <  4  0x1000>,
+			<  5	  0x0>,
+
+			/* 5th fuse version tuple matched */
+			/* NOM => SVS+ */
+			<  2 0x555555>,  <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  2 0x555555>,       <  3  0x1>,    <  4  0x1000>,
+			<  5	  0x0>;
+
+		qcom,override-corner3-addr-val-map =
+			/* 1st fuse version tuple matched */
+			/* TURBO/NOM+ => SVS+ */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* TURBO/NOM+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)	    (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1)  (-1)>,
+
+			/* 2nd fuse version tuple matched */
+			/* TURBO/NOM+ => SVS+ */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* TURBO/NOM+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)	    (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1)  (-1)>,
+
+			/* 3rd fuse version tuple matched */
+			/* TURBO/NOM+ => SVS+ */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* TURBO/NOM+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)	    (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1)  (-1)>,
+
+			/* 4th fuse version tuple matched */
+			/* TURBO/NOM+ => SVS+ */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* TURBO/NOM+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)	    (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1)  (-1)>,
+
+			/* 5th fuse version tuple matched */
+			/* TURBO/NOM+ => SVS+ */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <  3  0x30C30C3>, <  4  0x30C30C3>,
+			<  5 0x00000C3>,
+			/* TURBO/NOM+ => NOM */
+			<  2 0x555555>,  <  3  0x1041041>, <  4  0x1041041>,
+			<  5 0x0000041>, <(-1)	    (-1)>, <(-1)      (-1)>,
+			<(-1)     (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1)  (-1)>;
+	};
+};
+
+&apc_vreg_corner {
+	/delete-property/ qcom,cpr-fuse-version-map;
+	/delete-property/ qcom,cpr-quotient-adjustment;
+	/delete-property/ qcom,cpr-init-voltage-adjustment;
+	/delete-property/ qcom,cpr-enable;
+
+	qcom,pvs-version-fuse-sel = <37 40 3 0>; /* foundry */
+	qcom,cpr-speed-bin-max-corners =
+		<0 (-1) 1 2 6>,
+		<1 (-1) 1 2 7>;
+
+	qcom,cpr-fuse-version-map =
+		<  0    0  (-1) (-1) (-1) (-1)>,
+		<  0    1  (-1) (-1) (-1) (-1)>,
+		<  0    5  (-1) (-1) (-1) (-1)>,
+		<  1    0  (-1) (-1) (-1) (-1)>,
+		<  1    1  (-1) (-1) (-1) (-1)>,
+		<  1    5  (-1) (-1) (-1) (-1)>;
+
+	qcom,cpr-init-voltage-adjustment =
+		<0    0     0>,
+		<0    0 20000>,
+		<0    0 20000>,
+		<0    0 20000>,
+		<0    0 20000>,
+		<0    0 25000>;
+
+	qcom,cpr-quotient-adjustment =
+		<0    0    0>,
+		<38   0   28>, /* SVSP(20mv); TURBO(15mv); KV(1.9) */
+		<0    0   28>, /* TURBO(15mv); KV(1.9) */
+		<0    0   28>, /* TURBO(15mv); KV(1.9) */
+		<38   0   28>, /* SVSP(20mv); TURBO(15mv); KV(1.9) */
+		<0    0   38>; /* TURBO(20mv); KV(1.9) */
+
+	qcom,cpr-enable;
+};
+
+&mdss_mdp {
+	qcom,vbif-settings = <0x0d0 0x00000020>;
+};
+
+&modem_mem {
+	reg = <0x0 0x86800000 0x0 0x6a00000>;
+};
+
+&adsp_fw_mem {
+	reg = <0x0 0x8d200000 0x0 0x1100000>;
+};
+
+&wcnss_fw_mem {
+	reg = <0x0 0x8e300000 0x0 0x700000>;
+};
+
+&pil_mss {
+	/delete-property/ qcom,qdsp6v56-1-8-inrush-current;
+	qcom,qdsp6v56-1-8;
+};
+
+/* GPU overrides */
+&msm_gpu {
+
+	qcom,restrict-pwrlevel = <1>;
+	/delete-property/qcom,gpu-speed-bin;
+	qcom,gpu-speed-bin = <0x0174 0x80000000 31>;
+
+	/delete-property/qcom,initial-pwrlevel;
+	/delete-node/qcom,gpu-pwrlevel-bins;
+	/delete-node/qcom,gpu-pwrlevels;
+
+	qcom,gpu-pwrlevel-bins {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevel-bins";
+
+		/* Power levels */
+		qcom,gpu-pwrlevels-0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <0>;
+			qcom,initial-pwrlevel = <3>;
+
+			/* SUPER TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <475000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <10>;
+			};
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <450000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <10>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <6>;
+				qcom,bus-max = <9>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <375000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <300000000>;
+				qcom,bus-freq = <5>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <216000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+		/* Power levels */
+		qcom,gpu-pwrlevels-1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,speed-bin = <1>;
+			qcom,initial-pwrlevel = <3>;
+
+			/* SUPER TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <500000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <10>;
+			};
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <450000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <9>;
+				qcom,bus-max = <10>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <6>;
+				qcom,bus-max = <9>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <375000000>;
+				qcom,bus-freq = <6>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <300000000>;
+				qcom,bus-freq = <5>;
+				qcom,bus-min = <4>;
+				qcom,bus-max = <7>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <216000000>;
+				qcom,bus-freq = <3>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+
+			/* XO */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+
+	};
+};
+
+&tsens0 {
+	qcom,temp1-offset = <0 (-2) (-5) (-3) (-1) (-1) (-1) 0 1 (-1) (-6)>;
+	qcom,temp2-offset = <1 1 (-7) 5 4 7 6 2 3 1 7>;
+};
+
+/* CAMSS_CPHY */
+&soc {
+	qcom,csiphy@1b34000 {
+		status = "ok";
+		compatible = "qcom,csiphy-v3.4.2.1", "qcom,csiphy";
+	};
+
+	qcom,csiphy@1b35000 {
+		status = "ok";
+		compatible = "qcom,csiphy-v3.4.2.1", "qcom,csiphy";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera.dtsi
index adfc73f..370c06a 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera.dtsi
@@ -329,7 +329,7 @@
 	};
 
 	qcom,adsp {
-		status = "ok";
+		status = "disabled";
 		compatible = "adsp-shmem-device";
 		memory-region = <&adsp_shmem_device_mem>;
 	};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
index 9b78253..b21983c 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
@@ -151,6 +151,8 @@
 &dsi_truly_1080_vid {
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
 };
 
diff --git a/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
index d3c2e26..87684a7 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-coresight.dtsi
@@ -890,6 +890,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu0";
 		cpu = <&CPU0>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -904,6 +905,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu1";
 		cpu = <&CPU1>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -918,6 +920,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu2";
 		cpu = <&CPU2>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -932,6 +935,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu3";
 		cpu = <&CPU3>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -946,6 +950,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu4";
 		cpu = <&CPU4>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -960,6 +965,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu5";
 		cpu = <&CPU5>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -974,6 +980,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu6";
 		cpu = <&CPU6>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -988,6 +995,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu7";
 		cpu = <&CPU7>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
index cc4bc7f..5655e94 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dtsi
@@ -150,6 +150,8 @@
 &dsi_truly_1080_vid {
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
 };
 
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index b7d72b7..24701bf 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -1345,9 +1345,9 @@
 			reg = <0x10 8>;
 		};
 
-		dload_type@18 {
+		dload_type@1c {
 			compatible = "qcom,msm-imem-dload-type";
-			reg = <0x18 4>;
+			reg = <0x1c 4>;
 		};
 
 		restart_reason@65c {
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index 93516a1..da15b33 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -44,6 +44,7 @@
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-vdd-reference = <1875>;
 			qcom,adc-full-scale-code = <0x70e4>;
+			qcom,pmic-revid = <&pmi632_revid>;
 
 			chan@0 {
 				label = "ref_gnd";
@@ -624,6 +625,7 @@
 			interrupt-names = "sc-irq";
 
 			qcom,pmic-revid = <&pmi632_revid>;
+			qcom,voltage-step-ramp;
 
 			lcdb_ldo_vreg: ldo {
 				label = "ldo";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
index e661e9b..0b36ffe 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
@@ -66,6 +66,29 @@
 			bias-disable;
 			};
 	};
+
+	usb_det_default: usb_det_default {
+		usb_id_det_default: usb_id_det_default {
+			mux {
+				pins = "gpio131";
+				function = "gpio";
+			};
+			config {
+				pins = "gpio131";
+				drive-strength = <2>;
+				bias-pull-down;
+				output-enable;
+			};
+		};
+		usb_vbus_det_default: usb_vbus_det_default {
+			config {
+				pins = "gpio26";
+				drive-strength = <2>;
+				bias-pull-up;
+				input-enable;
+			};
+		};
+	};
 };
 
 &sdhc_2 {
@@ -83,6 +106,22 @@
 	status = "ok";
 };
 
+&soc {
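+	/* GPIO-based extcon device providing USB ID/VBUS detection for the controller below */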
+	extcon_usb1: extcon_usb1 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&tlmm 26 0x1>;
+		trig-gpio = <&tlmm 131 0x0>;
+		gpio-names = "gpio131";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb_vbus_det_default &usb_id_det_default>;
+	};
+};
+
+&usb0 {
+	extcon = <&extcon_usb1>;
+};
+
 &msm_sdw_codec {
 	status = "disabled";
 };
@@ -146,19 +185,23 @@
 &tavil_snd {
 	status = "okay";
 	compatible = "qcom,qcs605-asoc-snd-tavil";
-	qcom,model = "qcs605-tavil-snd-card";
+	qcom,model = "qcs605-ipc-tavil-snd-card";
 	qcom,audio-routing =
 		"AIF4 VI", "MCLK",
 		"RX_BIAS", "MCLK",
 		"MADINPUT", "MCLK",
+		"AMIC1", "MIC BIAS1",
+		"MIC BIAS1", "Handset Mic",
+		"AMIC2", "MIC BIAS2",
+		"MIC BIAS2", "Headset Mic",
 		"DMIC0", "MIC BIAS1",
 		"MIC BIAS1", "Digital Mic0",
 		"DMIC1", "MIC BIAS1",
 		"MIC BIAS1", "Digital Mic1",
-		"DMIC2", "MIC BIAS3",
-		"MIC BIAS3", "Digital Mic2",
-		"DMIC3", "MIC BIAS3",
-		"MIC BIAS3", "Digital Mic3",
+		"DMIC2", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic2",
+		"DMIC3", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic3",
 		"SpkrLeft IN", "SPK1 OUT";
 	qcom,wsa-max-devs = <1>;
 	qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-pmic-overlay.dtsi
index 1264e08..bbe3fb0 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-pmic-overlay.dtsi
@@ -19,6 +19,8 @@
 
 		qcom,pmic-revid = <&pm660_revid>;
 
+		qcom,qcs605-ipc-wa;
+
 		io-channels = <&pm660_rradc 8>,
 			      <&pm660_rradc 10>,
 			      <&pm660_rradc 3>,
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
index 99bf1e5..382fc1d5 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
@@ -58,7 +58,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		ports {
 			#address-cells = <1>;
@@ -81,7 +81,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -112,11 +112,10 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 7801775..3c6c7a8 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -18,40 +18,52 @@
 	qcom,msm-id = <347 0x0>;
 };
 
+&removed_region {
+	reg = <0 0x85fc0000 0 0x1540000>;
+};
+
+&pil_camera_mem {
+	reg = <0 0x8b800000 0 0x500000>;
+};
+
 &pil_modem_mem {
-	reg = <0 0x8b000000 0 0x3100000>;
+	reg = <0 0x8bd00000 0 0x3100000>;
 };
 
 &pil_video_mem {
-	reg = <0 0x8e100000 0 0x500000>;
+	reg = <0 0x8ee00000 0 0x500000>;
 };
 
 &wlan_msa_mem {
-	reg = <0 0x8e600000 0 0x100000>;
+	reg = <0 0x8f300000 0 0x100000>;
 };
 
 &pil_cdsp_mem {
-	reg = <0 0x8e700000 0 0x800000>;
+	reg = <0 0x8f400000 0 0x800000>;
 };
 
 &pil_mba_mem {
-	reg = <0 0x8ef00000 0 0x200000>;
+	reg = <0 0x8fc00000 0 0x200000>;
 };
 
 &pil_adsp_mem {
-	reg = <0 0x8f100000 0 0x1e00000>;
+	reg = <0 0x8fe00000 0 0x1e00000>;
 };
 
 &pil_ipa_fw_mem {
-	reg = <0 0x90f00000 0 0x10000>;
+	reg = <0 0x91c00000 0 0x10000>;
 };
 
 &pil_ipa_gsi_mem {
-	reg = <0 0x90f10000 0 0x5000>;
+	reg = <0 0x91c10000 0 0x5000>;
 };
 
 &pil_gpu_mem {
-	reg = <0 0x90f15000 0 0x2000>;
+	reg = <0 0x91c15000 0 0x2000>;
+};
+
+&qseecom_mem {
+	reg = <0 0x9e800000 0 0x1000000>;
 };
 
 &adsp_mem {
@@ -62,6 +74,11 @@
 	status = "disabled";
 };
 
+&qcom_seecom {
+	reg = <0x86d00000 0x800000>;
+	/delete-property/ qcom,appsbl-qseecom-support;
+};
+
 &sp_mem {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/qm215-audio.dtsi b/arch/arm64/boot/dts/qcom/qm215-audio.dtsi
new file mode 100644
index 0000000..8fa4a4c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qm215-audio.dtsi
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm-audio-lpass.dtsi"
+
+&msm_audio_ion {
+	iommus = <&apps_iommu 0x2001 0x0>;
+	qcom,smmu-sid-mask = /bits/ 64 <0xf>;
+};
+
+&soc {
+	qcom,msm-audio-apr {
+		compatible = "qcom,msm-audio-apr";
+		msm_audio_apr_dummy {
+			compatible = "qcom,msm-audio-apr-dummy";
+		};
+	};
+
+	qcom,avtimer@c0a300c {
+		compatible = "qcom,avtimer";
+		reg = <0x0c0a300c 0x4>,
+			<0x0c0a3010 0x4>;
+		reg-names = "avtimer_lsb_addr", "avtimer_msb_addr";
+		qcom,clk-div = <27>;
+	};
+
+	int_codec: sound {
+		status = "okay";
+		compatible = "qcom,msm8952-audio-codec";
+		qcom,model = "msm8952-snd-card-mtp";
+		reg = <0xc051000 0x4>,
+			<0xc051004 0x4>,
+			<0xc055000 0x4>,
+			<0xc052000 0x4>;
+		reg-names = "csr_gp_io_mux_mic_ctl",
+			"csr_gp_io_mux_spkr_ctl",
+			"csr_gp_io_lpaif_pri_pcm_pri_mode_muxsel",
+			"csr_gp_io_mux_quin_ctl";
+
+		qcom,msm-ext-pa = "primary";
+		qcom,msm-mclk-freq = <9600000>;
+		qcom,msm-mbhc-hphl-swh = <1>;
+		qcom,msm-mbhc-gnd-swh = <1>;
+		qcom,msm-hs-micbias-type = "external";
+		qcom,msm-micbias1-ext-cap;
+
+		qcom,audio-routing =
+				"RX_BIAS", "MCLK",
+				"SPK_RX_BIAS", "MCLK",
+				"INT_LDO_H", "MCLK",
+				"RX_I2S_CLK", "MCLK",
+				"TX_I2S_CLK", "MCLK",
+				"MIC BIAS External", "Handset Mic",
+				"MIC BIAS External2", "Headset Mic",
+				"MIC BIAS External", "Secondary Mic",
+				"AMIC1", "MIC BIAS External",
+				"AMIC2", "MIC BIAS External2",
+				"AMIC3", "MIC BIAS External",
+				"ADC1_IN", "ADC1_OUT",
+				"ADC2_IN", "ADC2_OUT",
+				"ADC3_IN", "ADC3_OUT",
+				"PDM_IN_RX1", "PDM_OUT_RX1",
+				"PDM_IN_RX2", "PDM_OUT_RX2",
+				"PDM_IN_RX3", "PDM_OUT_RX3";
+
+		qcom,pri-mi2s-gpios = <&cdc_pri_mi2s_gpios>;
+		qcom,quin-mi2s-gpios = <&cdc_quin_mi2s_gpios>;
+
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&pcm_noirq>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-pcm-dsp-noirq";
+		asoc-cpu = <&dai_pri_auxpcm>,
+			<&dai_mi2s0>, <&dai_mi2s1>,
+			<&dai_mi2s2>, <&dai_mi2s3>,
+			<&dai_mi2s4>, <&dai_mi2s5>,
+			<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+			<&sb_3_rx>, <&sb_3_tx>, <&sb_4_rx>, <&sb_4_tx>,
+			<&bt_sco_rx>, <&bt_sco_tx>,
+			<&int_fm_rx>, <&int_fm_tx>,
+			<&afe_pcm_rx>, <&afe_pcm_tx>,
+			<&afe_proxy_rx>, <&afe_proxy_tx>,
+			<&incall_record_rx>, <&incall_record_tx>,
+			<&incall_music_rx>, <&incall_music_2_rx>;
+
+		asoc-cpu-names = "msm-dai-q6-auxpcm.1",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-mi2s.4", "msm-dai-q6-mi2s.6",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.12288", "msm-dai-q6-dev.12289",
+				"msm-dai-q6-dev.12292", "msm-dai-q6-dev.12293",
+				"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+				"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+				"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+				"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770";
+
+		asoc-codec = <&stub_codec>, <&msm_digital_codec>,
+				<&pmic_analog_codec>;
+		asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
+					"analog-codec";
+	};
+
+	cdc_us_euro_sw: msm_cdc_pinctrl_us_euro_sw {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cross_conn_det_act>;
+		pinctrl-1 = <&cross_conn_det_sus>;
+	};
+
+	cdc_pri_mi2s_gpios: msm_cdc_pinctrl_pri {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_pdm_lines_act &cdc_pdm_lines_2_act>;
+		pinctrl-1 = <&cdc_pdm_lines_sus &cdc_pdm_lines_2_sus>;
+	};
+
+	cdc_quin_mi2s_gpios: msm_cdc_pinctrl_quin {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&pri_tlmm_lines_act &pri_tlmm_ws_act>;
+		pinctrl-1 = <&pri_tlmm_lines_sus &pri_tlmm_ws_sus>;
+	};
+};
+
+&pm8916_1 {
+	pmic_analog_codec: analog-codec@f000 {
+		status = "okay";
+		compatible = "qcom,pmic-analog-codec";
+		reg = <0xf000 0x200>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>,
+			<0x1 0xf0 0x1 IRQ_TYPE_NONE>,
+			<0x1 0xf0 0x2 IRQ_TYPE_NONE>,
+			<0x1 0xf0 0x3 IRQ_TYPE_NONE>,
+			<0x1 0xf0 0x4 IRQ_TYPE_NONE>,
+			<0x1 0xf0 0x5 IRQ_TYPE_NONE>,
+			<0x1 0xf0 0x6 IRQ_TYPE_NONE>,
+			<0x1 0xf0 0x7 IRQ_TYPE_NONE>,
+			<0x1 0xf1 0x0 IRQ_TYPE_NONE>,
+			<0x1 0xf1 0x1 IRQ_TYPE_NONE>,
+			<0x1 0xf1 0x2 IRQ_TYPE_NONE>,
+			<0x1 0xf1 0x3 IRQ_TYPE_NONE>,
+			<0x1 0xf1 0x4 IRQ_TYPE_NONE>,
+			<0x1 0xf1 0x5 IRQ_TYPE_NONE>;
+		interrupt-names = "spk_cnp_int",
+				"spk_clip_int",
+				"spk_ocp_int",
+				"ins_rem_det1",
+				"but_rel_det",
+				"but_press_det",
+				"ins_rem_det",
+				"mbhc_int",
+				"ear_ocp_int",
+				"hphr_ocp_int",
+				"hphl_ocp_det",
+				"ear_cnp_int",
+				"hphr_cnp_int",
+				"hphl_cnp_int";
+
+		cdc-vdd-pa-cp-supply = <&pm8916_s4>;
+		qcom,cdc-vdd-pa-cp-voltage = <2050000 2050000>;
+		qcom,cdc-vdd-pa-cp-current = <550000>;
+
+		cdc-vdd-io-supply = <&pm8916_l5>;
+		qcom,cdc-vdd-io-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-io-current = <5000>;
+
+		cdc-vdda-h-supply = <&pm8916_l5>;
+		qcom,cdc-vdda-h-voltage = <1800000 1800000>;
+		qcom,cdc-vdda-h-current = <10000>;
+
+		cdc-vdd-mic-bias-supply = <&pm8916_l13>;
+		qcom,cdc-vdd-mic-bias-voltage = <3075000 3075000>;
+		qcom,cdc-vdd-mic-bias-current = <5000>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-io",
+					"cdc-vdd-pa-cp",
+					"cdc-vdda-h";
+
+		qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
+
+		msm_digital_codec: msm-dig-codec {
+			compatible = "qcom,msm-digital-codec";
+			reg = <0xc0f0000 0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
index 796f21a..46e4bf7 100644
--- a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
@@ -32,7 +32,22 @@
 
 /* delete all node referring PM8937 */
 &soc {
-	/delete-node/ thermal-zones;
+	thermal-zones {
+		/delete-node/ pa-therm1-adc;
+		/delete-node/ xo-therm-adc;
+		/delete-node/ xo-therm-buf-adc;
+		/delete-node/ case-therm-adc;
+		/delete-node/ pa-therm0-adc;
+		/delete-node/ pm8937_tz;
+
+		aoss0-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					/delete-property/ cooling-device;
+				};
+			};
+		};
+	};
 
 	qcom,cpu-clock-8939@b111050 {
 		/delete-property/ vdd-c1-supply;
@@ -56,40 +71,59 @@
 		/delete-property/ vdd_cx-supply;
 		/delete-property/ vdd_mx-supply;
 		/delete-property/ vdd_pll-supply;
+		vdd_mss-supply = <&pm8916_s1_level>;
+		vdd_cx-supply = <&pm8916_s1_level>;
+		vdd_mx-supply = <&pm8916_l2_level_ao>;
+		vdd_pll-supply = <&pm8916_l7>;
 	};
 
 	qcom,lpass@c200000 {
 		/delete-property/ vdd_cx-supply;
+		vdd_cx-supply = <&pm8916_s1_level>;
 	};
 
 	qcom,pronto@a21b000 {
 		/delete-property/ vdd_pronto_pll-supply;
+		vdd_pronto_pll-supply = <&pm8916_l7>;
 	};
 
 	qcom,wcnss-wlan@a000000 {
-		/delete-property/ qcom,pronto-vddmx-supply;
-		/delete-property/ qcom,pronto-vddcx-supply;
-		/delete-property/ qcom,pronto-vddpx-supply;
-		/delete-property/ qcom,iris-vddxo-supply;
-		/delete-property/ qcom,iris-vddrfa-supply;
-		/delete-property/ qcom,iris-vddpa-supply;
-		/delete-property/ qcom,iris-vdddig-supply;
 		/delete-property/ qcom,wcnss-adc_tm;
-	};
+		qcom,pronto-vddmx-supply = <&pm8916_l2_level_ao>;
+		qcom,pronto-vddcx-supply = <&pm8916_s1_level>;
+		qcom,pronto-vddpx-supply = <&pm8916_l7>;
+		qcom,iris-vddxo-supply   = <&pm8916_l7>;
+		qcom,iris-vddrfa-supply  = <&pm8916_l3>;
+		qcom,iris-vddpa-supply   = <&pm8916_l9>;
+		qcom,iris-vdddig-supply  = <&pm8916_l7>;
 
-	/delete-node/ qcom,gcc-mdss@1800000;
+		qcom,iris-vddxo-voltage-level = <1800000 0 1800000>;
+		qcom,iris-vddrfa-voltage-level = <1325000 0 1325000>;
+		qcom,iris-vddpa-voltage-level = <3300000 0 3300000>;
+		qcom,iris-vdddig-voltage-level = <1800000 0 1800000>;
+
+		qcom,vddmx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_NOM
+					    RPM_SMD_REGULATOR_LEVEL_NONE
+					    RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		qcom,vddcx-voltage-level = <RPM_SMD_REGULATOR_LEVEL_NOM
+					    RPM_SMD_REGULATOR_LEVEL_NONE
+					    RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		qcom,vddpx-voltage-level = <1800000 0 1800000>;
+
+		qcom,iris-vddxo-current = <10000>;
+		qcom,iris-vddrfa-current = <100000>;
+		qcom,iris-vddpa-current = <515000>;
+		qcom,iris-vdddig-current = <10000>;
+
+		qcom,pronto-vddmx-current = <0>;
+		qcom,pronto-vddcx-current = <0>;
+		qcom,pronto-vddpx-current = <0>;
+	};
 
 	/delete-node/ qcom,csid@1b30000;
 	/delete-node/ qcom,csid@1b30400;
 	/delete-node/ qcom,csid@1b30800;
 
-	/delete-node/ qcom,mdss_mdp@1a00000;
-	/delete-node/ qcom,mdss_dsi@0;
-	/delete-node/ qcom,mdss_wb_panel;
-	/delete-node/ qcom,mdss_rotator;
-	/delete-node/ qcom,mdss_dsi_pll@1a94a00;
-	/delete-node/ qcom,mdss_dsi_pll@1a96a00;
-
 	/* mem_acc */
 	/delete-node/ regulator@01946004;
 	/* apc vreg */
@@ -107,3 +141,359 @@
 		/delete-node/ msm8x16_wcd_codec@f000;
 	};
 };
+
+&pm8916_gpios {
+	disp_vdda_en_default: disp_vdda_en_default {
+		pins = "gpio3";
+		function = "normal";
+		power-source = <0>;
+		drive-strength = <8>;
+		output-high;
+	};
+};
+
+&soc {
+	disp_vdda_eldo1: gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "disp_vdda_eldo1";
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&pm8916_gpios 3 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&disp_vdda_en_default>;
+		vin-supply = <&pm8916_s3>;
+	};
+};
+
+&mdss_dsi0_pll {
+	vddio-supply = <&pm8916_l6>;
+};
+
+&mdss_dsi0 {
+	/delete-property/ vdd-supply;
+	vddio-supply = <&pm8916_l6>;
+};
+
+&mdss_dsi {
+	vdda-supply = <&pm8916_l6>;
+	vddio-supply = <&pm8916_l6>;
+
+	qcom,phy-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,phy-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1744000>;
+			qcom,supply-max-voltage = <1904000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+		};
+	};
+
+	qcom,ctrl-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ctrl-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vdda";
+			qcom,supply-min-voltage = <1744000>;
+			qcom,supply-max-voltage = <1904000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+		};
+	};
+};
+
+&clock_cpu {
+	vdd-c1-supply = <&apc_vreg_corner>;
+};
+
+&clock_gcc {
+	vdd_dig-supply = <&pm8916_s1_level>;
+	vdd_hf_dig-supply = <&pm8916_s1_level_ao>;
+	vdd_hf_pll-supply = <&pm8916_l7_ao>;
+};
+
+&pm8916_vadc {
+	chan@0 {
+		label = "usb_in";
+		reg = <0>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <7>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@2 {
+		label = "ireg_fb";
+		reg = <2>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <6>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@5 {
+		label = "vcoin";
+		reg = <5>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@6 {
+		label = "vbat_sns";
+		reg = <6>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@7 {
+		label = "vph_pwr";
+		reg = <7>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@b {
+		label = "chg_temp";
+		reg = <0xb>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <3>;
+		qcom,hw-settle-time = <0>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@11 {
+		label = "skin_therm";
+		reg = <0x11>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
+	};
+
+	chan@30 {
+		label = "batt_therm";
+		reg = <0x30>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <22>;
+		qcom,hw-settle-time = <0xb>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@31 {
+		label = "batt_id";
+		reg = <0x31>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0xb>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@36 {
+		label = "pa_therm0";
+		reg = <0x36>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
+	};
+
+	chan@32 {
+		label = "xo_therm";
+		reg = <0x32>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
+	};
+
+	chan@3c {
+		label = "xo_therm_buf";
+		reg = <0x3c>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <4>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
+	};
+};
+
+&pm8916_adc_tm {
+	/* Channel Node */
+	chan@30 {
+		label = "batt_therm";
+		reg = <0x30>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <8>;
+		qcom,hw-settle-time = <0xb>;
+		qcom,fast-avg-setup = <0x2>;
+		qcom,btm-channel-number = <0x48>;
+	};
+
+	chan@6 {
+		label = "vbat_sns";
+		reg = <0x6>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <1>;
+		qcom,calibration-type = "absolute";
+		qcom,scale-function = <0>;
+		qcom,hw-settle-time = <0xb>;
+		qcom,fast-avg-setup = <0x2>;
+		qcom,btm-channel-number = <0x68>;
+	};
+};
+
+&soc {
+	thermal-zones {
+		xo-therm-buf-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&pm8916_vadc 0x3c>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		xo-therm-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&pm8916_vadc 0x32>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		pa-therm0-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&pm8916_vadc 0x36>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		skin-therm-adc {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&pm8916_vadc 0x11>;
+			thermal-governor = "user_space";
+
+			trips {
+				active-config0 {
+					temperature = <65000>;
+					hysteresis = <1000>;
+					type = "passive";
+				};
+			};
+		};
+
+		aoss0-lowf {
+			cooling-maps {
+				cx_vdd_cdev {
+					cooling-device = <&pm8916_cx_cdev 0 0>;
+				};
+			};
+		};
+
+		pm8916_tz {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "step_wise";
+			thermal-sensors = <&pm8916_tz>;
+
+			trips {
+				pm8916_trip0: pm8916-trip0 {
+					temperature = <105000>;
+					hysteresis = <0>;
+					type = "passive";
+				};
+				pm8916_trip1: pm8916-trip1 {
+					temperature = <125000>;
+					hysteresis = <0>;
+					type = "passive";
+				};
+				pm8916_trip2: pm8916-trip2 {
+					temperature = <145000>;
+					hysteresis = <0>;
+					type = "passive";
+				};
+			};
+		};
+	};
+};
+
+&soc {
+	usb_vdig_supply: usb_vdig_supply {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_vdig_supply";
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+	};
+};
+
+&usb_otg {
+	hsusb_vdd_dig-supply = <&usb_vdig_supply>;
+	HSUSB_1p8-supply = <&pm8916_l7>;
+	HSUSB_3p3-supply = <&pm8916_l13>;
+	extcon = <&pm8916_chg>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi b/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
index 89c9aa9..76540a1 100644
--- a/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
@@ -17,10 +17,16 @@
 
 &pm8916_chg{
 	status = "ok";
+	qcom,chgr-led-support;
+	qcom,vddmax-mv = <4400>;
+	qcom,vddsafe-mv = <4400>;
 };
 
 &pm8916_bms{
 	status = "ok";
+	qcom,battery-data = <&qrd_batterydata>;
+	qcom,batt-aging-comp;
+	qcom,resume-soc = <99>;
 };
 
 &pm8916_vib{
@@ -44,4 +50,111 @@
 			gpio-key,wakeup;
 		};
 	};
+
+	fpc1020 {
+		compatible = "fpc,fpc1020";
+		interrupt-parent = <&tlmm>;
+		interrupts = <48 0>;
+		fpc,gpio_rst = <&tlmm 124 0x0>;
+		fpc,gpio_irq = <&tlmm 48 0>;
+		vcc_spi-supply = <&pm8916_l5>;
+		vdd_io-supply  = <&pm8916_l5>;
+		vdd_ana-supply = <&pm8916_l5>;
+		fpc,enable-on-boot;
+		pinctrl-names = "fpc1020_reset_reset",
+				"fpc1020_reset_active",
+				"fpc1020_irq_active";
+		pinctrl-0 = <&fpc_reset_low>;
+		pinctrl-1 = <&fpc_reset_high>;
+		pinctrl-2 = <&fpc_int_low>;
+	};
+};
+
+&mdss_dsi_active {
+	mux {
+		pins = "gpio60", "gpio93", "gpio94";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio60", "gpio93", "gpio94";
+		drive-strength = <8>; /* 8 mA */
+		bias-disable = <0>; /* no pull */
+		output-high;
+	};
+};
+
+&mdss_dsi_suspend {
+	mux {
+		pins = "gpio60", "gpio93", "gpio94";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio60", "gpio93", "gpio94";
+		drive-strength = <2>; /* 2 mA */
+		bias-pull-down; /* pull down */
+	};
+};
+
+#include "msm8937-mdss-panels.dtsi"
+
+&mdss_mdp {
+	qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&dsi_panel_pwr_supply {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	qcom,panel-supply-entry@0 {
+		reg = <0>;
+		qcom,supply-name = "vddio";
+		qcom,supply-min-voltage = <1744000>;
+		qcom,supply-max-voltage = <1904000>;
+		qcom,supply-enable-load = <100000>;
+		qcom,supply-disable-load = <100>;
+	};
+	/delete-node/ qcom,panel-supply-entry@1;
+	/delete-node/ qcom,panel-supply-entry@2;
+	/delete-node/ qcom,panel-supply-entry@3;
+};
+
+&mdss_dsi {
+	hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+	qcom,dsi-pref-prim-pan = <&dsi_hx8399c_hd_vid>;
+	pinctrl-names = "mdss_default", "mdss_sleep";
+	pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+	qcom,platform-te-gpio = <&tlmm 24 0>;
+	qcom,platform-reset-gpio = <&tlmm 60 0>;
+	qcom,platform-bklight-en-gpio = <&tlmm 93 0>;
+	qcom,platform-enable-gpio = <&tlmm 94 0>;
+};
+
+&dsi_hx8399c_hd_vid {
+	qcom,mdss-dsi-panel-timings =
+		[e7 1c 12 00 42 42 18 20 17 03 04 00];
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+	qcom,mdss-dsi-pwm-gpio = <&pm8916_mpps 4 0>;
+	qcom,esd-check-enabled;
+	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+	qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+	qcom,mdss-dsi-panel-status-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-on-check-value = <0x9d 0x9d 0x9d 0x9d>;
+	qcom,mdss-dsi-panel-status-read-length = <4>;
+	qcom,mdss-dsi-panel-max-error-count = <3>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 };
diff --git a/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi b/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi
index cab4661..6f6d301 100644
--- a/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi
@@ -49,6 +49,14 @@
 			qcom,use-voltage-floor-level;
 			qcom,always-send-voltage;
 		};
+
+		pm8916_cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm8916_s1_floor_level>;
+			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM_PLUS
+					RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpm-regulator-smpa3 {
@@ -543,5 +551,10 @@
 		qcom,cpr-voltage-scaling-factor-max = <0 2000 2000>;
 		qcom,cpr-scaled-init-voltage-as-ceiling;
 		qcom,cpr-fuse-revision = <69 39 3 0>;
+		qcom,cpr-quotient-adjustment =
+				<50      40     50>;
+		qcom,cpr-init-voltage-adjustment =
+				<30000   5000   10000>;
+		qcom,cpr-enable;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/qm215.dts b/arch/arm64/boot/dts/qcom/qm215.dts
index 3fd0064..e1acd31 100644
--- a/arch/arm64/boot/dts/qcom/qm215.dts
+++ b/arch/arm64/boot/dts/qcom/qm215.dts
@@ -15,8 +15,10 @@
 
 #include "qm215.dtsi"
 #include "qm215-pm8916.dtsi"
+#include "qm215-audio.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. QM215 QRD";
+	model = "Qualcomm Technologies, Inc. QM215";
 	compatible = "qcom,qm215";
+	qcom,pmic-name = "PM8916";
 };
diff --git a/arch/arm64/boot/dts/qcom/qm215.dtsi b/arch/arm64/boot/dts/qcom/qm215.dtsi
index dc7ad4c..c16774b 100644
--- a/arch/arm64/boot/dts/qcom/qm215.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215.dtsi
@@ -16,6 +16,16 @@
 	model = "Qualcomm Technologies, Inc. QM215";
 	compatible = "qcom,qm215";
 	qcom,msm-id = <386 0x0>;
+	qcom,msm-name = "QM215";
+};
+
+/ {
+	qrd_batterydata: qcom,batterydata {
+		qcom,rpull-up-kohm = <100>;
+		qcom,vref-batt-therm = <1800000>;
+
+		#include "vbms-batterydata-mlp356477-2800mah.dtsi"
+	};
 };
 
 &soc {
diff --git a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
index f6bf107d..2426b47 100644
--- a/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-svr.dtsi
@@ -576,5 +576,6 @@
 };
 
 &wil6210 {
+	qcom,wigig-dc = <&tlmm 81 0>;
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429-cpu.dtsi b/arch/arm64/boot/dts/qcom/sdm429-cpu.dtsi
index 6c4ea9f..de0730d 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429-cpu.dtsi
@@ -134,6 +134,7 @@
 				1708800	327
 				1804800	343
 				1958400	445
+				2016000	470
 			>;
 			idle-cost-data = <
 				100 80 60 40
@@ -147,6 +148,7 @@
 				1708800	85
 				1804800	88
 				1958400	110
+				2016000	120
 			>;
 			idle-cost-data = <
 				4 3 2 1
diff --git a/arch/arm64/boot/dts/qcom/sdm429.dtsi b/arch/arm64/boot/dts/qcom/sdm429.dtsi
index 0a8aab3..06c54a1 100644
--- a/arch/arm64/boot/dts/qcom/sdm429.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429.dtsi
@@ -24,10 +24,10 @@
 	/delete-node/ etm@619d000;
 	/delete-node/ etm@619e000;
 	/delete-node/ etm@619f000;
-	/delete-node/ cti@61b8000;
-	/delete-node/ cti@61b9000;
-	/delete-node/ cti@61ba000;
-	/delete-node/ cti@61bb000;
+	/delete-node/ cti@6198000;
+	/delete-node/ cti@6199000;
+	/delete-node/ cti@619a000;
+	/delete-node/ cti@619b000;
 	/delete-node/ jtagmm@619c000;
 	/delete-node/ jtagmm@619d000;
 	/delete-node/ jtagmm@619e000;
@@ -60,7 +60,8 @@
 			 < 1497600 >,
 			 < 1708800 >,
 			 < 1804800 >,
-			 < 1958400 >;
+			 < 1958400 >,
+			 < 2016000 >;
 	};
 
 	/delete-node/ devfreq-cpufreq;
@@ -73,7 +74,8 @@
 			< 1497600  5712 >,
 			< 1708800  6445 >,
 			< 1804800  7104 >,
-			< 1958400  7104 >;
+			< 1958400  7104 >,
+			< 2016000  7104 >;
 		};
 
 		cci-cpufreq {
@@ -84,7 +86,8 @@
 			< 1497600  400000 >,
 			< 1708800  533000 >,
 			< 1804800  576000 >,
-			< 1958400  576000 >;
+			< 1958400  576000 >,
+			< 2016000  576000 >;
 		};
 
 		mincpubw-cpufreq {
@@ -203,6 +206,20 @@
 			<  400000000 1>,
 			<  533333333 3>;
 
+		qcom,speed4-bin-v0-c1 =
+			<          0 0>,
+			<  960000000 1>,
+			< 1305600000 1>,
+			< 1497600000 2>,
+			< 1708800000 3>,
+			< 1958400000 5>,
+			< 2016000000 6>;
+
+		qcom,speed4-bin-v0-cci =
+			<          0 0>,
+			<  400000000 1>,
+			<  533333333 3>;
+
 		#clock-cells = <1>;
 	};
 
@@ -218,6 +235,10 @@
 	status = "disabled";
 };
 
+&qseecom_ta_mem {
+	size = <0 0x400000>;
+};
+
 &clock_gcc_mdss {
 	compatible = "qcom,gcc-mdss-sdm429";
 	clocks = <&mdss_dsi0_pll clk_dsi0pll_pixel_clk_src>,
diff --git a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
index 42c3e83..750cedb 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-regulator.dtsi
@@ -378,12 +378,12 @@
 			reg = <0x2000 0x100>;
 			regulator-name = "pm8953_s5";
 			regulator-min-microvolt = <490000>;
-			regulator-max-microvolt = <960000>;
+			regulator-max-microvolt = <980000>;
 
 			pm8953_s5_limit: avs-limit-regulator {
 				regulator-name = "pm8953_s5_avs_limit";
 				regulator-min-microvolt = <490000>;
-				regulator-max-microvolt = <960000>;
+				regulator-max-microvolt = <980000>;
 			};
 		};
 	};
@@ -418,14 +418,14 @@
 		interrupts = <0 15 0>;
 		regulator-name = "apc_corner";
 		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <5>;
+		regulator-max-microvolt = <6>;
 
 		qcom,cpr-fuse-corners = <3>;
-		qcom,cpr-voltage-ceiling = <810000 845000 960000>;
+		qcom,cpr-voltage-ceiling = <810000 845000 980000>;
 		qcom,cpr-voltage-floor =   <700000 700000 790000>;
 		vdd-apc-supply = <&pm8953_s5>;
 		mem-acc-supply = <&apc_mem_acc_vreg>;
-		qcom,mem-acc-corner-map = <1 1 1 1 2>;
+		qcom,mem-acc-corner-map = <1 1 1 1 2 2>;
 
 		qcom,cpr-ref-clk = <19200>;
 		qcom,cpr-timer-delay = <5000>;
@@ -455,15 +455,19 @@
 					<70 54 7 0>;
 		qcom,cpr-fuse-quot-offset-scale = <5 5 5>;
 		qcom,cpr-init-voltage-step = <10000>;
-		qcom,cpr-corner-map = <1 2 3 3 3>;
+		qcom,cpr-corner-map = <1 2 3 3 3 3>;
 		qcom,cpr-corner-frequency-map =
 				<1 1305600000>,
 				<2 1497600000>,
 				<3 1708800000>,
 				<4 1804800000>,
-				<5 1958400000>;
+				<5 1958400000>,
+				<6 2016000000>;
 		qcom,speed-bin-fuse-sel = <37 34 3 0>;
-		qcom,cpr-speed-bin-max-corners = <(-1) (-1) 1 2 5>;
+		qcom,cpr-speed-bin-max-corners =
+					<0 (-1) 1 2 5>,
+					<1 (-1) 1 2 5>,
+					<4 (-1) 1 2 6>;
 		qcom,cpr-fuse-revision = <69 39 3 0>;
 		qcom,cpr-quot-adjust-scaling-factor-max = <0 1400 1400>;
 		qcom,cpr-voltage-scaling-factor-max = <0 2000 2000>;
@@ -481,17 +485,17 @@
 			<0        0       0>;
 
 		qcom,cpr-floor-to-ceiling-max-range =
-			<50000 50000 65000 65000 65000>,
-			<50000 50000 65000 65000 65000>,
-			<50000 50000 65000 65000 65000>;
+			<50000 50000 65000 65000 65000 65000>,
+			<50000 50000 65000 65000 65000 65000>,
+			<50000 50000 65000 65000 65000 65000>;
 
 		qcom,cpr-voltage-ceiling-override =
-			<(-1) (-1) 810000 845000 885000 960000 960000>;
+			<(-1) (-1) 810000 845000 885000 980000 980000 980000>;
 
 		qcom,cpr-virtual-corner-quotient-adjustment =
-			<0  0    0   0   0>,
-			<0  0  (-22) 0   0>, /* NOMP: -10 mV */
-			<0  0    0   0   0>;
+			<0  0    0   0   0    0>,
+			<0  0  (-22) 0   0    0>, /* NOMP: -10 mV */
+			<0  0    0   0   0    0>;
 
 		qcom,cpr-enable;
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index ef22440..3fee488 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -55,7 +55,8 @@
 			 < 1497600 >,
 			 < 1708800 >,
 			 < 1804800 >,
-			 < 1958400 >;
+			 < 1958400 >,
+			 < 2016000 >;
 
 		qcom,cpufreq-table-4 =
 			 <  768000 >,
@@ -128,7 +129,8 @@
 			< 1497600  5712 >,
 			< 1708800  6445 >,
 			< 1804800  7104 >,
-			< 1958400  7104 >;
+			< 1958400  7104 >,
+			< 2016000  7104 >;
 		cpu-to-dev-map-4 =
 			<  768000  2929 >,
 			<  998400  5053 >,
@@ -145,7 +147,8 @@
 			< 1497600  400000 >,
 			< 1708800  533000 >,
 			< 1804800  576000 >,
-			< 1958400  576000 >;
+			< 1958400  576000 >,
+			< 2016000  576000 >;
 		cpu-to-dev-map-4 =
 			<  768000  400000 >,
 			<  998400  400000 >,
@@ -179,6 +182,7 @@
 				1708800	327
 				1804800	343
 				1958400	445
+				2016000	470
 			>;
 			idle-cost-data = <
 				100 80 60 40
@@ -204,6 +208,7 @@
 				1708800	85
 				1804800	88
 				1958400	110
+				2016000	120
 			>;
 			idle-cost-data = <
 				4 3 2 1
@@ -323,6 +328,28 @@
 		<          0 0>,
 		<  400000000 1>,
 		<  533333333 3>;
+
+	qcom,speed4-bin-v0-c0 =
+		<          0 0>,
+		<  768000000 1>,
+		<  998400000 1>,
+		< 1171200000 2>,
+		< 1305600000 3>,
+		< 1459200000 5>;
+
+	qcom,speed4-bin-v0-c1 =
+		<          0 0>,
+		<  960000000 1>,
+		< 1305600000 1>,
+		< 1497600000 2>,
+		< 1708800000 3>,
+		< 1958400000 5>,
+		< 2016000000 6>;
+
+	qcom,speed4-bin-v0-cci =
+		<          0 0>,
+		<  400000000 1>,
+		<  533333333 3>;
 };
 
 &clock_gcc {
@@ -648,6 +675,10 @@
 	};
 };
 
+&sdhc_1 {
+	qcom,ddr-config = <0x00040868>;
+};
+
 &mdss_mdp {
 	qcom,vbif-settings = <0xd0 0x20>;
 };
@@ -663,5 +694,5 @@
 };
 
 &usb_otg {
-	qcom,hsusb-otg-phy-init-seq = <0x06 0x82 0xffffffff>;
+	qcom,hsusb-otg-phy-init-seq = <0x43 0x80 0x06 0x82 0xffffffff>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm632-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm632-coresight.dtsi
index 62eeb65..2e19b67 100644
--- a/arch/arm64/boot/dts/qcom/sdm632-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm632-coresight.dtsi
@@ -104,6 +104,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu4";
 		cpu = <&CPU4>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -118,7 +119,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu5";
 		cpu = <&CPU5>;
-
+		qcom,cti-save;
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
 		clock-names = "apb_pclk";
@@ -132,6 +133,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu6";
 		cpu = <&CPU6>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
@@ -146,6 +148,7 @@
 		reg-names = "cti-base";
 		coresight-name = "coresight-cti-cpu7";
 		cpu = <&CPU7>;
+		qcom,cti-save;
 
 		clocks = <&clock_gcc clk_qdss_clk>,
 			 <&clock_gcc clk_qdss_a_clk>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index 73c7be2..15874ff 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -43,6 +43,7 @@
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
 		qcom,ext-disp-audio-rx;
+		qcom,afe-rxtx-lb;
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 			<&loopback>, <&compress>, <&hostless>,
 			<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -73,7 +74,8 @@
 			<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
 			<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
 			<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
-			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
+			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>,
+			<&afe_loopback_tx>;
 		asoc-cpu-names = "msm-dai-q6-dp.24608",
 			"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
 			"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
@@ -99,7 +101,8 @@
 			"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
 			"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
 			"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913",
-			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
+			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929",
+			"msm-dai-q6-dev.24577";
 	};
 
 	tasha_snd: sound-tasha {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index a3c9e91..5324581 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -49,9 +49,11 @@
 		status = "ok";
 		reg =   <0x5000000 0x40000>,
 			<0x5061000 0x800>,
+			<0x509e000 0x1000>,
 			<0x780000 0x6300>;
 		reg-names =     "kgsl_3d0_reg_memory",
 				"kgsl_3d0_cx_dbgc_memory",
+				"cx_misc",
 				"qfprom_memory";
 		interrupts = <0 300 0>;
 		interrupt-names = "kgsl_3d0_irq";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index cfd96cb3..59fb78e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -98,6 +98,35 @@
 			};
 		};
 
+		micbias_supply_en_pins: micbias_supply_en_pins {
+			micbias_supply_en_active: micbias_supply_en_active {
+				mux {
+					pins = "gpio126";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio126";
+					drive-strength = <16>;
+					output-high;
+				};
+			};
+
+			micbias_supply_en_sleep: micbias_supply_en_sleep {
+				mux {
+					pins = "gpio126";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio126";
+					drive-strength = <16>;
+					bias-disable;
+					output-low;
+				};
+			};
+		};
+
 		qupv3_se0_spi_pins: qupv3_se0_spi_pins {
 			qupv3_se0_spi_active: qupv3_se0_spi_active {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index 8ed821a..925923d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -195,5 +195,6 @@
 	qcom,rpmh-master-stats@b221200 {
 		compatible = "qcom,rpmh-master-stats-v1";
 		reg = <0xb221200 0x60>;
+		qcom,use-alt-unit = <3>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 92d4317..2887f38 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -137,9 +137,16 @@
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
-			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll BYTECLK_SRC_0_CLK>,
+			<&mdss_dsi0_pll PCLK_SRC_0_CLK>,
+			<&mdss_dsi0_pll SHADOW_BYTECLK_SRC_0_CLK>,
+			<&mdss_dsi0_pll SHADOW_PCLK_SRC_0_CLK>;
+		clock-names = "mux_byte_clk", "mux_pixel_clk",
+				"src_byte_clk", "src_pixel_clk",
+				"shadow_byte_clk", "shadow_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -162,7 +169,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -186,7 +193,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
 			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -209,8 +216,14 @@
 		qcom,dsi-ctrl = <&mdss_dsi1>;
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
-			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll BYTECLK_SRC_1_CLK>,
+			<&mdss_dsi1_pll PCLK_SRC_1_CLK>,
+			<&mdss_dsi1_pll SHADOW_BYTECLK_SRC_1_CLK>,
+			<&mdss_dsi1_pll SHADOW_PCLK_SRC_1_CLK>;
+		clock-names = "mux_byte_clk", "mux_pixel_clk",
+				"src_byte_clk", "src_pixel_clk",
+				"shadow_byte_clk", "shadow_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -234,7 +247,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -252,7 +265,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -270,7 +283,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -288,7 +301,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -306,7 +319,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -324,7 +337,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -342,7 +355,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -365,7 +378,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -388,7 +401,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -410,7 +423,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -432,7 +445,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -455,7 +468,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -478,7 +491,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -501,7 +514,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -524,7 +537,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		ports {
 			#address-cells = <1>;
@@ -621,11 +634,10 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -633,6 +645,9 @@
 	qcom,mdss-dsi-panel-status-value = <0x9c>;
 	qcom,mdss-dsi-panel-on-check-value = <0x9c>;
 	qcom,mdss-dsi-panel-status-read-length = <1>;
+	qcom,dsi-dyn-clk-enable;
+	qcom,dsi-dyn-clk-list =
+		<804948480 798240576 801594528 808302432 811656384>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -694,11 +709,10 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -935,11 +949,10 @@
 &dsi_nt35695b_truly_fhd_video {
 	qcom,mdss-dsi-t-clk-post = <0x07>;
 	qcom,mdss-dsi-t-clk-pre = <0x1c>;
-	qcom,mdss-dsi-min-refresh-rate = <48>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <48 53 55 60>;
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
@@ -990,11 +1003,10 @@
 &dsi_hx8399_truly_cmd {
 	qcom,mdss-dsi-t-clk-post = <0x0E>;
 	qcom,mdss-dsi-t-clk-pre = <0x30>;
-	qcom,mdss-dsi-min-refresh-rate = <55>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
index 72e3f5f..326f4c0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,11 +18,14 @@
 		#clock-cells = <1>;
 		reg = <0xae94a00 0x1e0>,
 		      <0xae94400 0x800>,
-		      <0xaf03000 0x8>;
-		reg-names = "pll_base", "phy_base", "gdsc_base";
+		      <0xaf03000 0x8>,
+		      <0xae94200 0x100>;
+		reg-names = "pll_base", "phy_base", "gdsc_base",
+				"dynamic_pll_base";
 		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
 		clock-names = "iface_clk";
 		clock-rate = <0>;
+		memory-region = <&dfps_data_memory>;
 		gdsc-supply = <&mdss_core_gdsc>;
 		qcom,platform-supply-entries {
 			#address-cells = <1>;
@@ -45,8 +48,10 @@
 		#clock-cells = <1>;
 		reg = <0xae96a00 0x1e0>,
 		      <0xae96400 0x800>,
-		      <0xaf03000 0x8>;
-		reg-names = "pll_base", "phy_base", "gdsc_base";
+		      <0xaf03000 0x8>,
+		      <0xae96200 0x100>;
+		reg-names = "pll_base", "phy_base", "gdsc_base",
+				"dynamic_pll_base";
 		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
 		clock-names = "iface_clk";
 		clock-rate = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index fb717f3..9a567a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -485,8 +485,9 @@
 		compatible = "qcom,dsi-phy-v3.0";
 		label = "dsi-phy-0";
 		cell-index = <0>;
-		reg = <0xae94400 0x7c0>;
-		reg-names = "dsi_phy";
+		reg = <0xae94400 0x7c0>,
+			<0xae94200 0x100>;
+		reg-names = "dsi_phy", "dyn_refresh_base";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-0p9-supply = <&pm660l_l1>;
 		qcom,platform-strength-ctrl = [55 03
@@ -518,8 +519,9 @@
 		compatible = "qcom,dsi-phy-v3.0";
 		label = "dsi-phy-1";
 		cell-index = <1>;
-		reg = <0xae96400 0x7c0>;
-		reg-names = "dsi_phy";
+		reg = <0xae96400 0x7c0>,
+			<0xae96200 0x100>;
+		reg-names = "dsi_phy", "dyn_refresh_base";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-0p9-supply = <&pm660l_l1>;
 		qcom,platform-strength-ctrl = [55 03
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
index 1e84e2c..10fcc14 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
@@ -83,4 +83,5 @@
 &usb_qmp_dp_phy {
 	vdd-supply = <&pm660l_l1>; /* 0.88v */
 	core-supply = <&pm660_l1>; /* 1.2v */
+	extcon = <&pm660_pdphy>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 5c40ecc..af44079 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -357,7 +357,7 @@
 				1708800   139
 			>;
 			idle-cost-data = <
-				12 10 8 6
+				12 10 8 6 4
 			>;
 		};
 		CPU_COST_1: core-cost1 {
@@ -383,12 +383,12 @@
 				2611200   1400
 			>;
 			idle-cost-data = <
-				100 80 60 40
+				100 80 60 40 20
 			>;
 		};
 		CLUSTER_COST_0: cluster-cost0 {
 			busy-cost-data = <
-				 300000    5
+				 300000    6
 				 576000    7
 				 748800    8
 				 998400    9
@@ -399,7 +399,7 @@
 				1708800   19
 			>;
 			idle-cost-data = <
-				4 3 2 1
+				5 4 3 2 1
 			>;
 		};
 		CLUSTER_COST_1: cluster-cost1 {
@@ -425,7 +425,7 @@
 				2611200   140
 			>;
 			idle-cost-data = <
-				4 3 2 1
+				5 4 3 2 1
 			>;
 		};
 	};
@@ -596,10 +596,15 @@
 		};
 
 		cont_splash_memory: cont_splash_region@9c000000 {
-			reg = <0x0 0x9c000000 0x0 0x02400000>;
+			reg = <0x0 0x9c000000 0x0 0x2300000>;
 			label = "cont_splash_region";
 		};
 
+		dfps_data_memory: dfps_data_region@9e300000 {
+			reg = <0x0 0x9e300000 0x0 0x0100000>;
+			label = "dfps_data_region";
+		};
+
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			reusable;
@@ -659,7 +664,7 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "core_clk";
 
-		qom,coresight-jtagmm-cpu = <&CPU1>;
+		qcom,coresight-jtagmm-cpu = <&CPU1>;
 	};
 
 	jtag_mm2: jtagmm@7240000 {
@@ -1875,7 +1880,7 @@
 		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <1>;
-
+		spm-level = <5>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
 
 		clock-names =
diff --git a/arch/arm64/boot/dts/qcom/sdm710-aqt1000-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-aqt1000-cdp-overlay.dts
index 31c99de..c482acc 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-aqt1000-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-aqt1000-cdp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L AQT CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 4>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-cdp-overlay.dts
index a61d714..d0be6b0 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-cdp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 0>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-external-codec-cdp-overlay.dts
index c64e623..8b1dfc7 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-external-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-external-codec-cdp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L Ext. Audio Codec CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 1>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-external-codec-mtp-overlay.dts
index 058a3f1..59b02bb 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-external-codec-mtp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L Ext. Audio Codec MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 1>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-cdp-overlay.dts
index 408a376..3981c83 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-cdp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A Ext. Audio Codec CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 1>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-mtp-overlay.dts
index 9a41edd..e4b3c27 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-external-codec-pm660a-mtp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A Ext. Audio Codec MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 1>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-mtp-overlay.dts
index aa8ba6a..5128e3c 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-mtp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 0>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-pm660a-aqt1000-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-pm660a-aqt1000-cdp-overlay.dts
index 50b3470..052f582 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-pm660a-aqt1000-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-pm660a-aqt1000-cdp-overlay.dts
@@ -27,7 +27,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A AQT CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 4>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-pm660a-cdp-overlay.dts
index 66afaad..f187cbf 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-pm660a-cdp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 0>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-pm660a-mtp-overlay.dts
index d9b055d..f9b79e6 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-pm660a-mtp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 0>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-pm660a-tasha-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-pm660a-tasha-codec-cdp-overlay.dts
index 7e9eaa7..da332ce 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-pm660a-tasha-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-pm660a-tasha-codec-cdp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A + Tasha Codec CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 5>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
index 91139ba..a2213b2 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
@@ -23,7 +23,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD";
 	compatible = "qcom,sdm670-qrd", "qcom,sdm670", "qcom,qrd";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <0x0002000b 0>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
index 526fe6b..48dbd1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
@@ -23,7 +23,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD SKU2";
 	compatible = "qcom,sdm670-qrd", "qcom,sdm670", "qcom,qrd";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <0x0012000b 0>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-tasha-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-tasha-codec-cdp-overlay.dts
index 1632ebf..abfb46c 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-tasha-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-tasha-codec-cdp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L Tasha Codec CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 5>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-cdp-overlay.dts
index fe05472..078088f 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-cdp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L, USB-C Audio, CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 2>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-cdp-overlay.dts
index 846bee4..f466ab9 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-cdp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660+PM660L, USB-C Audio, Ext. Audio Codec CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 3>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-mtp-overlay.dts
index bfc9a7f..2354504 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-mtp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660+PM660L, USB-C Audio, Ext. Audio Codec MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 3>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-cdp-overlay.dts
index f8642bb..ef8f6a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-cdp-overlay.dts
@@ -27,7 +27,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660+PM660A, USB-C Audio, Ext. Audio Codec CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 3>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-mtp-overlay.dts
index c820950..fac39cc 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-external-codec-pm660a-mtp-overlay.dts
@@ -27,7 +27,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660+PM660A, USB-C Audio, Ext. Audio Codec MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 3>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-mtp-overlay.dts
index a40ab83..dc5c80c 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-mtp-overlay.dts
@@ -25,7 +25,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L, USB-C Audio, MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 2>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-cdp-overlay.dts
index df4c2b9..51313c1 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-cdp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A, USB-C Audio, CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <1 2>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-mtp-overlay.dts
index 35f9570..f9179c3 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-usbc-pm660a-mtp-overlay.dts
@@ -26,7 +26,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660A, USB-C Audio, MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 	qcom,board-id = <8 2>;
 	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
 		       <0x0001001b 0x0002001a 0x0 0x0>,
diff --git a/arch/arm64/boot/dts/qcom/sdm710.dtsi b/arch/arm64/boot/dts/qcom/sdm710.dtsi
index 347e846..3039d58 100644
--- a/arch/arm64/boot/dts/qcom/sdm710.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm710.dtsi
@@ -15,7 +15,8 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM710";
 	compatible = "qcom,sdm670";
-	qcom,msm-id = <360 0x0>;
+	qcom,msm-id =	<360 0x0>,
+			<393 0x0>;
 };
 
 &msm_gpu {
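
Note on the qcom,msm-id additions in the SDM710 device trees above: the bootloader picks a DTB/overlay by matching the (SoC ID, revision) pairs in qcom,msm-id against values read from hardware, so listing <393 0x0> next to the existing <360 0x0> lets the same SDM710 trees be selected on a second, presumably newly introduced, SoC ID without duplicating every overlay.
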
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
index b671d0e..9409a4c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
@@ -69,7 +69,7 @@
 		sensor-position-yaw = <0>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		cam_vdig-supply = <&pm8998_s3>;
+		cam_vdig-supply = <&camera_eyetracking_force>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 					"cam_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
index 6052074..f19007f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
@@ -591,7 +591,7 @@
 		sensor-position-yaw = <0>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
-		cam_vdig-supply = <&pm8998_s3>;
+		cam_vdig-supply = <&camera_eyetracking_force>;
 		cam_clk-supply = <&titan_top_gdsc>;
 		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
 					"cam_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index e8f85a9..36f556f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -48,8 +48,10 @@
 		label = "kgsl-3d0";
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
 		status = "ok";
-		reg = <0x5000000 0x40000>, <0x5061000 0x800>;
-		reg-names = "kgsl_3d0_reg_memory", "kgsl_3d0_cx_dbgc_memory";
+		reg = <0x5000000 0x40000>, <0x5061000 0x800>,
+			<0x509e000 0x1000>;
+		reg-names = "kgsl_3d0_reg_memory", "kgsl_3d0_cx_dbgc_memory",
+			"cx_misc";
 		interrupts = <0 300 0>;
 		interrupt-names = "kgsl_3d0_irq";
 		qcom,id = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
index 9313a75..6b3ec0e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,7 @@
 };
 
 &usb_qmp_dp_phy {
+	/delete-property/ extcon;
 	vdd-supply = <&pm660l_l1>; /* 0.88v */
 	core-supply = <&pm660_l1>; /* 1.2v */
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
index e7ff910..f384a52 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 };
 
 &usb_qmp_dp_phy {
+	/delete-property/ extcon;
 	vdd-supply = <&pm660l_l1>; /* 0.88v */
 	core-supply = <&pm660_l1>; /* 1.2v */
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 350f156..9a4e43d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -149,5 +149,6 @@
 	qcom,rpmh-master-stats@b221200 {
 		compatible = "qcom,rpmh-master-stats-v1";
 		reg = <0xb221200 0x60>;
+		qcom,use-alt-unit = <3>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi
index 48040a3..db4483c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,3 +45,7 @@
 &usb0 {
 	extcon = <&pmi8998_pdphy>, <&pmi8998_pdphy>, <&eud>;
 };
+
+&usb_qmp_dp_phy {
+	extcon = <&pmi8998_pdphy>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 0de0331..8c746e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -116,7 +116,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -140,7 +140,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -164,7 +164,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -188,7 +188,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -212,7 +212,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -235,7 +235,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -259,7 +259,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
 			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -283,7 +283,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy1>;
 		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
 			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -307,7 +307,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -325,7 +325,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -343,7 +343,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -361,7 +361,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -379,7 +379,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -397,7 +397,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -415,7 +415,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -439,7 +439,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 		       <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -463,7 +463,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -487,7 +487,7 @@
 		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 				<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
-		clock-names = "src_byte_clk", "src_pixel_clk";
+		clock-names = "mux_byte_clk", "mux_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
 		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -561,11 +561,10 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -634,11 +633,10 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,mdss-dsi-min-refresh-rate = <53>;
-	qcom,mdss-dsi-max-refresh-rate = <60>;
 	qcom,mdss-dsi-pan-enable-dynamic-fps;
 	qcom,mdss-dsi-pan-fps-update =
 		"dfps_immediate_porch_mode_vfp";
+	qcom,dsi-supported-dfps-list = <53 55 60>;
 	qcom,esd-check-enabled;
 	qcom,mdss-dsi-panel-status-check-mode = "reg_read";
 	qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
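
Note on the sdm845-sde-display.dtsi changes above: the clock-names rename from "src_byte_clk"/"src_pixel_clk" to "mux_byte_clk"/"mux_pixel_clk" brings the names in line with the BYTECLK_MUX/PCLK_MUX handles already listed in the clocks properties (and presumably matches a corresponding rename on the DSI driver side), while the dynamic-FPS panels drop the fixed 53-60 Hz min/max range in favour of an explicit qcom,dsi-supported-dfps-list = <53 55 60>, so only those three refresh rates are advertised.
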
diff --git a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts
index be8416f..63638fc 100644
--- a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts
+++ b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts
@@ -15,6 +15,7 @@
 
 #include "msm8909-mtp.dtsi"
 #include "msm8909w.dtsi"
+#include "msm8909w-gpu.dtsi"
 #include "8909w-pm660.dtsi"
 #include "apq8009w-bg-memory.dtsi"
 #include "msm8909-audio-bg_codec.dtsi"
@@ -116,6 +117,13 @@
 			qcom,glinkpkt-ch-name = "RSB_CTRL";
 			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
 		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
 	};
 
 	spi@78B8000 {  /* BLSP1 QUP4 */
diff --git a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts
index 1457b92..1938875 100644
--- a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts
@@ -15,6 +15,7 @@
 
 #include "msm8909-mtp.dtsi"
 #include "msm8909w.dtsi"
+#include "msm8909w-gpu.dtsi"
 #include "8909w-pm660.dtsi"
 #include "apq8009w-bg-memory.dtsi"
 #include "msm8909-audio-bg_codec.dtsi"
@@ -134,6 +135,13 @@
 			qcom,glinkpkt-ch-name = "RSB_CTRL";
 			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
 		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
 	};
 
 	spi@78B8000 {  /* BLSP1 QUP4 */
diff --git a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts
index a66387e..b702ad0 100644
--- a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts
@@ -137,6 +137,13 @@
 			qcom,glinkpkt-ch-name = "RSB_CTRL";
 			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
 		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
 	};
 
 	spi@78B8000 {  /* BLSP1 QUP4 */
diff --git a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts
index 761879f..d51e6da 100644
--- a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts
@@ -136,6 +136,13 @@
 			qcom,glinkpkt-ch-name = "RSB_CTRL";
 			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
 		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
 	};
 
 	spi@78B8000 {  /* BLSP1 QUP4 */
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-audio-overlay.dtsi
index 0038ae7..9f42e38 100644
--- a/arch/arm64/boot/dts/qcom/sxr1120-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sxr1120-audio-overlay.dtsi
@@ -115,6 +115,14 @@
 		compatible = "qcom,wcd-dsp-glink";
 	};
 
+	micbias_supply_en_gpio: msm_cdc_pinctrl@126 {
+		status = "okay";
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&micbias_supply_en_active>;
+		pinctrl-1 = <&micbias_supply_en_sleep>;
+	};
+
 };
 
 &slim_aud {
@@ -152,6 +160,9 @@
 
 		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
 
+		qcom,has-micbias-supply-en-gpio;
+		qcom,micbias-supply-en-gpio-node = <&micbias_supply_en_gpio>;
+
 		wcd_spi_0: wcd_spi {
 			compatible = "qcom,wcd-spi-v2";
 			qcom,master-bus-num = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts
index 315f43c..0c36abe 100644
--- a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp-overlay.dts
@@ -30,3 +30,7 @@
 	qcom,msm-id = <370 0x0>;
 	qcom,board-id = <1 1>;
 };
+
+&int_codec {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts
index 7d5b715..3f1214b 100644
--- a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-cdp.dts
@@ -24,3 +24,7 @@
 	compatible = "qcom,sxr1120-cdp", "qcom,sxr1120", "qcom,cdp";
 	qcom,board-id = <1 1>;
 };
+
+&int_codec {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts
index 4242d5e..e03506a 100644
--- a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp-overlay.dts
@@ -31,3 +31,7 @@
 	qcom,msm-id = <370 0x0>;
 	qcom,board-id = <8 9>;
 };
+
+&int_codec {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts
index 164ef61..64866b2 100644
--- a/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc-external-codec-mtp.dts
@@ -24,3 +24,7 @@
 	compatible = "qcom,sxr1120-mtp", "qcom,sxr1120", "qcom,mtp";
 	qcom,board-id = <8 9>;
 };
+
+&int_codec {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi b/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi
index c5d393c..79f4776 100644
--- a/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sxr1120-lc.dtsi
@@ -23,6 +23,24 @@
 	qcom,qbt1000 {
 		/delete-property/ qcom,ipc-gpio;
 	};
+
+	/delete-node/ devfreq-cpufreq;
+	devfreq-cpufreq {
+		mincpubw-cpufreq {
+			target-dev = <&mincpubw>;
+			cpu-to-dev-map-0 =
+				<  748800 MHZ_TO_MBPS( 300, 4) >,
+				< 1209660 MHZ_TO_MBPS( 451, 4) >,
+				< 1612800 MHZ_TO_MBPS( 547, 4) >,
+				< 1708000 MHZ_TO_MBPS( 768, 4) >;
+			cpu-to-dev-map-2 =
+				< 1132800 MHZ_TO_MBPS( 300, 4) >,
+				< 1363200 MHZ_TO_MBPS( 547, 4) >,
+				< 1747200 MHZ_TO_MBPS( 768, 4) >,
+				< 1996800 MHZ_TO_MBPS(1017, 4) >,
+				< 2457600 MHZ_TO_MBPS(1804, 4) >;
+		};
+	};
 };
 
 &ext_dsi_bridge_display {
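
Note on the devfreq-cpufreq override above: the inherited node is deleted and rebuilt so that each cpu-to-dev-map-N entry pairs a CPU frequency (first cell, in kHz) with a DDR bandwidth vote for the mincpubw device; MHZ_TO_MBPS(freq, width) is a helper macro defined elsewhere in these device trees that converts a memory clock in MHz and a bus width (here 4) into an MB/s figure.
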
diff --git a/arch/arm64/boot/dts/qcom/sxr1130.dtsi b/arch/arm64/boot/dts/qcom/sxr1130.dtsi
index 75707b1..bfab826 100644
--- a/arch/arm64/boot/dts/qcom/sxr1130.dtsi
+++ b/arch/arm64/boot/dts/qcom/sxr1130.dtsi
@@ -12,6 +12,7 @@
  */
 
 #include "qcs605.dtsi"
+#include "msm-qvr-external.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SXR1130";
diff --git a/arch/arm64/boot/dts/qcom/vbms-batterydata-mlp356477-2800mah.dtsi b/arch/arm64/boot/dts/qcom/vbms-batterydata-mlp356477-2800mah.dtsi
new file mode 100644
index 0000000..d3dd995
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/vbms-batterydata-mlp356477-2800mah.dtsi
@@ -0,0 +1,118 @@
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,mlp356477_2800mah {
+	qcom,fcc-mah = <4200>;
+	qcom,batt-id-kohm = <82>;
+	qcom,rbatt-capacitive-mohm = <50>;
+	qcom,default-rbatt-mohm = <148>;
+	qcom,max-voltage-uv = <4400000>;
+	qcom,v-cutoff-uv = <3400000>;
+	qcom,chg-term-ua = <100000>;
+	qcom,battery-type = "mlp356477_2800mah";
+
+	qcom,fcc-temp-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 60>;
+		qcom,lut-data = <2863 2856 2854 2852 2841>;
+	};
+
+	qcom,ibat-acc-lut {
+		qcom,lut-col-legend = <(-20) 0 25>;
+		qcom,lut-row-legend = <0 250 500 1000>;
+		qcom,lut-data = <2792 2798 2797>,
+				<618 2712 2780>,
+				<128 2440 2766>,
+				<14 1806 2724>;
+	};
+
+	qcom,pc-temp-ocv-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 60>;
+		qcom,lut-row-legend = <100 95 90 85 80>,
+					<75 70 65 60 55>,
+					<50 45 40 35 30>,
+					<25 20 16 13 11>,
+					<10 9 8 7 6>,
+					<5 4 3 2 1>,
+					<0>;
+		qcom,lut-data = <4390 4384 4378 4374 4366>,
+				<4252 4302 4314 4313 4307>,
+				<4160 4238 4256 4255 4250>,
+				<4103 4179 4200 4198 4194>,
+				<4014 4126 4144 4144 4138>,
+				<3962 4077 4092 4090 4086>,
+				<3913 4022 4042 4042 4037>,
+				<3875 3960 3993 3995 3992>,
+				<3850 3914 3946 3948 3946>,
+				<3832 3872 3892 3894 3892>,
+				<3816 3839 3858 3860 3859>,
+				<3802 3814 3832 3834 3832>,
+				<3787 3798 3810 3812 3811>,
+				<3771 3785 3792 3794 3792>,
+				<3754 3772 3774 3774 3768>,
+				<3734 3756 3756 3749 3738>,
+				<3712 3734 3734 3726 3714>,
+				<3692 3712 3712 3704 3690>,
+				<3672 3698 3692 3684 3674>,
+				<3656 3689 3686 3680 3669>,
+				<3646 3685 3685 3679 3668>,
+				<3634 3681 3683 3678 3666>,
+				<3620 3676 3680 3676 3664>,
+				<3604 3668 3676 3670 3654>,
+				<3580 3651 3660 3652 3630>,
+				<3550 3620 3625 3614 3590>,
+				<3508 3574 3575 3565 3538>,
+				<3445 3510 3510 3500 3470>,
+				<3350 3420 3421 3413 3377>,
+				<3182 3274 3282 3266 3232>,
+				<3000 3000 3000 3000 3000>;
+	};
+
+	qcom,rbatt-sf-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 60>;
+		qcom,lut-row-legend = <100 95 90 85 80>,
+					<75 70 65 60 55>,
+					<50 45 40 35 30>,
+					<25 20 16 13 11>,
+					<10 9 8 7 6>,
+					<5 4 3 2 1>;
+		qcom,lut-data = <1593 376 99 75 68>,
+				<1591 376 99 75 68>,
+				<1455 370 99 75 68>,
+				<1391 362 99 76 67>,
+				<1280 358 99 76 69>,
+				<1245 363 102 78 70>,
+				<1213 358 107 80 72>,
+				<1200 330 112 84 74>,
+				<1207 322 116 89 77>,
+				<1228 311 97 76 68>,
+				<1261 309 94 74 68>,
+				<1309 312 94 74 68>,
+				<1411 320 96 76 70>,
+				<1588 337 97 78 72>,
+				<1827 364 99 79 71>,
+				<2133 397 97 75 69>,
+				<2536 438 97 74 69>,
+				<2964 476 100 76 68>,
+				<3313 509 100 74 68>,
+				<3539 520 100 76 68>,
+				<3793 536 101 76 70>,
+				<4098 558 104 78 72>,
+				<4461 581 108 82 76>,
+				<4893 605 112 84 77>,
+				<5421 626 114 82 72>,
+				<6107 646 111 79 72>,
+				<7007 676 114 80 72>,
+				<8309 727 119 82 76>,
+				<10285 857 130 89 84>,
+				<14336 1715 261 178 168>;
+	};
+};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dab2cb0..b4c4d82 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -260,6 +260,8 @@
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_POWER_RESET_MSM=y
 CONFIG_BATTERY_BQ27XXX=y
 CONFIG_POWER_RESET_XGENE=y
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index 1e16d20..65b5dd6 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -56,7 +56,9 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8937=y
+CONFIG_ARCH_MSM8940=y
 CONFIG_ARCH_MSM8917=y
+CONFIG_ARCH_QM215=y
 CONFIG_ARCH_SDM429=y
 CONFIG_ARCH_SDM439=y
 # CONFIG_ARM64_ERRATUM_1024718 is not set
@@ -263,6 +265,7 @@
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -344,6 +347,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
+CONFIG_PINCTRL_MSM8940=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -352,10 +356,13 @@
 CONFIG_QCOM_DLOAD_MODE=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_QPNP_TYPEC=y
 CONFIG_QPNP_QG=y
 CONFIG_MSM_APM=y
@@ -509,6 +516,7 @@
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index e4ca873..4b416db7 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -57,7 +57,9 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_MSM8937=y
+CONFIG_ARCH_MSM8940=y
 CONFIG_ARCH_MSM8917=y
+CONFIG_ARCH_QM215=y
 CONFIG_ARCH_SDM429=y
 CONFIG_ARCH_SDM439=y
 # CONFIG_ARM64_ERRATUM_1024718 is not set
@@ -269,6 +271,7 @@
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -351,6 +354,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_MSM8937=y
 CONFIG_PINCTRL_MSM8917=y
+CONFIG_PINCTRL_MSM8940=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -359,10 +363,13 @@
 CONFIG_QCOM_DLOAD_MODE=y
 CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1360_CHARGER_FG=y
 CONFIG_SMB1355_SLAVE_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
 CONFIG_QPNP_SMB5=y
 CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
 CONFIG_QPNP_TYPEC=y
 CONFIG_QPNP_QG=y
 CONFIG_MSM_APM=y
@@ -520,6 +527,7 @@
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_EDAC=y
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index a66ea31..3039177 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -265,6 +265,7 @@
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 34d16db..4620bec 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -271,6 +271,7 @@
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index fcecc6b..0b511cb 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -271,6 +271,7 @@
 CONFIG_DM_DEBUG=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -361,7 +362,6 @@
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP_OLEDB=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index fa802d2..a4363ac9 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -279,6 +279,7 @@
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -369,7 +370,6 @@
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_QPNP_LABIBB=y
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP_OLEDB=y
@@ -625,6 +625,7 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
 CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index c9f60b5..d3ad4ef 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -620,6 +620,7 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
 CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 6e1cb8c..7e842dc 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -4,6 +4,8 @@
 #include <asm/cpucaps.h>
 #include <asm/insn.h>
 
+#define ARM64_CB_PATCH ARM64_NCAPS
+
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
@@ -11,6 +13,8 @@
 #include <linux/stddef.h>
 #include <linux/stringify.h>
 
+extern int alternatives_applied;
+
 struct alt_instr {
 	s32 orig_offset;	/* offset to original instruction */
 	s32 alt_offset;		/* offset to replacement instruction */
@@ -19,12 +23,19 @@
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
 
+typedef void (*alternative_cb_t)(struct alt_instr *alt,
+				 __le32 *origptr, __le32 *updptr, int nr_inst);
+
 void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
 
-#define ALTINSTR_ENTRY(feature)						      \
+#define ALTINSTR_ENTRY(feature,cb)					      \
 	" .word 661b - .\n"				/* label           */ \
+	" .if " __stringify(cb) " == 0\n"				      \
 	" .word 663f - .\n"				/* new instruction */ \
+	" .else\n"							      \
+	" .word " __stringify(cb) "- .\n"		/* callback */	      \
+	" .endif\n"							      \
 	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
 	" .byte 662b-661b\n"				/* source len      */ \
 	" .byte 664f-663f\n"				/* replacement len */
@@ -42,15 +53,18 @@
  * but most assemblers die if insn1 or insn2 have a .inst. This should
  * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
  * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ *
+ * Alternatives with callbacks do not generate replacement instructions.
  */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)	\
 	".if "__stringify(cfg_enabled)" == 1\n"				\
 	"661:\n\t"							\
 	oldinstr "\n"							\
 	"662:\n"							\
 	".pushsection .altinstructions,\"a\"\n"				\
-	ALTINSTR_ENTRY(feature)						\
+	ALTINSTR_ENTRY(feature,cb)					\
 	".popsection\n"							\
+	" .if " __stringify(cb) " == 0\n"				\
 	".pushsection .altinstr_replacement, \"a\"\n"			\
 	"663:\n\t"							\
 	newinstr "\n"							\
@@ -58,11 +72,17 @@
 	".popsection\n\t"						\
 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
 	".org	. - (662b-661b) + (664b-663b)\n"			\
+	".else\n\t"							\
+	"663:\n\t"							\
+	"664:\n\t"							\
+	".endif\n"							\
 	".endif\n"
 
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
-	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
 
+#define ALTERNATIVE_CB(oldinstr, cb) \
+	__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
 #else
 
 #include <asm/assembler.h>
@@ -129,6 +149,14 @@
 661:
 .endm
 
+.macro alternative_cb cb
+	.set .Lasm_alt_mode, 0
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
+	.popsection
+661:
+.endm
+
 /*
  * Provide the other half of the alternative code sequence.
  */
@@ -155,6 +183,13 @@
 .endm
 
 /*
+ * Callback-based alternative epilogue
+ */
+.macro alternative_cb_end
+662:
+.endm
+
+/*
  * Provides a trivial alternative or default sequence consisting solely
  * of NOPs. The number of NOPs is chosen automatically to match the
  * previous case.
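
The alternative.h changes above add callback-based alternatives: an entry whose cpufeature field is ARM64_CB_PATCH carries a pointer to a C function that generates the replacement instructions at patch time rather than a pre-assembled sequence (see the matching __apply_alternatives rework later in this patch). A minimal, hypothetical sketch of how such a callback could be wired up follows; the function names and the NOP body are illustrative only and are not part of this patch.

#include <asm/alternative.h>
#include <asm/insn.h>

/*
 * Illustrative callback: fill the patched region with NOPs at boot.
 * The signature matches alternative_cb_t; updptr may alias origptr.
 */
static void demo_patch_nops(struct alt_instr *alt, __le32 *origptr,
			    __le32 *updptr, int nr_inst)
{
	int i;

	for (i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}

static inline unsigned long demo_masked(unsigned long v)
{
	/*
	 * The "and" below is rewritten by demo_patch_nops() when
	 * alternatives are applied: ARM64_CB_PATCH entries are patched
	 * unconditionally rather than gated on a CPU capability.
	 */
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n", demo_patch_nops)
		     : "+r" (v));
	return v;
}
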
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index c7749d8..8351201 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -266,7 +266,11 @@
 	 */
 	.macro adr_this_cpu, dst, sym, tmp
 	adr_l	\dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\tmp, tpidr_el1
+alternative_else
+	mrs	\tmp, tpidr_el2
+alternative_endif
 	add	\dst, \dst, \tmp
 	.endm
 
@@ -277,7 +281,11 @@
 	 */
 	.macro ldr_this_cpu dst, sym, tmp
 	adr_l	\dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\tmp, tpidr_el1
+alternative_else
+	mrs	\tmp, tpidr_el2
+alternative_endif
 	ldr	\dst, [\dst, \tmp]
 	.endm
 
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index f558869..877d478 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -22,6 +22,11 @@
 #define CTR_L1IP_MASK		3
 #define CTR_CWG_SHIFT		24
 #define CTR_CWG_MASK		15
+#define CTR_DMINLINE_SHIFT	16
+#define CTR_IMINLINE_SHIFT	0
+
+#define CTR_CACHE_MINLINE_MASK	\
+	((0xf << CTR_DMINLINE_SHIFT) | (0xf << CTR_IMINLINE_SHIFT))
 
 #define ICACHE_POLICY_RESERVED	0
 #define ICACHE_POLICY_AIVIVT	1
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index ae852ad..0f2e1ab 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -229,7 +229,9 @@
 	unsigned long tmp;						\
 									\
 	asm volatile(							\
-	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"		\
+	"	sevl\n"							\
+	"	wfe\n"							\
+	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
 	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
 	"	cbnz	%" #w "[tmp], 1f\n"				\
 	"	wfe\n"							\
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index d64bf94..8c7c4b2 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -34,11 +34,11 @@
 #define ARM64_HAS_32BIT_EL0			13
 #define ARM64_HYP_OFFSET_LOW			14
 #define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
-
 #define ARM64_UNMAP_KERNEL_AT_EL0		16
-
 #define ARM64_HARDEN_BRANCH_PREDICTOR		17
+#define ARM64_SSBD				18
+#define ARM64_MISMATCHED_CACHE_TYPE		19
 
-#define ARM64_NCAPS				18
+#define ARM64_NCAPS				20
 
 #endif /* __ASM_CPUCAPS_H */
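
Note: the two new capabilities (ARM64_SSBD and ARM64_MISMATCHED_CACHE_TYPE) bring the count to 20, and ARM64_CB_PATCH in alternative.h is defined as ARM64_NCAPS, i.e. one slot past the last real capability, which is how __apply_alternatives() tells an unconditional callback entry apart from a normal feature-gated one.
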
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 829331c..ddaeb43 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -229,6 +229,28 @@
 		!cpus_have_cap(ARM64_HAS_PAN);
 }
 
+#define ARM64_SSBD_UNKNOWN		-1
+#define ARM64_SSBD_FORCE_DISABLE	0
+#define ARM64_SSBD_KERNEL		1
+#define ARM64_SSBD_FORCE_ENABLE		2
+#define ARM64_SSBD_MITIGATED		3
+
+static inline int arm64_get_ssbd_state(void)
+{
+#ifdef CONFIG_ARM64_SSBD
+	extern int ssbd_state;
+	return ssbd_state;
+#else
+	return ARM64_SSBD_UNKNOWN;
+#endif
+}
+
+#ifdef CONFIG_ARM64_SSBD
+void arm64_set_ssbd_mitigation(bool state);
+#else
+static inline void arm64_set_ssbd_mitigation(bool state) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e8..7e2b3e3 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm goto("1: nop\n\t"
+	asm_volatile_goto("1: nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm goto("1: b %l[l_yes]\n\t"
+	asm_volatile_goto("1: b %l[l_yes]\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ec3553eb..8f5cf83 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -33,6 +33,10 @@
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
 #define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
+#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
+/* Translate a kernel address of @sym into its equivalent linear mapping */
 #define kvm_ksym_ref(sym)						\
 	({								\
 		void *val = &sym;					\
@@ -65,6 +69,43 @@
 
 extern u32 __init_stage2_translation(void);
 
+/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+#define __hyp_this_cpu_ptr(sym)						\
+	({								\
+		void *__ptr = hyp_symbol_addr(sym);			\
+		__ptr += read_sysreg(tpidr_el2);			\
+		(typeof(&sym))__ptr;					\
+	 })
+
+#define __hyp_this_cpu_read(sym)					\
+	({								\
+		*__hyp_this_cpu_ptr(sym);				\
+	 })
+
+#else /* __ASSEMBLY__ */
+
+.macro hyp_adr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
+	mrs	\tmp, tpidr_el2
+	add	\reg, \reg, \tmp
+.endm
+
+.macro hyp_ldr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
+	mrs	\tmp, tpidr_el2
+	ldr	\reg,  [\reg, \tmp]
+.endm
+
+.macro get_host_ctxt reg, tmp
+	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+.endm
+
+.macro get_vcpu_ptr vcpu, ctxt
+	get_host_ctxt \ctxt, \vcpu
+	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+	kern_hyp_va	\vcpu
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
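
The __hyp_this_cpu_* helpers above let EL2 (hyp) code reach per-CPU variables by adding tpidr_el2, which now carries the per-CPU offset, to a PC-relative symbol address (see the percpu.h and kvm_host.h changes in this patch). A small, hypothetical usage sketch follows; the variable and function names are illustrative and not part of this patch, and in practice the variable's pages must also be mapped into the hyp address space, as hyp_map_aux_data() does for arm64_ssbd_callback_required.

#include <linux/percpu.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>	/* __hyp_text */

/* Per-CPU flag shared between the kernel and the hyp switch path. */
DEFINE_PER_CPU(u64, demo_hyp_flag);

static u64 __hyp_text demo_read_flag(void)
{
	/*
	 * Resolves to hyp_symbol_addr(demo_hyp_flag) + tpidr_el2, so the
	 * access works at EL2 where kernel virtual addresses are invalid.
	 */
	return __hyp_this_cpu_read(demo_hyp_flag);
}
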
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fe39e68..ba0d52c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -42,6 +42,11 @@
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+	return !(vcpu->arch.hcr_el2 & HCR_RW);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2abb449..4cdfbd0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -197,6 +197,8 @@
 		u64 sys_regs[NR_SYS_REGS];
 		u32 copro[NR_COPRO_REGS];
 	};
+
+	struct kvm_vcpu *__hyp_running_vcpu;
 };
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
@@ -211,6 +213,9 @@
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
+	/* State of various workarounds, see kvm_asm.h for bit assignment */
+	u64 workaround_flags;
+
 	/* Guest debug state */
 	u64 debug_flags;
 
@@ -354,10 +359,15 @@
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
+void __kvm_set_tpidr_el2(u64 tpidr_el2);
+DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long hyp_stack_ptr,
 				       unsigned long vector_ptr)
 {
+	u64 tpidr_el2;
+
 	/*
 	 * Call initialization code, and switch to the full blown HYP code.
 	 * If the cpucaps haven't been finalized yet, something has gone very
@@ -366,6 +376,16 @@
 	 */
 	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+
+	/*
+	 * Calculate the raw per-cpu offset without a translation from the
+	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
+	 * so that we can use adr_l to access per-cpu variables in EL2.
+	 */
+	tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
+		- (u64)kvm_ksym_ref(kvm_host_cpu_state);
+
+	kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
 }
 
 void __kvm_hyp_teardown(void);
@@ -405,4 +425,27 @@
 	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
 }
 
+#define KVM_SSBD_UNKNOWN		-1
+#define KVM_SSBD_FORCE_DISABLE		0
+#define KVM_SSBD_KERNEL		1
+#define KVM_SSBD_FORCE_ENABLE		2
+#define KVM_SSBD_MITIGATED		3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+	switch (arm64_get_ssbd_state()) {
+	case ARM64_SSBD_FORCE_DISABLE:
+		return KVM_SSBD_FORCE_DISABLE;
+	case ARM64_SSBD_KERNEL:
+		return KVM_SSBD_KERNEL;
+	case ARM64_SSBD_FORCE_ENABLE:
+		return KVM_SSBD_FORCE_ENABLE;
+	case ARM64_SSBD_MITIGATED:
+		return KVM_SSBD_MITIGATED;
+	case ARM64_SSBD_UNKNOWN:
+	default:
+		return KVM_SSBD_UNKNOWN;
+	}
+}
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index ecc2ae6..4287acb 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -131,6 +131,26 @@
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
 /*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr;					\
+		asm("adrp	%0, %1\n"				\
+		    "add	%0, %0, :lo12:%1\n"			\
+		    : "=r" (addr) : "S" (&s));				\
+		addr;							\
+	})
+
+/*
  * We currently only support a 40bit IPA.
  */
 #define KVM_PHYS_SHIFT	(40)
@@ -367,5 +387,29 @@
 }
 #endif
 
+#ifdef CONFIG_ARM64_SSBD
+DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+static inline int hyp_map_aux_data(void)
+{
+	int cpu, err;
+
+	for_each_possible_cpu(cpu) {
+		u64 *ptr;
+
+		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+#else
+static inline int hyp_map_aux_data(void)
+{
+	return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index d7a3c62..a2f6bd2 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -17,10 +17,14 @@
 #define __ASM_PERCPU_H
 
 #include <asm/stack_pointer.h>
+#include <asm/alternative.h>
 
 static inline void set_my_cpu_offset(unsigned long off)
 {
-	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
+				 "msr tpidr_el2, %0",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+			:: "r" (off) : "memory");
 }
 
 static inline unsigned long __my_cpu_offset(void)
@@ -31,7 +35,10 @@
 	 * We want to allow caching the value, so avoid using volatile and
 	 * instead use a fake stack read to hazard against barrier().
 	 */
-	asm("mrs %0, tpidr_el1" : "=r" (off) :
+	asm(ALTERNATIVE("mrs %0, tpidr_el1",
+			"mrs %0, tpidr_el2",
+			ARM64_HAS_VIRT_HOST_EXTN)
+		: "=r" (off) :
 		"Q" (*(const unsigned long *)current_stack_pointer));
 
 	return off;
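
Note on the percpu.h change above: with the Virtualization Host Extensions the kernel itself runs at EL2, so when ARM64_HAS_VIRT_HOST_EXTN is set the per-CPU offset is kept in tpidr_el2 rather than tpidr_el1; the adr_this_cpu/ldr_this_cpu assembler macros and the KVM hyp helpers earlier in this patch read the same register, so per-CPU accesses resolve the same way regardless of the exception level the code runs at.
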
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index ba3a69a..3b79682 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -79,6 +79,7 @@
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
+#define TIF_FSCHECK		4	/* Check FS is USER_DS on return */
 #define TIF_NOHZ		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
@@ -89,6 +90,7 @@
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SINGLESTEP		21
 #define TIF_32BIT		22	/* 32bit process */
+#define TIF_SSBD		23	/* Wants SSB mitigation */
 #define TIF_MM_RELEASED		24
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -100,10 +102,12 @@
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_FSCHECK		(1 << TIF_FSCHECK)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+				 _TIF_FSCHECK)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index de21caa..45b3ac9 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -73,6 +73,9 @@
 {
 	current_thread_info()->addr_limit = fs;
 
+	/* On user-mode return, check fs is correct */
+	set_thread_flag(TIF_FSCHECK);
+
 	/*
 	 * Prevent a mispredicted conditional call to set_fs from forwarding
 	 * the wrong address limit to access_ok under speculation.
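
Note on the set_fs() change above: moving the address limit now also sets TIF_FSCHECK, which is folded into _TIF_WORK_MASK in thread_info.h, so the return-to-user work path can verify that the limit was put back to USER_DS before the task re-enters userspace (the check itself lives in the work-pending handling outside this hunk).
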
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 446eabd..cc3c030 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -51,6 +51,7 @@
 arm64-obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o	\
 					   cpu-reset.o
+arm64-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 
 ifeq ($(CONFIG_KVM),y)
 arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 06d650f..0917480 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -28,10 +28,12 @@
 #include <asm/sections.h>
 #include <linux/stop_machine.h>
 
-#define __ALT_PTR(a,f)		(u32 *)((void *)&(a)->f + (a)->f)
+#define __ALT_PTR(a,f)		((void *)&(a)->f + (a)->f)
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
 
+int alternatives_applied;
+
 struct alt_region {
 	struct alt_instr *begin;
 	struct alt_instr *end;
@@ -105,31 +107,52 @@
 	return insn;
 }
 
+static void patch_alternative(struct alt_instr *alt,
+			      __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	__le32 *replptr;
+	int i;
+
+	replptr = ALT_REPL_PTR(alt);
+	for (i = 0; i < nr_inst; i++) {
+		u32 insn;
+
+		insn = get_alt_insn(alt, origptr + i, replptr + i);
+		updptr[i] = cpu_to_le32(insn);
+	}
+}
+
 static void __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
-	u32 *origptr, *replptr;
+	__le32 *origptr;
+	alternative_cb_t alt_cb;
 
 	for (alt = region->begin; alt < region->end; alt++) {
-		u32 insn;
-		int i, nr_inst;
+		int nr_inst;
 
-		if (!cpus_have_cap(alt->cpufeature))
+		/* Use ARM64_CB_PATCH as an unconditional patch */
+		if (alt->cpufeature < ARM64_CB_PATCH &&
+		    !cpus_have_cap(alt->cpufeature))
 			continue;
 
-		BUG_ON(alt->alt_len != alt->orig_len);
+		if (alt->cpufeature == ARM64_CB_PATCH)
+			BUG_ON(alt->alt_len != 0);
+		else
+			BUG_ON(alt->alt_len != alt->orig_len);
 
 		pr_info_once("patching kernel code\n");
 
 		origptr = ALT_ORIG_PTR(alt);
-		replptr = ALT_REPL_PTR(alt);
-		nr_inst = alt->alt_len / sizeof(insn);
+		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
 
-		for (i = 0; i < nr_inst; i++) {
-			insn = get_alt_insn(alt, origptr + i, replptr + i);
-			*(origptr + i) = cpu_to_le32(insn);
-		}
+		if (alt->cpufeature < ARM64_CB_PATCH)
+			alt_cb = patch_alternative;
+		else
+			alt_cb  = ALT_REPL_PTR(alt);
+
+		alt_cb(alt, origptr, origptr, nr_inst);
 
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + nr_inst));
@@ -142,7 +165,6 @@
  */
 static int __apply_alternatives_multi_stop(void *unused)
 {
-	static int patched = 0;
 	struct alt_region region = {
 		.begin	= (struct alt_instr *)__alt_instructions,
 		.end	= (struct alt_instr *)__alt_instructions_end,
@@ -150,14 +172,14 @@
 
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
-		while (!READ_ONCE(patched))
+		while (!READ_ONCE(alternatives_applied))
 			cpu_relax();
 		isb();
 	} else {
-		BUG_ON(patched);
+		BUG_ON(alternatives_applied);
 		__apply_alternatives(&region);
 		/* Barriers provided by the cache flushing */
-		WRITE_ONCE(patched, 1);
+		WRITE_ONCE(alternatives_applied, 1);
 	}
 
 	return 0;
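
With ARM64_CB_PATCH, an alternatives entry can name a C callback instead of carrying replacement instructions; the callback itself writes nr_inst instructions into updptr (arm64_update_smccc_conduit in cpu_errata.c below is a real instance). A hypothetical callback, not part of this patch, that simply NOPs out the original instructions would look like:

static void __init patch_to_nops(struct alt_instr *alt,
				 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	/* A callback must emit exactly nr_inst instructions into updptr. */
	for (i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
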
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5d2d356..61ae19e 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -130,11 +130,13 @@
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
+  DEFINE(VCPU_WORKAROUND_FLAGS,	offsetof(struct kvm_vcpu, arch.workaround_flags));
   DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
   DEFINE(VCPU_FPEXC32_EL2,	offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
   DEFINE(VCPU_HOST_CONTEXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
+  DEFINE(HOST_CONTEXT_VCPU,	offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_SUSPEND_SZ,	sizeof(struct cpu_suspend_ctx));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index e7908c9..0566f18 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -17,9 +17,13 @@
  */
 
 #include <linux/types.h>
+#include <asm/cachetype.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <uapi/linux/psci.h>
+#include <linux/arm-smccc.h>
+#include <linux/psci.h>
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -31,12 +35,18 @@
 }
 
 static bool
-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
-				int scope)
+has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+			  int scope)
 {
+	u64 mask = CTR_CACHE_MINLINE_MASK;
+
+	/* Skip matching the min line sizes for cache type check */
+	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
+		mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
-		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
+	return (read_cpuid_cachetype() & mask) !=
+	       (arm64_ftr_reg_ctrel0.sys_val & mask);
 }
 
 static int cpu_enable_trap_ctr_access(void *__unused)
@@ -132,10 +142,6 @@
 	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
 }
 
-#include <uapi/linux/psci.h>
-#include <linux/arm-smccc.h>
-#include <linux/psci.h>
-
 #ifdef CONFIG_PSCI_BP_HARDENING
 static int enable_psci_bp_hardening(void *data)
 {
@@ -204,6 +210,178 @@
 }
 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
+#ifdef CONFIG_ARM64_SSBD
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+
+static const struct ssbd_options {
+	const char	*str;
+	int		state;
+} ssbd_options[] = {
+	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
+	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
+	{ "kernel",	ARM64_SSBD_KERNEL, },
+};
+
+static int __init ssbd_cfg(char *buf)
+{
+	int i;
+
+	if (!buf || !buf[0])
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
+		int len = strlen(ssbd_options[i].str);
+
+		if (strncmp(buf, ssbd_options[i].str, len))
+			continue;
+
+		ssbd_state = ssbd_options[i].state;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+early_param("ssbd", ssbd_cfg);
+
+void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+				       __le32 *origptr, __le32 *updptr,
+				       int nr_inst)
+{
+	u32 insn;
+
+	BUG_ON(nr_inst != 1);
+
+	switch (psci_ops.conduit) {
+	case PSCI_CONDUIT_HVC:
+		insn = aarch64_insn_get_hvc_value();
+		break;
+	case PSCI_CONDUIT_SMC:
+		insn = aarch64_insn_get_smc_value();
+		break;
+	default:
+		return;
+	}
+
+	*updptr = cpu_to_le32(insn);
+}
+
+void __init arm64_enable_wa2_handling(struct alt_instr *alt,
+				      __le32 *origptr, __le32 *updptr,
+				      int nr_inst)
+{
+	BUG_ON(nr_inst != 1);
+	/*
+	 * Only allow mitigation on EL1 entry/exit and guest
+	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
+	 * be flipped.
+	 */
+	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
+		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+void arm64_set_ssbd_mitigation(bool state)
+{
+	switch (psci_ops.conduit) {
+	case PSCI_CONDUIT_HVC:
+		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+		break;
+
+	case PSCI_CONDUIT_SMC:
+		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+		break;
+
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+}
+
+static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+				    int scope)
+{
+	struct arm_smccc_res res;
+	bool required = true;
+	s32 val;
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+		ssbd_state = ARM64_SSBD_UNKNOWN;
+		return false;
+	}
+
+	switch (psci_ops.conduit) {
+	case PSCI_CONDUIT_HVC:
+		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+		break;
+
+	case PSCI_CONDUIT_SMC:
+		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+		break;
+
+	default:
+		ssbd_state = ARM64_SSBD_UNKNOWN;
+		return false;
+	}
+
+	val = (s32)res.a0;
+
+	switch (val) {
+	case SMCCC_RET_NOT_SUPPORTED:
+		ssbd_state = ARM64_SSBD_UNKNOWN;
+		return false;
+
+	case SMCCC_RET_NOT_REQUIRED:
+		pr_info_once("%s mitigation not required\n", entry->desc);
+		ssbd_state = ARM64_SSBD_MITIGATED;
+		return false;
+
+	case SMCCC_RET_SUCCESS:
+		required = true;
+		break;
+
+	case 1:	/* Mitigation not required on this CPU */
+		required = false;
+		break;
+
+	default:
+		WARN_ON(1);
+		return false;
+	}
+
+	switch (ssbd_state) {
+	case ARM64_SSBD_FORCE_DISABLE:
+		pr_info_once("%s disabled from command-line\n", entry->desc);
+		arm64_set_ssbd_mitigation(false);
+		required = false;
+		break;
+
+	case ARM64_SSBD_KERNEL:
+		if (required) {
+			__this_cpu_write(arm64_ssbd_callback_required, 1);
+			arm64_set_ssbd_mitigation(true);
+		}
+		break;
+
+	case ARM64_SSBD_FORCE_ENABLE:
+		pr_info_once("%s forced from command-line\n", entry->desc);
+		arm64_set_ssbd_mitigation(true);
+		required = true;
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return required;
+}
+#endif	/* CONFIG_ARM64_SSBD */
+
 #define MIDR_RANGE(model, min, max) \
 	.def_scope = SCOPE_LOCAL_CPU, \
 	.matches = is_affected_midr_range, \
@@ -294,7 +472,14 @@
 	{
 		.desc = "Mismatched cache line size",
 		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
-		.matches = has_mismatched_cache_line_size,
+		.matches = has_mismatched_cache_type,
+		.def_scope = SCOPE_LOCAL_CPU,
+		.enable = cpu_enable_trap_ctr_access,
+	},
+	{
+		.desc = "Mismatched cache type",
+		.capability = ARM64_MISMATCHED_CACHE_TYPE,
+		.matches = has_mismatched_cache_type,
 		.def_scope = SCOPE_LOCAL_CPU,
 		.enable = cpu_enable_trap_ctr_access,
 	},
@@ -348,6 +533,14 @@
 		.enable = enable_smccc_arch_workaround_1,
 	},
 #endif
+#ifdef CONFIG_ARM64_SSBD
+	{
+		.desc = "Speculative Store Bypass Disable",
+		.def_scope = SCOPE_LOCAL_CPU,
+		.capability = ARM64_SSBD,
+		.matches = has_ssbd_mitigation,
+	},
+#endif
 	{
 	}
 };
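
For reference, the new ssbd= early parameter parsed by ssbd_cfg() above selects one of three global states; the mapping follows directly from the ssbd_options[] table:

	ssbd=force-on	->  ARM64_SSBD_FORCE_ENABLE   (mitigation always on)
	ssbd=force-off	->  ARM64_SSBD_FORCE_DISABLE  (mitigation always off)
	ssbd=kernel	->  ARM64_SSBD_KERNEL         (on while in the kernel; per-task control via prctl, see ssbd.c below)
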
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2eea592..db68387 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -153,7 +153,7 @@
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine.
@@ -161,7 +161,7 @@
 	 */
 	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
+	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
@@ -828,6 +828,22 @@
 early_param("kpti", parse_kpti);
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
+static int cpu_copy_el2regs(void *__unused)
+{
+	/*
+	 * Copy register values that aren't redirected by hardware.
+	 *
+	 * Before code patching, we only set tpidr_el1, all CPUs need to copy
+	 * this value to tpidr_el2 before we patch the code. Once we've done
+	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
+	 * do anything here.
+	 */
+	if (!alternatives_applied)
+		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+
+	return 0;
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -894,6 +910,7 @@
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
 		.def_scope = SCOPE_SYSTEM,
 		.matches = runs_at_el2,
+		.enable = cpu_copy_el2regs,
 	},
 	{
 		.desc = "32-bit EL0 Support",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7613ed1..58dec4b 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -18,6 +18,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/init.h>
 #include <linux/linkage.h>
 
@@ -97,6 +98,25 @@
 	add	\dst, \dst, #(\sym - .entry.tramp.text)
 	.endm
 
+	// This macro corrupts x0-x3. It is the caller's duty
+	// to save/restore them if required.
+	.macro	apply_ssbd, state, targ, tmp1, tmp2
+#ifdef CONFIG_ARM64_SSBD
+alternative_cb	arm64_enable_wa2_handling
+	b	\targ
+alternative_cb_end
+	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
+	cbz	\tmp2, \targ
+	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
+	tbnz	\tmp2, #TIF_SSBD, \targ
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+	mov	w1, #\state
+alternative_cb	arm64_update_smccc_conduit
+	nop					// Patched to SMC/HVC #0
+alternative_cb_end
+#endif
+	.endm
+
 	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
@@ -123,6 +143,14 @@
 	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
 	disable_step_tsk x19, x20		// exceptions when scheduling.
 
+	apply_ssbd 1, 1f, x22, x23
+
+#ifdef CONFIG_ARM64_SSBD
+	ldp	x0, x1, [sp, #16 * 0]
+	ldp	x2, x3, [sp, #16 * 1]
+#endif
+1:
+
 	mov	x29, xzr			// fp pointed to user-space
 	.else
 	add	x21, sp, #S_FRAME_SIZE
@@ -251,6 +279,8 @@
 alternative_else_nop_endif
 #endif
 3:
+	apply_ssbd 0, 5f, x0, x1
+5:
 	.endif
 
 	msr	elr_el1, x21			// set up the return data
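
The apply_ssbd macro reads more easily as C. The sketch below is only an approximation of the assembly, not part of the patch: it assumes the arm64_get_ssbd_state() accessor that pairs with ssbd_state and shows the SMC conduit only, whereas the real macro is patched to SMC or HVC at boot. Kernel entry passes state = 1, return to userspace passes state = 0.

static __always_inline void apply_ssbd_sketch(int state)
{
	/* arm64_enable_wa2_handling leaves the early skip in place unless
	 * the global policy is ARM64_SSBD_KERNEL (dynamic mitigation). */
	if (arm64_get_ssbd_state() != ARM64_SSBD_KERNEL)
		return;

	if (!__this_cpu_read(arm64_ssbd_callback_required))
		return;				/* this CPU needs no firmware call */

	if (test_thread_flag(TIF_SSBD))
		return;				/* task keeps the mitigation pinned on */

	/* The nop in the macro is patched into SMC/HVC #0 by arm64_update_smccc_conduit. */
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
}
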
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 8bed26a..db603cd 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -305,6 +305,17 @@
 
 		sleep_cpu = -EINVAL;
 		__cpu_suspend_exit();
+
+		/*
+		 * Just in case the boot kernel did turn the SSBD
+		 * mitigation off behind our back, let's set the state
+		 * to what we expect it to be.
+		 */
+		switch (arm64_get_ssbd_state()) {
+		case ARM64_SSBD_FORCE_ENABLE:
+		case ARM64_SSBD_KERNEL:
+			arm64_set_ssbd_mitigation(true);
+		}
 	}
 
 	local_dbg_restore(flags);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f5077ea..30bcae0 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -274,7 +274,7 @@
 		break;
 	case KPROBE_HIT_SS:
 	case KPROBE_REENTER:
-		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+		pr_warn("Unrecoverable kprobe detected.\n");
 		dump_kprobe(p);
 		BUG();
 		break;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 404dd67..c59e675 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -25,6 +25,7 @@
 #include <linux/uaccess.h>
 #include <linux/tracehook.h>
 #include <linux/ratelimit.h>
+#include <linux/syscalls.h>
 
 #include <asm/debug-monitors.h>
 #include <asm/elf.h>
@@ -408,7 +409,11 @@
 	 * Update the trace code with the current status.
 	 */
 	trace_hardirqs_off();
+
 	do {
+		/* Check valid user FS if needed */
+		addr_limit_user_check();
+
 		if (thread_flags & _TIF_NEED_RESCHED) {
 			schedule();
 		} else {
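
addr_limit_user_check() comes from <linux/syscalls.h> and is not part of this hunk; simplified, it amounts to roughly the following (the in-tree version reports the corruption slightly differently):

static inline void addr_limit_user_check(void)
{
#ifdef TIF_FSCHECK
	if (!test_thread_flag(TIF_FSCHECK))
		return;

	/* A stale set_fs(KERNEL_DS) must never survive into userspace. */
	if (WARN_ON(!segment_eq(get_fs(), USER_DS)))
		force_sig(SIGKILL, current);

	clear_thread_flag(TIF_FSCHECK);
#endif
}
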
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 69d3266..7c45aa7 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -212,7 +212,7 @@
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
new file mode 100644
index 0000000..0560738
--- /dev/null
+++ b/arch/arm64/kernel/ssbd.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/prctl.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+
+#include <asm/cpufeature.h>
+
+/*
+ * prctl interface for SSBD
+ */
+static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+	int state = arm64_get_ssbd_state();
+
+	/* Unsupported */
+	if (state == ARM64_SSBD_UNKNOWN)
+		return -EINVAL;
+
+	/* Treat the unaffected/mitigated state separately */
+	if (state == ARM64_SSBD_MITIGATED) {
+		switch (ctrl) {
+		case PR_SPEC_ENABLE:
+			return -EPERM;
+		case PR_SPEC_DISABLE:
+		case PR_SPEC_FORCE_DISABLE:
+			return 0;
+		}
+	}
+
+	/*
+	 * Things are a bit backward here: the arm64 internal API
+	 * *enables the mitigation* when the userspace API *disables
+	 * speculation*. So much fun.
+	 */
+	switch (ctrl) {
+	case PR_SPEC_ENABLE:
+		/* If speculation is force disabled, enable is not allowed */
+		if (state == ARM64_SSBD_FORCE_ENABLE ||
+		    task_spec_ssb_force_disable(task))
+			return -EPERM;
+		task_clear_spec_ssb_disable(task);
+		clear_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	case PR_SPEC_DISABLE:
+		if (state == ARM64_SSBD_FORCE_DISABLE)
+			return -EPERM;
+		task_set_spec_ssb_disable(task);
+		set_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	case PR_SPEC_FORCE_DISABLE:
+		if (state == ARM64_SSBD_FORCE_DISABLE)
+			return -EPERM;
+		task_set_spec_ssb_disable(task);
+		task_set_spec_ssb_force_disable(task);
+		set_tsk_thread_flag(task, TIF_SSBD);
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+			     unsigned long ctrl)
+{
+	switch (which) {
+	case PR_SPEC_STORE_BYPASS:
+		return ssbd_prctl_set(task, ctrl);
+	default:
+		return -ENODEV;
+	}
+}
+
+static int ssbd_prctl_get(struct task_struct *task)
+{
+	switch (arm64_get_ssbd_state()) {
+	case ARM64_SSBD_UNKNOWN:
+		return -EINVAL;
+	case ARM64_SSBD_FORCE_ENABLE:
+		return PR_SPEC_DISABLE;
+	case ARM64_SSBD_KERNEL:
+		if (task_spec_ssb_force_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ssb_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+	case ARM64_SSBD_FORCE_DISABLE:
+		return PR_SPEC_ENABLE;
+	default:
+		return PR_SPEC_NOT_AFFECTED;
+	}
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+	switch (which) {
+	case PR_SPEC_STORE_BYPASS:
+		return ssbd_prctl_get(task);
+	default:
+		return -ENODEV;
+	}
+}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 1e3be90..e12f2d0 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -61,6 +61,14 @@
 	 */
 	if (hw_breakpoint_restore)
 		hw_breakpoint_restore(cpu);
+
+	/*
+	 * On resume, firmware implementing dynamic mitigation will
+	 * have turned the mitigation on. If the user has forcefully
+	 * disabled it, make sure their wishes are obeyed.
+	 */
+	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+		arm64_set_ssbd_mitigation(false);
 }
 
 /*
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d3e0a2f..e41a7b4 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,6 +57,45 @@
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		size = sizeof(__u64);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		size = sizeof(__uint128_t);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		size = sizeof(__u32);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (KVM_REG_SIZE(reg->id) == size &&
+	    IS_ALIGNED(off, size / sizeof(__u32)))
+		return 0;
+
+	return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	/*
@@ -76,6 +115,9 @@
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
@@ -98,6 +140,9 @@
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 		return -EINVAL;
 
@@ -107,17 +152,25 @@
 	}
 
 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+		u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
 		switch (mode) {
 		case COMPAT_PSR_MODE_USR:
+			if (!system_supports_32bit_el0())
+				return -EINVAL;
+			break;
 		case COMPAT_PSR_MODE_FIQ:
 		case COMPAT_PSR_MODE_IRQ:
 		case COMPAT_PSR_MODE_SVC:
 		case COMPAT_PSR_MODE_ABT:
 		case COMPAT_PSR_MODE_UND:
+			if (!vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1h:
+			if (vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
 			break;
 		default:
 			err = -EINVAL;
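
validate_core_offset() cross-checks the size encoded in reg->id against the register actually addressed, so an id claiming KVM_REG_SIZE_U32 for a 64-bit core register is now rejected instead of clobbering neighbouring state. A userspace sketch of a well-formed access; vcpu_fd is assumed to come from KVM_CREATE_VCPU and arm64 host headers are assumed:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_x0(int vcpu_fd, __u64 *x0)
{
	struct kvm_one_reg reg = {
		/* 64-bit core register: size and offset must agree. */
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(regs.regs[0]),
		.addr = (__u64)(unsigned long)x0,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
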
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 4bbff90..db5efaf 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -118,6 +118,10 @@
 	kern_hyp_va	x2
 	msr	vbar_el2, x2
 
+	/* copy tpidr_el1 into tpidr_el2 for use by HYP */
+	mrs	x1, tpidr_el1
+	msr	tpidr_el2, x1
+
 	/* Hello, World! */
 	eret
 ENDPROC(__kvm_hyp_init)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 12ee62d..a360ac6 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -62,9 +62,6 @@
 	// Store the host regs
 	save_callee_saved_regs x1
 
-	// Store the host_ctxt for use at exit time
-	str	x1, [sp, #-16]!
-
 	add	x18, x0, #VCPU_CONTEXT
 
 	// Restore guest regs x0-x17
@@ -118,8 +115,7 @@
 	// Store the guest regs x19-x29, lr
 	save_callee_saved_regs x1
 
-	// Restore the host_ctxt from the stack
-	ldr	x2, [sp], #16
+	get_host_ctxt	x2, x3
 
 	// Now restore the host regs
 	restore_callee_saved_regs x2
@@ -159,6 +155,10 @@
 ENDPROC(__guest_exit)
 
 ENTRY(__fpsimd_guest_restore)
+	// x0: esr
+	// x1: vcpu
+	// x2-x29,lr: vcpu regs
+	// vcpu x0-x1 on the stack
 	stp	x2, x3, [sp, #-16]!
 	stp	x4, lr, [sp, #-16]!
 
@@ -173,7 +173,7 @@
 alternative_endif
 	isb
 
-	mrs	x3, tpidr_el2
+	mov	x3, x1
 
 	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
 	kern_hyp_va x0
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e9d50c..bf4988f 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -72,13 +72,8 @@
 el1_sync:				// Guest trapped into EL2
 	stp	x0, x1, [sp, #-16]!
 
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	mrs	x1, esr_el2
-alternative_else
-	mrs	x1, esr_el1
-alternative_endif
-	lsr	x0, x1, #ESR_ELx_EC_SHIFT
-
+	mrs	x0, esr_el2
+	lsr	x0, x0, #ESR_ELx_EC_SHIFT
 	cmp	x0, #ESR_ELx_EC_HVC64
 	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
@@ -112,33 +107,73 @@
 	 */
 	ldr	x1, [sp]				// Guest's x0
 	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+	cbz	w1, wa_epilogue
+
+	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
+			  ARM_SMCCC_ARCH_WORKAROUND_2)
 	cbnz	w1, el1_trap
-	mov	x0, x1
+
+#ifdef CONFIG_ARM64_SSBD
+alternative_cb	arm64_enable_wa2_handling
+	b	wa2_end
+alternative_cb_end
+	get_vcpu_ptr	x2, x0
+	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
+
+	// Sanitize the argument and update the guest flags
+	ldr	x1, [sp, #8]			// Guest's x1
+	clz	w1, w1				// Murphy's device:
+	lsr	w1, w1, #5			// w1 = !!w1 without using
+	eor	w1, w1, #1			// the flags...
+	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
+	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
+
+	/* Check that we actually need to perform the call */
+	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
+	cbz	x0, wa2_end
+
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+	smc	#0
+
+	/* Don't leak data from the SMC call */
+	mov	x3, xzr
+wa2_end:
+	mov	x2, xzr
+	mov	x1, xzr
+#endif
+
+wa_epilogue:
+	mov	x0, xzr
 	add	sp, sp, #16
 	eret
 
 el1_trap:
+	get_vcpu_ptr	x1, x0
+
+	mrs		x0, esr_el2
+	lsr		x0, x0, #ESR_ELx_EC_SHIFT
 	/*
 	 * x0: ESR_EC
+	 * x1: vcpu pointer
 	 */
 
 	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
 	cmp	x0, #ESR_ELx_EC_FP_ASIMD
 	b.eq	__fpsimd_guest_restore
 
-	mrs	x1, tpidr_el2
 	mov	x0, #ARM_EXCEPTION_TRAP
 	b	__guest_exit
 
 el1_irq:
 	stp     x0, x1, [sp, #-16]!
-	mrs	x1, tpidr_el2
+	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_IRQ
 	b	__guest_exit
 
 el1_error:
 	stp     x0, x1, [sp, #-16]!
-	mrs	x1, tpidr_el2
+	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_EL1_SERROR
 	b	__guest_exit
 
@@ -173,6 +208,11 @@
 	eret
 ENDPROC(__hyp_do_panic)
 
+ENTRY(__hyp_panic)
+	get_host_ctxt x0, x1
+	b	hyp_panic
+ENDPROC(__hyp_panic)
+
 .macro invalid_vector	label, target = __hyp_panic
 	.align	2
 \label:
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 849ee8a..c6a76ea 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>
 #include <uapi/linux/psci.h>
@@ -267,6 +268,39 @@
 	write_sysreg_el2(*vcpu_pc(vcpu), elr);
 }
 
+static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
+{
+	if (!cpus_have_cap(ARM64_SSBD))
+		return false;
+
+	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
+}
+
+static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+	/*
+	 * The host runs with the workaround always present. If the
+	 * guest wants it disabled, so be it...
+	 */
+	if (__needs_ssbd_off(vcpu) &&
+	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
+		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
+#endif
+}
+
+static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+	/*
+	 * If the guest has disabled the workaround, bring it back on.
+	 */
+	if (__needs_ssbd_off(vcpu) &&
+	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
+		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
+#endif
+}
+
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
@@ -275,9 +309,9 @@
 	u64 exit_code;
 
 	vcpu = kern_hyp_va(vcpu);
-	write_sysreg(vcpu, tpidr_el2);
 
 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 
 	__sysreg_save_host_state(host_ctxt);
@@ -297,6 +331,8 @@
 	__sysreg_restore_guest_state(guest_ctxt);
 	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
 
+	__set_guest_arch_workaround_state(vcpu);
+
 	/* Jump in the fire! */
 again:
 	exit_code = __guest_enter(vcpu, host_ctxt);
@@ -353,6 +389,8 @@
 		}
 	}
 
+	__set_host_arch_workaround_state(vcpu);
+
 	fp_enabled = __fpsimd_enabled();
 
 	__sysreg_save_guest_state(guest_ctxt);
@@ -378,7 +416,8 @@
 
 static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
 
-static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
+static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
+					     struct kvm_vcpu *vcpu)
 {
 	unsigned long str_va;
 
@@ -392,35 +431,32 @@
 	__hyp_do_panic(str_va,
 		       spsr,  elr,
 		       read_sysreg(esr_el2),   read_sysreg_el2(far),
-		       read_sysreg(hpfar_el2), par,
-		       (void *)read_sysreg(tpidr_el2));
+		       read_sysreg(hpfar_el2), par, vcpu);
 }
 
-static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
+static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
+					    struct kvm_vcpu *vcpu)
 {
 	panic(__hyp_panic_string,
 	      spsr,  elr,
 	      read_sysreg_el2(esr),   read_sysreg_el2(far),
-	      read_sysreg(hpfar_el2), par,
-	      (void *)read_sysreg(tpidr_el2));
+	      read_sysreg(hpfar_el2), par, vcpu);
 }
 
 static hyp_alternate_select(__hyp_call_panic,
 			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
 			    ARM64_HAS_VIRT_HOST_EXTN);
 
-void __hyp_text __noreturn __hyp_panic(void)
+void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 {
+	struct kvm_vcpu *vcpu = NULL;
+
 	u64 spsr = read_sysreg_el2(spsr);
 	u64 elr = read_sysreg_el2(elr);
 	u64 par = read_sysreg(par_el1);
 
 	if (read_sysreg(vttbr_el2)) {
-		struct kvm_vcpu *vcpu;
-		struct kvm_cpu_context *host_ctxt;
-
-		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
-		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+		vcpu = host_ctxt->__hyp_running_vcpu;
 		__timer_save_state(vcpu);
 		__deactivate_traps(vcpu);
 		__deactivate_vm(vcpu);
@@ -428,7 +464,7 @@
 	}
 
 	/* Call panic for real */
-	__hyp_call_panic()(spsr, elr, par);
+	__hyp_call_panic()(spsr, elr, par, vcpu);
 
 	unreachable();
 }
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 9341376..e19d89c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -27,8 +27,8 @@
 /*
  * Non-VHE: Both host and guest must save everything.
  *
- * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
- * pstate, and guest must save everything.
+ * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
+ * and guest must save everything.
  */
 
 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
@@ -36,11 +36,8 @@
 	ctxt->sys_regs[ACTLR_EL1]	= read_sysreg(actlr_el1);
 	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
 	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
-	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);
 	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
 	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
-	ctxt->gp_regs.regs.pc		= read_sysreg_el2(elr);
-	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(spsr);
 }
 
 static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
@@ -62,10 +59,13 @@
 	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(amair);
 	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(cntkctl);
 	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
+	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);
 
 	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
 	ctxt->gp_regs.elr_el1		= read_sysreg_el1(elr);
 	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
+	ctxt->gp_regs.regs.pc		= read_sysreg_el2(elr);
+	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(spsr);
 }
 
 static hyp_alternate_select(__sysreg_call_save_host_state,
@@ -89,11 +89,8 @@
 	write_sysreg(ctxt->sys_regs[ACTLR_EL1],	  actlr_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL0],	  tpidr_el0);
 	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
-	write_sysreg(ctxt->sys_regs[TPIDR_EL1],	  tpidr_el1);
 	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
 	write_sysreg(ctxt->gp_regs.regs.sp,	  sp_el0);
-	write_sysreg_el2(ctxt->gp_regs.regs.pc,	  elr);
-	write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
 }
 
 static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
@@ -115,10 +112,13 @@
 	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	amair);
 	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], 	cntkctl);
 	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
+	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);
 
 	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
 	write_sysreg_el1(ctxt->gp_regs.elr_el1,		elr);
 	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+	write_sysreg_el2(ctxt->gp_regs.regs.pc,		elr);
+	write_sysreg_el2(ctxt->gp_regs.regs.pstate,	spsr);
 }
 
 static hyp_alternate_select(__sysreg_call_restore_host_state,
@@ -183,3 +183,8 @@
 	if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
 		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
 }
+
+void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
+{
+	asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
+}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5bc4608..29a27a0 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -135,6 +135,10 @@
 	/* Reset PMU */
 	kvm_pmu_vcpu_reset(vcpu);
 
+	/* Default workaround setup is enabled (if supported) */
+	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
+		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
+
 	/* Reset timer */
 	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 2b35b67..c410c9e 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -146,11 +146,13 @@
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
-#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
-
 int pfn_valid(unsigned long pfn)
 {
-	return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT);
+	phys_addr_t addr = pfn << PAGE_SHIFT;
+
+	if ((addr >> PAGE_SHIFT) != pfn)
+		return 0;
+	return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
@@ -471,11 +473,13 @@
 	BUILD_BUG_ON(TASK_SIZE_32			> TASK_SIZE_64);
 #endif
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 	/*
 	 * Make sure we chose the upper bound of sizeof(struct page)
-	 * correctly.
+	 * correctly when sizing the VMEMMAP array.
 	 */
 	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif
 
 	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
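
The rewritten pfn_valid() rejects pfns whose physical address would overflow when shifted, before ever consulting memblock. A worked example of the new check, assuming PAGE_SHIFT == 12:

	unsigned long pfn  = 1UL << 52;			/* bogus value, e.g. from corrupted input */
	phys_addr_t   addr = (phys_addr_t)pfn << 12;	/* == 0: bit 52 shifts past bit 63 */

	/* (addr >> 12) == 0 != pfn, so pfn_valid() returns 0 rather than
	 * asking memblock_is_map_memory() about the wrapped address 0. */
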
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6e989eb..9e2ec8a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -862,12 +862,12 @@
 	return 1;
 }
 
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
 	return pud_none(*pud);
 }
 
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	return pmd_none(*pmd);
 }
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 5e4a59b..2691a18 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -211,7 +211,7 @@
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
 	int r;
 
@@ -232,7 +232,7 @@
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
 	int r;
 
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index b901778..0e2be48 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -68,7 +68,7 @@
 			panic("Can't create %s() memory pool!", __func__);
 		else
 			gen_pool_add(coherent_pool,
-				pfn_to_virt(max_low_pfn),
+				(unsigned long)pfn_to_virt(max_low_pfn),
 				hexagon_coherent_pool_size, -1);
 	}
 
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index fb95aed..dac7564 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -43,6 +43,7 @@
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 				  unsigned long address)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }
 
@@ -73,8 +74,9 @@
 	return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }
 
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 91d2068..0f3fe6a 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -21,17 +21,19 @@
 quiet_cmd_cp = CP      $< $@$2
 	cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
 
-quiet_cmd_strip = STRIP   $@
+quiet_cmd_strip = STRIP   $< $@$2
 	cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
-				-K _fdt_start vmlinux -o $@
+				-K _fdt_start $< -o $@$2
 
 UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
+UIMAGE_IN = $@
+UIMAGE_OUT = $@.ub
 
 $(obj)/simpleImage.%: vmlinux FORCE
 	$(call if_changed,cp,.unstrip)
 	$(call if_changed,objcopy)
 	$(call if_changed,uimage)
-	$(call if_changed,strip)
-	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+	$(call if_changed,strip,.strip)
+	@echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'
 
 clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index d071a3a..fc97a11 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@
 
 void ath79_ddr_wb_flush(u32 reg)
 {
-	void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+	void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
 
 	/* Flush the DDR write buffer. */
 	__raw_writel(0x1, flush_reg);
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index f206daf..26a058d 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -40,6 +40,7 @@
 
 static void ath79_restart(char *command)
 {
+	local_irq_disable();
 	ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
 	for (;;)
 		if (cpu_wait)
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 8c9cbf1..6054d49 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -212,12 +212,6 @@
 		 */
 		if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
 			cpu_wait = NULL;
-
-		/*
-		 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
-		 * Enable ExternalSync for sync instruction to take effect
-		 */
-		set_c0_config7(MIPS_CONF7_ES);
 		break;
 #endif
 	}
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 37a932d..1ba6bcf 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -366,6 +366,7 @@
 		return 0;
 
 	pd = of_find_device_by_node(ehci_node);
+	of_node_put(ehci_node);
 	if (!pd)
 		return 0;
 
@@ -428,6 +429,7 @@
 		return 0;
 
 	pd = of_find_device_by_node(ohci_node);
+	of_node_put(ohci_node);
 	if (!pd)
 		return 0;
 
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index d493ccb..cf5b564 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -159,6 +159,7 @@
 					    "mti,cpu-interrupt-controller");
 	if (!cpu_has_veic && !intc_node)
 		mips_cpu_irq_init();
+	of_node_put(intc_node);
 
 	irqchip_init();
 }
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 853b2f4..06049b6 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -141,14 +141,14 @@
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
 {
-	return (unsigned long)address - PAGE_OFFSET;
+	return virt_to_phys(address);
 }
 
-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
 {
-	return (void *)(address + PAGE_OFFSET);
+	return phys_to_virt(address);
 }
 
 #define isa_page_to_bus page_to_phys
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 441faa9..6e6c0fe 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -134,6 +134,7 @@
 static inline void ath79_reset_wr(unsigned reg, u32 val)
 {
 	__raw_writel(val, ath79_reset_base + reg);
+	(void) __raw_readl(ath79_reset_base + reg); /* flush */
 }
 
 static inline u32 ath79_reset_rr(unsigned reg)
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 22a6782..df78b2c 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -663,8 +663,6 @@
 #define MIPS_CONF7_WII		(_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS		(_ULCAST_(1) << 2)
-/* ExternalSync */
-#define MIPS_CONF7_ES		(_ULCAST_(1) << 8)
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
@@ -2643,7 +2641,6 @@
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 0d36c87..ad6f019 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -141,7 +141,7 @@
 
 #define NUM_DSP_REGS   6
 
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
 
 struct mips_dsp_state {
 	dspreg_t	dspr[NUM_DSP_REGS];
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 28448d35..a2a5a85 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,4 +1,4 @@
 platform-$(CONFIG_MACH_INGENIC)	+= jz4740/
 cflags-$(CONFIG_MACH_INGENIC)	+= -I$(srctree)/arch/mips/include/asm/mach-jz4740
 load-$(CONFIG_MACH_INGENIC)	+= 0xffffffff80010000
-zload-$(CONFIG_MACH_INGENIC)	+= 0xffffffff80600000
+zload-$(CONFIG_MACH_INGENIC)	+= 0xffffffff81000000
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ebb575c..ba315e5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -26,6 +26,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -117,7 +118,6 @@
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
-	p->set_child_tid = p->clear_child_tid = NULL;
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
@@ -633,28 +633,42 @@
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-	regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+	struct call_single_data *csd;
+	int cpu;
 
-	if (regs)
-		show_regs(regs);
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
 
-	dump_stack();
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 4f64913..b702ba3 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -876,7 +876,7 @@
 				goto out;
 			}
 			dregs = __get_dsp_regs(child);
-			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+			tmp = dregs[addr - DSP_BASE];
 			break;
 		}
 		case DSP_CONTROL:
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index b1e9457..4840af1 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -140,7 +140,7 @@
 				goto out;
 			}
 			dregs = __get_dsp_regs(child);
-			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+			tmp = dregs[addr - DSP_BASE];
 			break;
 		}
 		case DSP_CONTROL:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bb1d9ff..8e07496 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index f9dbfb1..e88344e 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -14,12 +14,14 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/irqchip/mips-gic.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/timekeeper_internal.h>
 
 #include <asm/abi.h>
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 /* Kernel-provided data used by the VDSO. */
@@ -129,12 +131,30 @@
 	vvar_size = gic_size + PAGE_SIZE;
 	size = vvar_size + image->size;
 
+	/*
+	 * Find a region that's large enough for us to perform the
+	 * colour-matching alignment below.
+	 */
+	if (cpu_has_dc_aliases)
+		size += shm_align_mask + 1;
+
 	base = get_unmapped_area(NULL, 0, size, 0, 0);
 	if (IS_ERR_VALUE(base)) {
 		ret = base;
 		goto out;
 	}
 
+	/*
+	 * If we suffer from dcache aliasing, ensure that the VDSO data page
+	 * mapping is coloured the same as the kernel's mapping of that memory.
+	 * This ensures that when the kernel updates the VDSO data userland
+	 * will observe it without requiring cache invalidations.
+	 */
+	if (cpu_has_dc_aliases) {
+		base = __ALIGN_MASK(base, shm_align_mask);
+		base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+	}
+
 	data_addr = base + gic_size;
 	vdso_addr = data_addr + PAGE_SIZE;
 
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
index 111ad47..4c2483f 100644
--- a/arch/mips/lib/multi3.c
+++ b/arch/mips/lib/multi3.c
@@ -4,12 +4,12 @@
 #include "libgcc.h"
 
 /*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
  *
  * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
  */
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
 
 /* multiply 64-bit values, low 64-bits returned */
 static inline long long notrace dmulu(long long a, long long b)
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
index f7c905e..92dc6ba 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
@@ -138,7 +138,7 @@
 		break;
 	case PCI_OHCI_INT_REG:
 		_rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
-		if ((lo & 0x00000f00) == CS5536_USB_INTR)
+		if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
 			conf_data = 1;
 		break;
 	default:
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 43fa682..0ff379f 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -835,7 +835,8 @@
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
 	/* Catch bad driver code */
-	BUG_ON(size == 0);
+	if (WARN_ON(size == 0))
+		return;
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@
 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 {
 	/* Catch bad driver code */
-	BUG_ON(size == 0);
+	if (WARN_ON(size == 0))
+		return;
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1f18962..0dbcd90b 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -97,6 +98,20 @@
 	return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -115,8 +130,8 @@
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+	unsigned long offset, pfn, last_pfn;
 	struct vm_struct * area;
-	unsigned long offset;
 	phys_addr_t last_addr;
 	void * addr;
 
@@ -136,18 +151,16 @@
 		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
 	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
+	 * Don't allow anybody to remap RAM that may be allocated by the page
+	 * allocator, since that could lead to races & data clobbering.
 	 */
-	if (phys_addr < virt_to_phys(high_memory)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
-				return NULL;
+	pfn = PFN_DOWN(phys_addr);
+	last_pfn = PFN_DOWN(last_addr);
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
+		return NULL;
 	}
 
 	/*
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index f6325fa..64ae8c0 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -55,7 +55,7 @@
 	phys_addr_t size = resource_size(rsrc);
 
 	*start = fixup_bigphys_addr(rsrc->start, size);
-	*end = rsrc->start + size;
+	*end = rsrc->start + size - 1;
 }
 
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
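
The fix above is a plain off-by-one: a resource's end is its last valid byte, not one past it. For example, for a 4 KiB BAR starting at 0x10000000:

	resource_size_t start = 0x10000000;
	resource_size_t size  = 0x1000;			/* resource_size(rsrc) */
	resource_size_t end   = start + size - 1;	/* 0x10000fff, the last byte */
	/* The old "start + size" (0x10001000) pointed one byte past the BAR. */
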
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 7095dfe..9623721 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -152,8 +152,6 @@
 
 	top_of_kernel_stack = sp;
 
-	p->set_child_tid = p->clear_child_tid = NULL;
-
 	/* Locate userspace context on stack... */
 	sp -= STACK_FRAME_OVERHEAD;	/* redzone */
 	sp -= sizeof(struct pt_regs);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a14b865..3c37af1 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -184,7 +184,7 @@
 
 config MLONGCALLS
 	bool "Enable the -mlong-calls compiler option for big kernels"
-	def_bool y if (!MODULES)
+	default y
 	depends on PA8X00
 	help
 	  If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 0000000..dbaaca84
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+   which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb()		do { synchronize_caches(); } while (0)
+#define rmb()		mb()
+#define wmb()		mb()
+#define dma_rmb()	mb()
+#define dma_wmb()	mb()
+#else
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define dma_rmb()	barrier()
+#define dma_wmb()	barrier()
+#endif
+
+#define __smp_mb()	mb()
+#define __smp_rmb()	mb()
+#define __smp_wmb()	mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index e32936c..7031483 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -26,7 +26,6 @@
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -36,16 +35,15 @@
 				local_irq_disable();
 			} else
 				cpu_relax();
-	mb();
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -53,10 +51,8 @@
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
         ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }
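
The reordering in arch_spin_unlock() matters because the release store is what lets the next CPU in: with the barrier after `*a = 1`, another CPU could acquire the lock and observe stale data from the previous critical section. A sketch of the ordering rule only, not a drop-in replacement:

static inline void release_sketch(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();		/* publish every store made under the lock ... */
	*a = 1;		/* ... before the lock word reads as free */
}
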
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index e3d3e8e..0156144 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -482,6 +482,8 @@
 	.macro		tlb_unlock0	spc,tmp
 #ifdef CONFIG_SMP
 	or,COND(=)	%r0,\spc,%r0
+	sync
+	or,COND(=)	%r0,\spc,%r0
 	stw             \spc,0(\tmp)
 #endif
 	.endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 67b0f75..3e163df 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -354,6 +354,7 @@
 	.macro	tlb_unlock	la,flags,tmp
 #ifdef CONFIG_SMP
 	ldi		1,\tmp
+	sync
 	stw		\tmp,0(\la)
 	mtsm		\flags
 #endif
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index e775f80..5f7e57f 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -629,11 +629,12 @@
 	stw	%r1, 4(%sr2,%r20)
 #endif
 	/* The load and store could fail */
-1:	ldw,ma	0(%r26), %r28
+1:	ldw	0(%r26), %r28
 	sub,<>	%r28, %r25, %r0
-2:	stw,ma	%r24, 0(%r26)
+2:	stw	%r24, 0(%r26)
 	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
@@ -647,6 +648,7 @@
 3:		
 	/* Error occurred on load or store */
 	/* Free lock */
+	sync
 	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
@@ -796,30 +798,30 @@
 	ldo	1(%r0),%r28
 
 	/* 8bit CAS */
-13:	ldb,ma	0(%r26), %r29
+13:	ldb	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-14:	stb,ma	%r24, 0(%r26)
+14:	stb	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 16bit CAS */
-15:	ldh,ma	0(%r26), %r29
+15:	ldh	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-16:	sth,ma	%r24, 0(%r26)
+16:	sth	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 32bit CAS */
-17:	ldw,ma	0(%r26), %r29
+17:	ldw	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-18:	stw,ma	%r24, 0(%r26)
+18:	stw	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
@@ -827,10 +829,10 @@
 
 	/* 64bit CAS */
 #ifdef CONFIG_64BIT
-19:	ldd,ma	0(%r26), %r29
+19:	ldd	0(%r26), %r29
 	sub,*=	%r29, %r25, %r0
 	b,n	cas2_end
-20:	std,ma	%r24, 0(%r26)
+20:	std	%r24, 0(%r26)
 	copy	%r0, %r28
 #else
 	/* Compare first word */
@@ -848,7 +850,8 @@
 
 cas2_end:
 	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
@@ -858,6 +861,7 @@
 22:
 	/* Error occurred on load or store */
 	/* Free lock */
+	sync
 	stw	%r20, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	ldo	1(%r0),%r28
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 0031806..f93238a 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -190,9 +190,6 @@
 	struct cpumask	online_mask;
 };
 
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES	(INIT_MEMBLOCK_REGIONS + 2)
-
 struct fad_crash_memory_ranges {
 	unsigned long long	base;
 	unsigned long long	size;
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 27843665..620e08d 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -450,9 +450,11 @@
 
 	driver = eeh_pcid_get(dev);
 	if (driver) {
-		eeh_pcid_put(dev);
-		if (driver->err_handler)
+		if (driver->err_handler) {
+			eeh_pcid_put(dev);
 			return NULL;
+		}
+		eeh_pcid_put(dev);
 	}
 
 #ifdef CONFIG_PPC_POWERNV
@@ -489,17 +491,19 @@
 	if (eeh_dev_removed(edev))
 		return NULL;
 
-	driver = eeh_pcid_get(dev);
-	if (driver) {
-		eeh_pcid_put(dev);
-		if (removed &&
-		    eeh_pe_passed(edev->pe))
+	if (removed) {
+		if (eeh_pe_passed(edev->pe))
 			return NULL;
-		if (removed &&
-		    driver->err_handler &&
-		    driver->err_handler->error_detected &&
-		    driver->err_handler->slot_reset)
-			return NULL;
+		driver = eeh_pcid_get(dev);
+		if (driver) {
+			if (driver->err_handler &&
+			    driver->err_handler->error_detected &&
+			    driver->err_handler->slot_reset) {
+				eeh_pcid_put(dev);
+				return NULL;
+			}
+			eeh_pcid_put(dev);
+		}
 	}
 
 	/* Remove it from PCI subsystem */
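
Both hunks above fix the same reference-counting mistake: eeh_pcid_get() pins the PCI driver, so it must not be dropped until the last dereference of driver->err_handler. A sketch of the safe shape of the pattern (has_handler is just an illustrative local):

	struct pci_driver *driver;
	bool has_handler = false;

	driver = eeh_pcid_get(dev);		/* pins dev->driver */
	if (driver) {
		if (driver->err_handler && driver->err_handler->error_detected)
			has_handler = true;	/* dereference while the reference is held */
		eeh_pcid_put(dev);		/* release only after the last use */
	}
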
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 93a6eeb..0292504 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -35,6 +35,7 @@
 #include <linux/crash_dump.h>
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
+#include <linux/slab.h>
 
 #include <asm/page.h>
 #include <asm/prom.h>
@@ -48,8 +49,10 @@
 static const struct fadump_mem_struct *fdm_active;
 
 static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
 int crash_mem_ranges;
+int max_crash_mem_ranges;
 
 /* Scan the Firmware Assisted dump configuration details. */
 int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -362,9 +365,9 @@
 }
 early_param("fadump_reserve_mem", early_fadump_reserve_mem);
 
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+static int register_fw_dump(struct fadump_mem_struct *fdm)
 {
-	int rc;
+	int rc, err;
 	unsigned int wait_time;
 
 	pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -381,7 +384,11 @@
 
 	} while (wait_time);
 
+	err = -EIO;
 	switch (rc) {
+	default:
+		pr_err("Failed to register. Unknown Error(%d).\n", rc);
+		break;
 	case -1:
 		printk(KERN_ERR "Failed to register firmware-assisted kernel"
 			" dump. Hardware Error(%d).\n", rc);
@@ -389,18 +396,22 @@
 	case -3:
 		printk(KERN_ERR "Failed to register firmware-assisted kernel"
 			" dump. Parameter Error(%d).\n", rc);
+		err = -EINVAL;
 		break;
 	case -9:
 		printk(KERN_ERR "firmware-assisted kernel dump is already "
 			" registered.");
 		fw_dump.dump_registered = 1;
+		err = -EEXIST;
 		break;
 	case 0:
 		printk(KERN_INFO "firmware-assisted kernel dump registration"
 			" is successful\n");
 		fw_dump.dump_registered = 1;
+		err = 0;
 		break;
 	}
+	return err;
 }
 
 void crash_fadump(struct pt_regs *regs, const char *str)
@@ -731,38 +742,88 @@
 	return 0;
 }
 
-static inline void fadump_add_crash_memory(unsigned long long base,
-					unsigned long long end)
+static void free_crash_memory_ranges(void)
+{
+	kfree(crash_memory_ranges);
+	crash_memory_ranges = NULL;
+	crash_memory_ranges_size = 0;
+	max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate crash memory ranges array in incremental units
+ * of PAGE_SIZE.
+ */
+static int allocate_crash_memory_ranges(void)
+{
+	struct fad_crash_memory_ranges *new_array;
+	u64 new_size;
+
+	new_size = crash_memory_ranges_size + PAGE_SIZE;
+	pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+		 new_size);
+
+	new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+	if (new_array == NULL) {
+		pr_err("Insufficient memory for setting up crash memory ranges\n");
+		free_crash_memory_ranges();
+		return -ENOMEM;
+	}
+
+	crash_memory_ranges = new_array;
+	crash_memory_ranges_size = new_size;
+	max_crash_mem_ranges = (new_size /
+				sizeof(struct fad_crash_memory_ranges));
+	return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
+					  unsigned long long end)
 {
 	if (base == end)
-		return;
+		return 0;
+
+	if (crash_mem_ranges == max_crash_mem_ranges) {
+		int ret;
+
+		ret = allocate_crash_memory_ranges();
+		if (ret)
+			return ret;
+	}
 
 	pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
 		crash_mem_ranges, base, end - 1, (end - base));
 	crash_memory_ranges[crash_mem_ranges].base = base;
 	crash_memory_ranges[crash_mem_ranges].size = end - base;
 	crash_mem_ranges++;
+	return 0;
 }
 
-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
 					unsigned long long end)
 {
 	unsigned long long ra_start, ra_end;
+	int ret = 0;
 
 	ra_start = fw_dump.reserve_dump_area_start;
 	ra_end = ra_start + fw_dump.reserve_dump_area_size;
 
 	if ((ra_start < end) && (ra_end > start)) {
 		if ((start < ra_start) && (end > ra_end)) {
-			fadump_add_crash_memory(start, ra_start);
-			fadump_add_crash_memory(ra_end, end);
+			ret = fadump_add_crash_memory(start, ra_start);
+			if (ret)
+				return ret;
+
+			ret = fadump_add_crash_memory(ra_end, end);
 		} else if (start < ra_start) {
-			fadump_add_crash_memory(start, ra_start);
+			ret = fadump_add_crash_memory(start, ra_start);
 		} else if (ra_end < end) {
-			fadump_add_crash_memory(ra_end, end);
+			ret = fadump_add_crash_memory(ra_end, end);
 		}
 	} else
-		fadump_add_crash_memory(start, end);
+		ret = fadump_add_crash_memory(start, end);
+
+	return ret;
 }
 
 static int fadump_init_elfcore_header(char *bufp)
@@ -802,10 +863,11 @@
  * Traverse through the memblock structure and set up crash memory ranges. These
  * ranges will be used to create PT_LOAD program headers in the elfcore header.
  */
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
 {
 	struct memblock_region *reg;
 	unsigned long long start, end;
+	int ret;
 
 	pr_debug("Setup crash memory ranges.\n");
 	crash_mem_ranges = 0;
@@ -816,7 +878,9 @@
 	 * specified during fadump registration. We need to create a separate
 	 * program header for this chunk with the correct offset.
 	 */
-	fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+	ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+	if (ret)
+		return ret;
 
 	for_each_memblock(memory, reg) {
 		start = (unsigned long long)reg->base;
@@ -825,8 +889,12 @@
 			start = fw_dump.boot_memory_size;
 
 		/* add this range excluding the reserved dump area. */
-		fadump_exclude_reserved_area(start, end);
+		ret = fadump_exclude_reserved_area(start, end);
+		if (ret)
+			return ret;
 	}
+
+	return 0;
 }
 
 /*
@@ -946,19 +1014,22 @@
 	return addr;
 }
 
-static void register_fadump(void)
+static int register_fadump(void)
 {
 	unsigned long addr;
 	void *vaddr;
+	int ret;
 
 	/*
 	 * If no memory is reserved then we can not register for firmware-
 	 * assisted dump.
 	 */
 	if (!fw_dump.reserve_dump_area_size)
-		return;
+		return -ENODEV;
 
-	fadump_setup_crash_memory_ranges();
+	ret = fadump_setup_crash_memory_ranges();
+	if (ret)
+		return ret;
 
 	addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
 	/* Initialize fadump crash info header. */
@@ -969,7 +1040,7 @@
 	fadump_create_elfcore_headers(vaddr);
 
 	/* register the future kernel dump with firmware. */
-	register_fw_dump(&fdm);
+	return register_fw_dump(&fdm);
 }
 
 static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
@@ -1036,6 +1107,7 @@
 	} else if (fw_dump.dump_registered) {
 		/* Un-register Firmware-assisted dump if it was registered. */
 		fadump_unregister_dump(&fdm);
+		free_crash_memory_ranges();
 	}
 }
 
@@ -1154,7 +1226,6 @@
 	switch (buf[0]) {
 	case '0':
 		if (fw_dump.dump_registered == 0) {
-			ret = -EINVAL;
 			goto unlock_out;
 		}
 		/* Un-register Firmware-assisted dump */
@@ -1162,11 +1233,11 @@
 		break;
 	case '1':
 		if (fw_dump.dump_registered == 1) {
-			ret = -EINVAL;
+			ret = -EEXIST;
 			goto unlock_out;
 		}
 		/* Register Firmware-assisted dump */
-		register_fadump();
+		ret = register_fadump();
 		break;
 	default:
 		ret = -EINVAL;
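
Side note (not part of the patch): the fadump change above replaces the fixed-size crash_memory_ranges array with one grown on demand in PAGE_SIZE steps via krealloc(), freeing everything on allocation failure. A minimal userspace analogue of that grow-on-demand pattern, with hypothetical names and standard realloc() standing in for krealloc(), might look like this:

#include <stdio.h>
#include <stdlib.h>

#define CHUNK 4096                      /* stand-in for PAGE_SIZE */

struct range { unsigned long long base, size; };

static struct range *ranges;            /* grow-on-demand array */
static size_t ranges_bytes;             /* current allocation size */
static size_t nr_ranges, max_ranges;

/* Grow the array by one CHUNK; on failure the old array is freed. */
static int grow_ranges(void)
{
	size_t new_bytes = ranges_bytes + CHUNK;
	struct range *tmp = realloc(ranges, new_bytes);

	if (!tmp) {
		free(ranges);
		ranges = NULL;
		ranges_bytes = max_ranges = nr_ranges = 0;
		return -1;
	}
	ranges = tmp;
	ranges_bytes = new_bytes;
	max_ranges = new_bytes / sizeof(*ranges);
	return 0;
}

static int add_range(unsigned long long base, unsigned long long end)
{
	if (base == end)
		return 0;
	if (nr_ranges == max_ranges && grow_ranges())
		return -1;
	ranges[nr_ranges].base = base;
	ranges[nr_ranges].size = end - base;
	nr_ranges++;
	return 0;
}

int main(void)
{
	add_range(0x0, 0x1000);
	add_range(0x2000, 0x8000);
	printf("%zu ranges, capacity %zu\n", nr_ranges, max_ranges);
	free(ranges);
	return 0;
}

As in the patch, the capacity is derived from the allocation size rather than tracked separately, so a single reallocation keeps count and storage consistent.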
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index fb133a1..2274be5 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -769,7 +769,7 @@
 	tovirt(r6,r6)
 	lis	r5, abatron_pteptrs@h
 	ori	r5, r5, abatron_pteptrs@l
-	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
+	stw	r5, 0xf0(0)	/* Must match your Abatron config file */
 	tophys(r5,r5)
 	stw	r6, 0(r5)
 
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 2694d07..9dafd7a 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -186,7 +186,12 @@
 			(unsigned long)(crashk_res.start >> 20),
 			(unsigned long)(memblock_phys_mem_size() >> 20));
 
-	memblock_reserve(crashk_res.start, crash_size);
+	if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+	    memblock_reserve(crashk_res.start, crash_size)) {
+		pr_err("Failed to reserve memory for crashkernel!\n");
+		crashk_res.start = crashk_res.end = 0;
+		return;
+	}
 }
 
 int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 678f87a..97b02b8 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 #include <linux/bootmem.h>
+#include <linux/syscalls.h>
 #include <linux/irq.h>
 #include <linux/list.h>
 #include <linux/of.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 05f09ae..915e89f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -314,7 +314,7 @@
 	unsigned long pp, key;
 	unsigned long v, gr;
 	__be64 *hptep;
-	int index;
+	long int index;
 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
 	/* Get SLB entry */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 48fc28b..64c9a91 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -68,14 +68,14 @@
 	 * updating it.  No write barriers are needed here, provided
 	 * we only update the current CPU's SLB shadow buffer.
 	 */
-	p->save_area[index].esid = 0;
-	p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
-	p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
+	WRITE_ONCE(p->save_area[index].esid, 0);
+	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
+	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
 }
 
 static inline void slb_shadow_clear(enum slb_index index)
 {
-	get_slb_shadow()->save_area[index].esid = 0;
+	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index be9d968..bdbbc32 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -207,25 +207,37 @@
 
 static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
 {
+	unsigned int i, ctx_idx = ctx->idx;
+
+	/* Load function address into r12 */
+	PPC_LI64(12, func);
+
+	/* For bpf-to-bpf function calls, the callee's address is unknown
+	 * until the last extra pass. As seen above, we use PPC_LI64() to
+	 * load the callee's address, but this may optimize the number of
+	 * instructions required based on the nature of the address.
+	 *
+	 * Since we don't want the number of instructions emitted to change,
+	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
+	 * we always have a five-instruction sequence, which is the maximum
+	 * that PPC_LI64() can emit.
+	 */
+	for (i = ctx->idx - ctx_idx; i < 5; i++)
+		PPC_NOP();
+
 #ifdef PPC64_ELF_ABI_v1
-	/* func points to the function descriptor */
-	PPC_LI64(b2p[TMP_REG_2], func);
-	/* Load actual entry point from function descriptor */
-	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
-	/* ... and move it to LR */
-	PPC_MTLR(b2p[TMP_REG_1]);
 	/*
 	 * Load TOC from function descriptor at offset 8.
 	 * We can clobber r2 since we get called through a
 	 * function pointer (so caller will save/restore r2)
 	 * and since we don't use a TOC ourself.
 	 */
-	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
-#else
-	/* We can clobber r12 */
-	PPC_FUNC_ADDR(12, func);
-	PPC_MTLR(12);
+	PPC_BPF_LL(2, 12, 8);
+	/* Load actual entry point from function descriptor */
+	PPC_BPF_LL(12, 12, 0);
 #endif
+
+	PPC_MTLR(12);
 	PPC_BLRL();
 }
 
@@ -314,6 +326,7 @@
 		u64 imm64;
 		u8 *func;
 		u32 true_cond;
+		u32 tmp_idx;
 
 		/*
 		 * addrs[] maps a BPF bytecode address into a real offset from
@@ -673,11 +686,7 @@
 		case BPF_STX | BPF_XADD | BPF_W:
 			/* Get EA into TMP_REG_1 */
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not word-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
 			/* load value from memory into TMP_REG_2 */
 			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			/* add value from src_reg into this */
@@ -685,32 +694,16 @@
 			/* store result back */
 			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 			/* we're done if this succeeded */
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			/* otherwise, let's try once more */
-			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			/* exit if the store was not successful */
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 		/* *(u64 *)(dst + off) += src */
 		case BPF_STX | BPF_XADD | BPF_DW:
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not doubleword-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
 			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 
 		/*
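
Side note (not part of the patch): the JIT hunk above pads the variable-length PPC_LI64() expansion with NOPs so every call site occupies exactly five instructions, keeping offsets stable across passes. A rough, self-contained sketch of that "pad to a fixed slot size" idea, using a toy emitter with hypothetical encodings rather than the kernel's JIT:

#include <stdint.h>
#include <stdio.h>

#define SLOT_INSNS 5                    /* fixed size of a call site */
#define NOP        0x60000000u          /* ppc "ori 0,0,0" encoding */

static uint32_t image[64];
static unsigned int idx;

static void emit(uint32_t insn) { image[idx++] = insn; }

/* Emit a (possibly shorter) immediate-load sequence for val. */
static void emit_load_imm(uint64_t val)
{
	/* toy encoding: one word per non-zero 16-bit chunk */
	for (int shift = 48; shift >= 0; shift -= 16) {
		uint16_t chunk = val >> shift;

		if (chunk || shift == 0)
			emit(0x38000000u | chunk);      /* pretend "li/ori" */
	}
}

/* Every call site is padded to SLOT_INSNS instructions. */
static void emit_call(uint64_t target)
{
	unsigned int start = idx;

	emit_load_imm(target);
	while (idx - start < SLOT_INSNS)
		emit(NOP);
	/* the real JIT would now emit mtlr/blrl */
}

int main(void)
{
	emit_call(0x1234);                  /* short immediate */
	emit_call(0x123456789abcdef0ULL);   /* full 64-bit immediate */
	printf("emitted %u instructions (2 slots of %d)\n", idx, SLOT_INSNS);
	return 0;
}

Because every slot has the same size regardless of how the immediate is optimized, the addresses recorded in an earlier pass remain valid in the final one.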
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index f803f4b..8608e35 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -27,6 +27,8 @@
 #include <asm/sections.h>
 #include <asm/time.h>
 
+#include <platforms/chrp/chrp.h>
+
 extern spinlock_t rtc_lock;
 
 #define NVRAM_AS0  0x74
@@ -62,7 +64,7 @@
 	return 0;
 }
 
-int chrp_cmos_clock_read(int addr)
+static int chrp_cmos_clock_read(int addr)
 {
 	if (nvram_as1 != 0)
 		outb(addr>>8, nvram_as1);
@@ -70,7 +72,7 @@
 	return (inb(nvram_data));
 }
 
-void chrp_cmos_clock_write(unsigned long val, int addr)
+static void chrp_cmos_clock_write(unsigned long val, int addr)
 {
 	if (nvram_as1 != 0)
 		outb(addr>>8, nvram_as1);
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 89c54de..bf4a125 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -35,6 +35,8 @@
  */
 #define HW_BROADWAY_ICR		0x00
 #define HW_BROADWAY_IMR		0x04
+#define HW_STARLET_ICR		0x08
+#define HW_STARLET_IMR		0x0c
 
 
 /*
@@ -74,6 +76,9 @@
 	void __iomem *io_base = irq_data_get_irq_chip_data(d);
 
 	setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
+
+	/* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
+	clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
 }
 
 
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index c3c9bbb..ba0964c 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -468,7 +468,7 @@
 	boot_infos_t *bi = (boot_infos_t *) r4;
 	unsigned long hdr;
 	unsigned long space;
-	unsigned long ptr, x;
+	unsigned long ptr;
 	char *model;
 	unsigned long offset = reloc_offset();
 
@@ -562,6 +562,8 @@
 	 * MMU switched OFF, so this should not be useful anymore.
 	 */
 	if (bi->version < 4) {
+		unsigned long x __maybe_unused;
+
 		bootx_printf("Touching pages...\n");
 
 		/*
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 6b4e9d18..4929dd4 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -352,6 +352,7 @@
 }
 machine_late_initcall(powermac, pmac_late_init);
 
+void note_bootable_part(dev_t dev, int part, int goodness);
 /*
  * This is __ref because we check for "initializing" before
  * touching any of the __init sensitive things and "initializing"
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 6c9a65b..7fb61eb 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -369,7 +369,7 @@
 		/* Closed or other error drop */
 		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
 		    rc != OPAL_BUSY_EVENT) {
-			written = total_len;
+			written += total_len;
 			break;
 		}
 		if (rc == OPAL_SUCCESS) {
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 9ed90c5..8015e40 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2623,7 +2623,7 @@
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
 
-	if ((level_shift - 3) * levels + page_shift >= 60)
+	if ((level_shift - 3) * levels + page_shift >= 55)
 		return -EINVAL;
 
 	/* Allocate TCE table */
@@ -3124,12 +3124,49 @@
 #endif /* CONFIG_DEBUG_FS */
 }
 
+static void pnv_pci_enable_bridge(struct pci_bus *bus)
+{
+	struct pci_dev *dev = bus->self;
+	struct pci_bus *child;
+
+	/* Empty bus ? bail */
+	if (list_empty(&bus->devices))
+		return;
+
+	/*
+	 * If there's a bridge associated with that bus enable it. This works
+	 * around races in the generic code if the enabling is done during
+	 * parallel probing. This can be removed once those races have been
+	 * fixed.
+	 */
+	if (dev) {
+		int rc = pci_enable_device(dev);
+		if (rc)
+			pci_err(dev, "Error enabling bridge (%d)\n", rc);
+		pci_set_master(dev);
+	}
+
+	/* Perform the same to child busses */
+	list_for_each_entry(child, &bus->children, node)
+		pnv_pci_enable_bridge(child);
+}
+
+static void pnv_pci_enable_bridges(void)
+{
+	struct pci_controller *hose;
+
+	list_for_each_entry(hose, &hose_list, list_node)
+		pnv_pci_enable_bridge(hose->bus);
+}
+
 static void pnv_pci_ioda_fixup(void)
 {
 	pnv_pci_ioda_setup_PEs();
 	pnv_pci_ioda_setup_iommu_api();
 	pnv_pci_ioda_create_dbgfs();
 
+	pnv_pci_enable_bridges();
+
 #ifdef CONFIG_EEH
 	eeh_init();
 	eeh_addr_cache_build();
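
Side note (not part of the patch): pnv_pci_enable_bridge() above walks the bus hierarchy depth-first, enabling the bridge of each non-empty bus before descending into its children. A small illustrative sketch of that traversal over a toy bus tree (hypothetical types, not the kernel's struct pci_bus):

#include <stdio.h>
#include <stddef.h>

struct bus {
	const char *name;
	int has_devices;            /* non-empty bus? */
	int has_bridge;             /* bus->self in the kernel */
	struct bus *children[4];    /* child buses, NULL-terminated */
};

/* Enable the bridge of this bus (if any), then recurse into children. */
static void enable_bridge(struct bus *bus)
{
	if (!bus->has_devices)
		return;                         /* empty bus? bail */

	if (bus->has_bridge)
		printf("enabling bridge for %s\n", bus->name);

	for (size_t i = 0; i < 4 && bus->children[i]; i++)
		enable_bridge(bus->children[i]);
}

int main(void)
{
	struct bus leaf = { "bus 2", 1, 1, { NULL } };
	struct bus root = { "root bus", 1, 0, { &leaf, NULL } };

	enable_bridge(&root);
	return 0;
}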
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 904a677..8799d8a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -346,7 +346,7 @@
 	}
 
 	savep = __va(regs->gpr[3]);
-	regs->gpr[3] = savep[0];	/* restore original r3 */
+	regs->gpr[3] = be64_to_cpu(savep[0]);	/* restore original r3 */
 
 	/* If it isn't an extended log we can use the per cpu 64bit buffer */
 	h = (struct rtas_error_log *)&savep[1];
@@ -357,7 +357,7 @@
 		int len, error_log_length;
 
 		error_log_length = 8 + rtas_error_extended_log_length(h);
-		len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
+		len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
 		memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
 		memcpy(global_mce_data_buf, h, len);
 		errhdr = (struct rtas_error_log *)global_mce_data_buf;
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index db2286b..47fb336 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -196,7 +196,7 @@
 
 	/* IO map the message register block. */
 	of_address_to_resource(np, 0, &rsrc);
-	msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
+	msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
 	if (!msgr_block_addr) {
 		dev_err(&dev->dev, "Failed to iomap MPIC message registers");
 		return -EFAULT;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 0351647..ee64e62 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -113,7 +113,7 @@
 
 struct hws_diag_entry {
 	unsigned int def:16;	    /* 0-15  Data Entry Format		 */
-	unsigned int R:14;	    /* 16-19 and 20-30 reserved		 */
+	unsigned int R:15;	    /* 16-19 and 20-30 reserved		 */
 	unsigned int I:1;	    /* 31 entry valid or invalid	 */
 	u8	     data[];	    /* Machine-dependent sample data	 */
 } __packed;
@@ -129,7 +129,9 @@
 			unsigned int f:1;	/* 0 - Block Full Indicator   */
 			unsigned int a:1;	/* 1 - Alert request control  */
 			unsigned int t:1;	/* 2 - Timestamp format	      */
-			unsigned long long:61;	/* 3 - 63: Reserved	      */
+			unsigned int :29;	/* 3 - 31: Reserved	      */
+			unsigned int bsdes:16;	/* 32-47: size of basic SDE   */
+			unsigned int dsdes:16;	/* 48-63: size of diagnostic SDE */
 		};
 		unsigned long long flags;	/* 0 - 63: All indicators     */
 	};
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 998b61c..4b39ba7 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -261,7 +261,6 @@
 	void *user;
 };
 
-#define QDIO_OUTBUF_STATE_FLAG_NONE	0x00
 #define QDIO_OUTBUF_STATE_FLAG_PENDING	0x01
 
 #define CHSC_AC1_INITIATE_INPUTQ	0x80
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 5982544..1671352 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -401,11 +401,13 @@
 	if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
 			       sizeof(nt_name) - 1))
 		return NULL;
-	if (strcmp(nt_name, "VMCOREINFO") != 0)
+	if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
 		return NULL;
 	vmcoreinfo = kzalloc_panic(note.n_descsz);
-	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
+	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
+		kfree(vmcoreinfo);
 		return NULL;
+	}
 	*size = note.n_descsz;
 	return vmcoreinfo;
 }
@@ -415,15 +417,20 @@
  */
 static void *nt_vmcoreinfo(void *ptr)
 {
+	const char *name = VMCOREINFO_NOTE_NAME;
 	unsigned long size;
 	void *vmcoreinfo;
 
 	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
-	if (!vmcoreinfo)
-		vmcoreinfo = get_vmcoreinfo_old(&size);
+	if (vmcoreinfo)
+		return nt_init_name(ptr, 0, vmcoreinfo, size, name);
+
+	vmcoreinfo = get_vmcoreinfo_old(&size);
 	if (!vmcoreinfo)
 		return ptr;
-	return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
+	ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
+	kfree(vmcoreinfo);
+	return ptr;
 }
 
 /*
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 51f842c..da246d9 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -156,7 +156,8 @@
 		return set_validity_icpt(scb_s, 0x0039U);
 
 	/* copy only the wrapping keys */
-	if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+	if (read_guest_real(vcpu, crycb_addr + 72,
+			    vsie_page->crycb.dea_wrapping_key_mask, 56))
 		return set_validity_icpt(scb_s, 0x0035U);
 
 	scb_s->ecb3 |= ecb3_flags;
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index e7672ed..5ff0520 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -27,7 +27,7 @@
  */
 ENTRY(memset)
 	ltgr	%r4,%r4
-	bzr	%r14
+	jz	.Lmemset_exit
 	ltgr	%r3,%r3
 	jnz	.Lmemset_fill
 	aghi	%r4,-1
@@ -42,12 +42,13 @@
 .Lmemset_clear_rest:
 	larl	%r3,.Lmemset_xc
 	ex	%r4,0(%r3)
+.Lmemset_exit:
 	BR_EX	%r14
 .Lmemset_fill:
 	stc	%r3,0(%r2)
 	cghi	%r4,1
 	lgr	%r1,%r2
-	ber	%r14
+	je	.Lmemset_fill_exit
 	aghi	%r4,-2
 	srlg	%r3,%r4,8
 	ltgr	%r3,%r3
@@ -59,6 +60,7 @@
 .Lmemset_fill_rest:
 	larl	%r3,.Lmemset_mvc
 	ex	%r4,0(%r3)
+.Lmemset_fill_exit:
 	BR_EX	%r14
 .Lmemset_xc:
 	xc	0(1,%r1),0(%r1)
@@ -73,7 +75,7 @@
  */
 ENTRY(memcpy)
 	ltgr	%r4,%r4
-	bzr	%r14
+	jz	.Lmemcpy_exit
 	aghi	%r4,-1
 	srlg	%r5,%r4,8
 	ltgr	%r5,%r5
@@ -82,6 +84,7 @@
 .Lmemcpy_rest:
 	larl	%r5,.Lmemcpy_mvc
 	ex	%r4,0(%r5)
+.Lmemcpy_exit:
 	BR_EX	%r14
 .Lmemcpy_loop:
 	mvc	0(256,%r1),0(%r3)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 02042b6..e6665a6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -79,7 +79,7 @@
 struct dcss_segment {
 	struct list_head list;
 	char dcss_name[8];
-	char res_name[15];
+	char res_name[16];
 	unsigned long start_addr;
 	unsigned long end;
 	atomic_t ref_count;
@@ -432,7 +432,7 @@
 	memcpy(&seg->res_name, seg->dcss_name, 8);
 	EBCASC(seg->res_name, 8);
 	seg->res_name[8] = '\0';
-	strncat(seg->res_name, " (DCSS)", 7);
+	strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
 	seg->res->name = seg->res_name;
 	rc = seg->vm_segtype;
 	if (rc == SEG_TYPE_SC ||
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 661d9fe..ba2f218 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -462,6 +462,8 @@
 	/* No reason to continue if interrupted by SIGKILL. */
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
 		fault = VM_FAULT_SIGNAL;
+		if (flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out_up;
 		goto out;
 	}
 	if (unlikely(fault & VM_FAULT_ERROR))
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 995f785..781a044 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -26,7 +26,7 @@
 		.data		= &page_table_allocate_pgste,
 		.maxlen		= sizeof(int),
 		.mode		= S_IRUGO | S_IWUSR,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &page_table_allocate_pgste_min,
 		.extra2		= &page_table_allocate_pgste_max,
 	},
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e7ce257..8bd25ae 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -517,8 +517,6 @@
 			/* br %r1 */
 			_EMIT2(0x07f1);
 		} else {
-			/* larl %r1,.+14 */
-			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
 			/* ex 0,S390_lowcore.br_r1_trampoline */
 			EMIT4_DISP(0x44000000, REG_0, REG_0,
 				   offsetof(struct lowcore, br_r1_trampoline));
@@ -1386,6 +1384,7 @@
 		goto free_addrs;
 	}
 	if (bpf_jit_prog(&jit, fp)) {
+		bpf_jit_binary_free(header);
 		fp = orig_fp;
 		goto free_addrs;
 	}
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index f576f10..0dac264 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -133,6 +133,8 @@
 {
 	pr_info("NUMA mode: %s\n", mode->name);
 	nodes_clear(node_possible_map);
+	/* Initially attach all possible CPUs to node 0. */
+	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
 	if (mode->setup)
 		mode->setup();
 	numa_setup_memory();
@@ -140,20 +142,6 @@
 }
 
 /*
- * numa_init_early() - Initialization initcall
- *
- * This runs when only one CPU is online and before the first
- * topology update is called for by the scheduler.
- */
-static int __init numa_init_early(void)
-{
-	/* Attach all possible CPUs to node 0 for now. */
-	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
-	return 0;
-}
-early_initcall(numa_init_early);
-
-/*
  * numa_init_late() - Initialization initcall
  *
  * Register NUMA nodes.
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 03a1d59..8757411 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -407,6 +407,8 @@
 	hwirq = 0;
 	for_each_pci_msi_entry(msi, pdev) {
 		rc = -EIO;
+		if (hwirq >= msi_vecs)
+			break;
 		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
 		if (irq < 0)
 			goto out_msi;
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 24384e1..a7aeb03 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -602,7 +602,7 @@
 {
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
-	unsigned int cmd;
+	unsigned int cmd = 0;
 	struct linux_pcic *pcic;
 	/* struct linux_pbm_info* pbm = &pcic->pbm; */
 	int node;
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 646988d..740f43b 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -201,23 +201,27 @@
 
 asmlinkage long sys_getdomainname(char __user *name, int len)
 {
- 	int nlen, err;
- 	
+	int nlen, err;
+	char tmp[__NEW_UTS_LEN + 1];
+
 	if (len < 0)
 		return -EINVAL;
 
- 	down_read(&uts_sem);
- 	
+	down_read(&uts_sem);
+
 	nlen = strlen(utsname()->domainname) + 1;
 	err = -EINVAL;
 	if (nlen > len)
-		goto out;
+		goto out_unlock;
+	memcpy(tmp, utsname()->domainname, nlen);
 
-	err = -EFAULT;
-	if (!copy_to_user(name, utsname()->domainname, nlen))
-		err = 0;
+	up_read(&uts_sem);
 
-out:
+	if (copy_to_user(name, tmp, nlen))
+		return -EFAULT;
+	return 0;
+
+out_unlock:
 	up_read(&uts_sem);
 	return err;
 }
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 02e05e2..ebecbc9 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -524,23 +524,27 @@
 
 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
 {
-        int nlen, err;
+	int nlen, err;
+	char tmp[__NEW_UTS_LEN + 1];
 
 	if (len < 0)
 		return -EINVAL;
 
- 	down_read(&uts_sem);
- 	
+	down_read(&uts_sem);
+
 	nlen = strlen(utsname()->domainname) + 1;
 	err = -EINVAL;
 	if (nlen > len)
-		goto out;
+		goto out_unlock;
+	memcpy(tmp, utsname()->domainname, nlen);
 
-	err = -EFAULT;
-	if (!copy_to_user(name, utsname()->domainname, nlen))
-		err = 0;
+	up_read(&uts_sem);
 
-out:
+	if (copy_to_user(name, tmp, nlen))
+		return -EFAULT;
+	return 0;
+
+out_unlock:
 	up_read(&uts_sem);
 	return err;
 }
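
Side note (not part of the patch): both getdomainname() fixes above follow the same pattern — copy the string into a stack buffer while holding uts_sem, drop the semaphore, and only then call copy_to_user(), so a faulting user access can never sleep with the lock held. A hedged userspace analogue of "snapshot under the lock, copy outside it", using pthreads and hypothetical names:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 65

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char domainname[NAME_MAX_LEN] = "example.org";

/* Copy the domain name into buf; the potentially slow output step
 * (standing in for copy_to_user()) happens after the lock is dropped. */
static int get_domainname(char *buf, size_t len)
{
	char tmp[NAME_MAX_LEN];
	size_t nlen;

	pthread_rwlock_rdlock(&uts_lock);
	nlen = strlen(domainname) + 1;
	if (nlen > len) {
		pthread_rwlock_unlock(&uts_lock);
		return -1;                      /* -EINVAL in the kernel */
	}
	memcpy(tmp, domainname, nlen);
	pthread_rwlock_unlock(&uts_lock);

	memcpy(buf, tmp, nlen);                 /* "user" copy, lock not held */
	return 0;
}

int main(void)
{
	char out[NAME_MAX_LEN];

	if (!get_domainname(out, sizeof(out)))
		printf("domain: %s\n", out);
	return 0;
}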
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 957731d..eb431d8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -148,6 +148,7 @@
 	select HAVE_UID16			if X86_32 || IA32_EMULATION
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_USER_RETURN_NOTIFIER
+	select HOTPLUG_SMT			if SMP
 	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_RELA		if X86_64
 	select MODULES_USE_ELF_REL		if X86_32
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 4669b3a..cda8e14 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -101,9 +101,13 @@
 	done
 endef
 
+# We need to run two commands under "if_changed", so merge them into a
+# single invocation.
+quiet_cmd_check-and-link-vmlinux = LD      $@
+      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
+
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
-	$(call if_changed,check_data_rel)
-	$(call if_changed,ld)
+	$(call if_changed,check-and-link-vmlinux)
 
 OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 308aac3..98543be 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -52,6 +52,7 @@
 CONFIG_KSM=y
 CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_ZSMALLOC=y
 # CONFIG_MTRR is not set
 CONFIG_HZ_100=y
 CONFIG_KEXEC=y
@@ -203,6 +204,7 @@
 CONFIG_OF=y
 CONFIG_OF_UNITTEST=y
 # CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -224,7 +226,9 @@
 CONFIG_DM_ZERO=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1
 CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -460,5 +464,13 @@
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+CONFIG_CRYPTO_RSA=y
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509"
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index dd19584..5773e11 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -48,21 +48,13 @@
 #ifdef CONFIG_X86_64
 /*
  * use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
  * for fpu state save/restore overhead.
  */
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU	512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU	1024
+#define CRC32C_PCL_BREAKEVEN	512
 
 asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
 				unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#define set_pcl_breakeven_point()					\
-do {									\
-	if (!use_eager_fpu())						\
-		crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU;	\
-} while (0)
 #endif /* CONFIG_X86_64 */
 
 static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -185,7 +177,7 @@
 	 * use faster PCL version if datasize is large enough to
 	 * overcome kernel fpu state save/restore overhead
 	 */
-	if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
 		kernel_fpu_begin();
 		*crcp = crc_pcl(data, len, *crcp);
 		kernel_fpu_end();
@@ -197,7 +189,7 @@
 static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
 				u8 *out)
 {
-	if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
 		kernel_fpu_begin();
 		*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
 		kernel_fpu_end();
@@ -257,7 +249,6 @@
 		alg.update = crc32c_pcl_intel_update;
 		alg.finup = crc32c_pcl_intel_finup;
 		alg.digest = crc32c_pcl_intel_digest;
-		set_pcl_breakeven_point();
 	}
 #endif
 	return crypto_register_shash(&alg);
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index ec9bee6..b7f5042 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,7 +265,7 @@
 	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-	vmovd   _args_digest(state , idx, 4) , %xmm0
+	vmovd	_args_digest+4*32(state, idx, 4), %xmm1
 	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index b0cd306..b83eafa 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -22,6 +22,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/nospec.h>
 #include <linux/uprobes.h>
+#include <linux/syscalls.h>
 
 #include <asm/desc.h>
 #include <asm/traps.h>
@@ -180,6 +181,8 @@
 	struct thread_info *ti = current_thread_info();
 	u32 cached_flags;
 
+	addr_limit_user_check();
+
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
 		local_irq_disable();
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3a31fd4..d764992 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -91,7 +91,7 @@
 .endm
 
 .macro TRACE_IRQS_IRETQ_DEBUG
-	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
+	btl	$9, EFLAGS(%rsp)		/* interrupts off? */
 	jnc	1f
 	TRACE_IRQS_ON_DEBUG
 1:
@@ -485,7 +485,7 @@
 #ifdef CONFIG_PREEMPT
 	/* Interrupts are off */
 	/* Check if we need preemption */
-	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
+	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
 	jnc	1f
 0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	1f
@@ -769,7 +769,7 @@
 
 	call	\do_sym
 
-	jmp	error_exit			/* %ebx: no swapgs flag */
+	jmp	error_exit
 	.endif
 END(\sym)
 .endm
@@ -1038,7 +1038,6 @@
 
 /*
  * Save all registers in pt_regs, and switch gs if needed.
- * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
 	cld
@@ -1051,7 +1050,6 @@
 	 * the kernel CR3 here.
 	 */
 	SWITCH_KERNEL_CR3
-	xorl	%ebx, %ebx
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
 
@@ -1082,7 +1080,6 @@
 	 * for these here too.
 	 */
 .Lerror_kernelspace:
-	incl	%ebx
 	leaq	native_irq_return_iret(%rip), %rcx
 	cmpq	%rcx, RIP+8(%rsp)
 	je	.Lerror_bad_iret
@@ -1114,28 +1111,19 @@
 
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
-	 * as if we faulted immediately after IRET and clear EBX so that
-	 * error_exit knows that we will be returning to user mode.
+	 * as if we faulted immediately after IRET.
 	 */
 	mov	%rsp, %rdi
 	call	fixup_bad_iret
 	mov	%rax, %rsp
-	decl	%ebx
 	jmp	.Lerror_entry_from_usermode_after_swapgs
 END(error_entry)
 
-
-/*
- * On entry, EBX is a "return to kernel mode" flag:
- *   1: already in kernel mode, don't need SWAPGS
- *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
 ENTRY(error_exit)
-	movl	%ebx, %eax
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
-	jnz	retint_kernel
+	testb	$3, CS(%rsp)
+	jz	retint_kernel
 	jmp	retint_user
 END(error_exit)
 
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 02223cb..1e96709 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -37,8 +37,9 @@
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*ts) :
+	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
 
@@ -46,8 +47,9 @@
 {
 	long ret;
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
 
@@ -58,13 +60,13 @@
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[clock], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "=a" (ret), "=m" (*ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
 		: "memory", "edx");
 	return ret;
 }
@@ -73,13 +75,13 @@
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[tv], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "=a" (ret), "=m" (*tv), "=m" (*tz)
+		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
 		: "memory", "edx");
 	return ret;
 }
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index b26ee32..fd4484ae 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -578,7 +578,7 @@
 {
 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
 	struct perf_event *event = pcpu->event;
-	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event *hwc;
 	struct perf_sample_data data;
 	struct perf_raw_record raw;
 	struct pt_regs regs;
@@ -601,6 +601,10 @@
 		return 0;
 	}
 
+	if (WARN_ON_ONCE(!event))
+		goto fail;
+
+	hwc = &event->hw;
 	msr = hwc->config_base;
 	buf = ibs_data.regs;
 	rdmsrl(msr, *buf);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 5d103a8..10c1a5c 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -342,7 +342,7 @@
 
 	mask = x86_pmu.lbr_nr - 1;
 	tos = task_ctx->tos;
-	for (i = 0; i < tos; i++) {
+	for (i = 0; i < task_ctx->valid_lbrs; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
 		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
@@ -350,6 +350,15 @@
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+
+	for (; i < x86_pmu.lbr_nr; i++) {
+		lbr_idx = (tos - i) & mask;
+		wrlbr_from(lbr_idx, 0);
+		wrlbr_to(lbr_idx, 0);
+		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+	}
+
 	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }
@@ -357,7 +366,7 @@
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
 	unsigned lbr_idx, mask;
-	u64 tos;
+	u64 tos, from;
 	int i;
 
 	if (task_ctx->lbr_callstack_users == 0) {
@@ -367,13 +376,17 @@
 
 	mask = x86_pmu.lbr_nr - 1;
 	tos = intel_pmu_lbr_tos();
-	for (i = 0; i < tos; i++) {
+	for (i = 0; i < x86_pmu.lbr_nr; i++) {
 		lbr_idx = (tos - i) & mask;
-		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+		from = rdlbr_from(lbr_idx);
+		if (!from)
+			break;
+		task_ctx->lbr_from[i] = from;
 		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	task_ctx->valid_lbrs = i;
 	task_ctx->tos = tos;
 	task_ctx->lbr_stack_state = LBR_VALID;
 }
@@ -522,7 +535,7 @@
  */
 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 {
-	bool need_info = false;
+	bool need_info = false, call_stack = false;
 	unsigned long mask = x86_pmu.lbr_nr - 1;
 	int lbr_format = x86_pmu.intel_cap.lbr_format;
 	u64 tos = intel_pmu_lbr_tos();
@@ -533,7 +546,7 @@
 	if (cpuc->lbr_sel) {
 		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
 		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
-			num = tos;
+			call_stack = true;
 	}
 
 	for (i = 0; i < num; i++) {
@@ -546,6 +559,13 @@
 		from = rdlbr_from(lbr_idx);
 		to   = rdlbr_to(lbr_idx);
 
+		/*
+		 * Read LBR call stack entries
+		 * until invalid entry (0s) is detected.
+		 */
+		if (call_stack && !from)
+			break;
+
 		if (lbr_format == LBR_FORMAT_INFO && need_info) {
 			u64 info;
 
@@ -1175,4 +1195,8 @@
 
 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
+
+	/* Knights Landing does have MISPREDICT bit */
+	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
 }
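
Side note (not part of the patch): the LBR hunks above stop both the save and restore loops at the first all-zero FROM entry and record how many entries were valid, instead of trusting the TOS count alone; the restore path also clears whatever slots were not saved. A minimal sketch of that "read until the zero sentinel, remember the count" idea, with a plain array standing in for the MSR interface:

#include <stdio.h>

#define NR_ENTRIES 8

struct ctx {
	unsigned long long from[NR_ENTRIES];
	int valid;                      /* like task_ctx->valid_lbrs */
};

/* Save entries until the first zero "from" address is seen. */
static void save_entries(const unsigned long long *hw, struct ctx *ctx)
{
	int i;

	for (i = 0; i < NR_ENTRIES; i++) {
		if (!hw[i])
			break;          /* invalid entry: stop here */
		ctx->from[i] = hw[i];
	}
	ctx->valid = i;
}

/* Restore only the valid entries and explicitly zero the rest. */
static void restore_entries(unsigned long long *hw, const struct ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->valid; i++)
		hw[i] = ctx->from[i];
	for (; i < NR_ENTRIES; i++)
		hw[i] = 0;
}

int main(void)
{
	unsigned long long hw[NR_ENTRIES] = { 0x400100, 0x400200, 0, 0, 7, 0, 0, 0 };
	struct ctx ctx;

	save_entries(hw, &ctx);
	restore_entries(hw, &ctx);
	printf("valid entries: %d\n", ctx.valid);
	return 0;
}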
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index aec6cc9..4f36526 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -212,7 +212,7 @@
 	u64 prev_count, new_count, delta;
 	int shift;
 
-	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+	if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
 		shift = 64 - uncore_fixed_ctr_bits(box);
 	else
 		shift = 64 - uncore_perf_ctr_bits(box);
diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
index cda5693..83e2188 100644
--- a/arch/x86/events/intel/uncore_nhmex.c
+++ b/arch/x86/events/intel/uncore_nhmex.c
@@ -245,7 +245,7 @@
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+	if (hwc->idx == UNCORE_PMC_IDX_FIXED)
 		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
 	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
 		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f356317..1bfebbc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -633,6 +633,7 @@
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
 	int tos;
+	int valid_lbrs;
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f5aaf6c..2188b5a 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -12,6 +12,7 @@
 #include <asm/mpspec.h>
 #include <asm/msr.h>
 #include <asm/idle.h>
+#include <asm/hardirq.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3	1
 
@@ -633,6 +634,13 @@
 #endif
 
 #endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_SMP
+bool apic_id_is_primary_thread(unsigned int id);
+#else
+static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
+#endif
+
 extern void irq_enter(void);
 extern void irq_exit(void);
 
@@ -640,6 +648,7 @@
 {
 	irq_enter();
 	exit_idle();
+	kvm_set_cpu_l1tf_flush_l1d();
 }
 
 static inline void entering_ack_irq(void)
@@ -652,6 +661,7 @@
 {
 	irq_enter();
 	ack_APIC_irq();
+	kvm_set_cpu_l1tf_flush_l1d();
 }
 
 static inline void exiting_irq(void)
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 46e40ae..93eebc63 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -6,8 +6,6 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H
 
-#include <asm/nospec-branch.h>
-
 #ifdef APM_ZERO_SEGS
 #	define APM_DO_ZERO_SEGS \
 		"pushl %%ds\n\t" \
@@ -33,7 +31,6 @@
 	 * N.B. We do NOT need a cld after the BIOS call
 	 * because we always save and restore the flags.
 	 */
-	firmware_restrict_branch_speculation_start();
 	__asm__ __volatile__(APM_DO_ZERO_SEGS
 		"pushl %%edi\n\t"
 		"pushl %%ebp\n\t"
@@ -46,7 +43,6 @@
 		  "=S" (*esi)
 		: "a" (func), "b" (ebx_in), "c" (ecx_in)
 		: "memory", "cc");
-	firmware_restrict_branch_speculation_end();
 }
 
 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -59,7 +55,6 @@
 	 * N.B. We do NOT need a cld after the BIOS call
 	 * because we always save and restore the flags.
 	 */
-	firmware_restrict_branch_speculation_start();
 	__asm__ __volatile__(APM_DO_ZERO_SEGS
 		"pushl %%edi\n\t"
 		"pushl %%ebp\n\t"
@@ -72,7 +67,6 @@
 		  "=S" (si)
 		: "a" (func), "b" (ebx_in), "c" (ecx_in)
 		: "memory", "cc");
-	firmware_restrict_branch_speculation_end();
 	return error;
 }
 
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 8d8c24f..742712b 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -45,6 +45,65 @@
 #define _ASM_SI		__ASM_REG(si)
 #define _ASM_DI		__ASM_REG(di)
 
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1	_ASM_AX
+#define _ASM_ARG2	_ASM_DX
+#define _ASM_ARG3	_ASM_CX
+
+#define _ASM_ARG1L	eax
+#define _ASM_ARG2L	edx
+#define _ASM_ARG3L	ecx
+
+#define _ASM_ARG1W	ax
+#define _ASM_ARG2W	dx
+#define _ASM_ARG3W	cx
+
+#define _ASM_ARG1B	al
+#define _ASM_ARG2B	dl
+#define _ASM_ARG3B	cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1	_ASM_DI
+#define _ASM_ARG2	_ASM_SI
+#define _ASM_ARG3	_ASM_DX
+#define _ASM_ARG4	_ASM_CX
+#define _ASM_ARG5	r8
+#define _ASM_ARG6	r9
+
+#define _ASM_ARG1Q	rdi
+#define _ASM_ARG2Q	rsi
+#define _ASM_ARG3Q	rdx
+#define _ASM_ARG4Q	rcx
+#define _ASM_ARG5Q	r8
+#define _ASM_ARG6Q	r9
+
+#define _ASM_ARG1L	edi
+#define _ASM_ARG2L	esi
+#define _ASM_ARG3L	edx
+#define _ASM_ARG4L	ecx
+#define _ASM_ARG5L	r8d
+#define _ASM_ARG6L	r9d
+
+#define _ASM_ARG1W	di
+#define _ASM_ARG2W	si
+#define _ASM_ARG3W	dx
+#define _ASM_ARG4W	cx
+#define _ASM_ARG5W	r8w
+#define _ASM_ARG6W	r9w
+
+#define _ASM_ARG1B	dil
+#define _ASM_ARG2B	sil
+#define _ASM_ARG3B	dl
+#define _ASM_ARG4B	cl
+#define _ASM_ARG5B	r8b
+#define _ASM_ARG6B	r9b
+
+#endif
+
 /*
  * Macros to generate condition code outputs from inline assembly,
  * The output operand must be type "bool".
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index aea30af..f6d1bc9 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
 #define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU	( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
@@ -213,7 +212,7 @@
 #define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
-
+#define X86_FEATURE_L1TF_PTEINV	( 7*32+29) /* "" L1TF workaround PTE inversion */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
@@ -317,6 +316,7 @@
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D		(18*32+28) /* Flush L1D cache */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
 #define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
@@ -349,5 +349,6 @@
 #define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF		X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index 3c69fed..d8b9560 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -3,8 +3,8 @@
 
 #include <linux/compiler.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/setup.h>
 
 static __always_inline __init void *dmi_alloc(unsigned len)
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8554f96..2515284 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
 #ifndef _ASM_X86_FIXMAP_H
 #define _ASM_X86_FIXMAP_H
 
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM	2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP	507
+
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/acpi.h>
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8852e3a..499d6ed 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -60,11 +60,6 @@
 /*
  * FPU related CPU feature flag helper routines:
  */
-static __always_inline __pure bool use_eager_fpu(void)
-{
-	return true;
-}
-
 static __always_inline __pure bool use_xsaveopt(void)
 {
 	return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -501,24 +496,6 @@
 }
 
 
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
-	if (!use_eager_fpu())
-		clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
-	if (!use_eager_fpu())
-		stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
 static inline void __fpregs_deactivate(struct fpu *fpu)
 {
 	WARN_ON_FPU(!fpu->fpregs_active);
@@ -528,7 +505,6 @@
 	trace_x86_fpu_regs_deactivated(fpu);
 }
 
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
 static inline void __fpregs_activate(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu->fpregs_active);
@@ -554,22 +530,17 @@
 }
 
 /*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
  * These generally need preemption protection to work,
  * do try to avoid using these on their own.
  */
 static inline void fpregs_activate(struct fpu *fpu)
 {
-	__fpregs_activate_hw();
 	__fpregs_activate(fpu);
 }
 
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
 	__fpregs_deactivate(fpu);
-	__fpregs_deactivate_hw();
 }
 
 /*
@@ -596,8 +567,7 @@
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
-		      new_fpu->fpstate_active &&
-		      (use_eager_fpu() || new_fpu->counter > 5);
+		      new_fpu->fpstate_active;
 
 	if (old_fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
@@ -611,18 +581,13 @@
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new_fpu->counter++;
 			__fpregs_activate(new_fpu);
 			trace_x86_fpu_regs_activated(new_fpu);
 			prefetch(&new_fpu->state);
-		} else {
-			__fpregs_deactivate_hw();
 		}
 	} else {
-		old_fpu->counter = 0;
 		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			new_fpu->counter++;
 			if (fpu_want_lazy_restore(new_fpu, cpu))
 				fpu.preload = 0;
 			else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486..3c80f5b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@
 	unsigned char			fpregs_active;
 
 	/*
-	 * @counter:
-	 *
-	 * This counter contains the number of consecutive context switches
-	 * during which the FPU stays used. If this is over a threshold, the
-	 * lazy FPU restore logic becomes eager, to save the trap overhead.
-	 * This is an unsigned char so that after 256 iterations the counter
-	 * wraps and the context switch behavior turns lazy again; this is to
-	 * deal with bursty apps that only use the FPU for a short time:
-	 */
-	unsigned char			counter;
-	/*
 	 * @state:
 	 *
 	 * In-memory copy of all FPU registers that we save/restore
@@ -340,29 +329,6 @@
 	 * the registers in the FPU are more recent than this state
 	 * copy. If the task context-switches away then they get
 	 * saved here and represent the FPU state.
-	 *
-	 * After context switches there may be a (short) time period
-	 * during which the in-FPU hardware registers are unchanged
-	 * and still perfectly match this state, if the tasks
-	 * scheduled afterwards are not using the FPU.
-	 *
-	 * This is the 'lazy restore' window of optimization, which
-	 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
-	 *
-	 * We detect whether a subsequent task uses the FPU via setting
-	 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
-	 *
-	 * During this window, if the task gets scheduled again, we
-	 * might be able to skip having to do a restore from this
-	 * memory buffer to the hardware registers - at the cost of
-	 * incurring the overhead of #NM fault traps.
-	 *
-	 * Note that on modern CPUs that support the XSAVEOPT (or other
-	 * optimized XSAVE instructions), we don't use #NM traps anymore,
-	 * as the hardware can track whether FPU registers need saving
-	 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
-	 * logic, which unconditionally saves/restores all FPU state
-	 * across context switches. (if FPU state exists.)
 	 */
 	union fpregs_state		state;
 	/*
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 9b76cd3..9871659 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -2,10 +2,12 @@
 #define _ASM_X86_HARDIRQ_H
 
 #include <linux/threads.h>
-#include <linux/irq.h>
 
 typedef struct {
-	unsigned int __softirq_pending;
+	u16	     __softirq_pending;
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+	u8	     kvm_cpu_l1tf_flush_l1d;
+#endif
 	unsigned int __nmi_count;	/* arch dependent */
 #ifdef CONFIG_X86_LOCAL_APIC
 	unsigned int apic_timer_irqs;	/* arch dependent */
@@ -60,4 +62,24 @@
 extern u64 arch_irq_stat(void);
 #define arch_irq_stat		arch_irq_stat
 
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static inline void kvm_set_cpu_l1tf_flush_l1d(void)
+{
+	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
+}
+
+static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
+{
+	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
+}
+
+static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
+{
+	return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
+}
+#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
+static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
+
 #endif /* _ASM_X86_HARDIRQ_H */
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index bb07878..be6492c 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_I8259_H
 
 #include <linux/delay.h>
+#include <asm/io.h>
 
 extern unsigned int cached_irq_mask;
 
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index ac7692d..508a062 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -12,7 +12,9 @@
  * Interrupt control:
  */
 
-static inline unsigned long native_save_fl(void)
+/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+extern inline unsigned long native_save_fl(void);
+extern inline unsigned long native_save_fl(void)
 {
 	unsigned long flags;
 
@@ -30,7 +32,8 @@
 	return flags;
 }
 
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
 {
 	asm volatile("push %0 ; popf"
 		     : /* no output */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7598a6c..22a0ccb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
 #include <linux/irq_work.h>
+#include <linux/irq.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -485,6 +486,7 @@
 	u64 smbase;
 	bool tpr_access_reporting;
 	u64 ia32_xss;
+	u64 microcode_version;
 
 	/*
 	 * Paging state of the vcpu
@@ -659,6 +661,9 @@
 
 	int pending_ioapic_eoi;
 	int pending_external_vector;
+
+	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
+	bool l1tf_flush_l1d;
 };
 
 struct kvm_lpage_info {
@@ -819,6 +824,7 @@
 	u64 signal_exits;
 	u64 irq_window_exits;
 	u64 nmi_window_exits;
+	u64 l1d_flush;
 	u64 halt_exits;
 	u64 halt_successful_poll;
 	u64 halt_attempted_poll;
@@ -1020,6 +1026,8 @@
 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+	int (*get_msr_feature)(struct kvm_msr_entry *entry);
 };
 
 struct kvm_arch_async_pf {
@@ -1338,6 +1346,7 @@
 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 					   unsigned long address);
 
+u64 kvm_get_arch_capabilities(void);
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1ec13e2..bbbb9b1 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -63,12 +63,19 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1 << 3)   /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO			(1 << 4)   /*
 						    * Not susceptible to Speculative Store Bypass
 						    * attack, so no Speculative Store Bypass
 						    * control required.
 						    */
 
+#define MSR_IA32_FLUSH_CMD		0x0000010b
+#define L1D_FLUSH			(1 << 0)   /*
+						    * Writeback and invalidate the
+						    * L1 data cache.
+						    */
+
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
 
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 3bae496..2622984 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -28,8 +28,13 @@
 #define N_EXCEPTION_STACKS 1
 
 #ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT	44
+/*
+ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
+ * but we need the full mask to make sure inverted PROT_NONE
+ * entries have all the host bits set in a guest.
+ * The real limit is still 44 bits.
+ */
+#define __PHYSICAL_MASK_SHIFT	52
 #define __VIRTUAL_MASK_SHIFT	32
 
 #else  /* !CONFIG_X86_PAE */
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index fd74a11..89c5033 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -77,4 +77,21 @@
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
 
+/* No inverted PFNs on 2 level page tables */
+
+static inline u64 protnone_mask(u64 val)
+{
+	return 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+	return val;
+}
+
+static inline bool __pte_needs_invert(u64 val)
+{
+	return false;
+}
+
 #endif /* _ASM_X86_PGTABLE_2LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index cdaa58c..095dbc2 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_H
 #define _ASM_X86_PGTABLE_3LEVEL_H
 
+#include <asm/atomic64_32.h>
+
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
  * tables on PPro+ CPUs.
@@ -142,10 +144,7 @@
 {
 	pte_t res;
 
-	/* xchg acts as a barrier before the setting of the high bits */
-	res.pte_low = xchg(&ptep->pte_low, 0);
-	res.pte_high = ptep->pte_high;
-	ptep->pte_high = 0;
+	res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
 
 	return res;
 }
@@ -177,11 +176,44 @@
 #endif
 
 /* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS		5
+
+#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
+
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
 #define __swp_type(x)			(((x).val) & 0x1f)
 #define __swp_offset(x)			((x).val >> 5)
 #define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
-#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
+
+/*
+ * Normally, __swp_entry() converts from arch-independent swp_entry_t to
+ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
+ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
+ * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
+ * __swp_entry_to_pte() through the following helper macro based on 64bit
+ * __swp_entry().
+ */
+#define __swp_pteval_entry(type, offset) ((pteval_t) { \
+	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
+
+#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
+		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
+/*
+ * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
+ * swp_entry_t, but also has to convert it from 64bit to the 32bit
+ * intermediate representation, using the following macros based on 64bit
+ * __swp_type() and __swp_offset().
+ */
+#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
+#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
+
+#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
+					     __pteval_swp_offset(pte)))
+
+#include <asm/pgtable-invert.h>
 
 #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
new file mode 100644
index 0000000..a0c1525
--- /dev/null
+++ b/arch/x86/include/asm/pgtable-invert.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PGTABLE_INVERT_H
+#define _ASM_PGTABLE_INVERT_H 1
+
+#ifndef __ASSEMBLY__
+
+/*
+ * A clear pte value is special, and doesn't get inverted.
+ *
+ * Note that even users that only pass a pgprot_t (rather
+ * than a full pte) won't trigger the special zero case,
+ * because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED
+ * set. So the all zero case really is limited to just the
+ * cleared page table entry case.
+ */
+static inline bool __pte_needs_invert(u64 val)
+{
+	return val && !(val & _PAGE_PRESENT);
+}
+
+/* Get a mask to xor with the page table entry to get the correct pfn. */
+static inline u64 protnone_mask(u64 val)
+{
+	return __pte_needs_invert(val) ?  ~0ull : 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+	/*
+	 * When a PTE transitions from NONE to !NONE or vice-versa
+	 * invert the PFN part to stop speculation.
+	 * pte_pfn undoes this when needed.
+	 */
+	if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
+		val = (val & ~mask) | (~val & mask);
+	return val;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
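Editor's note: a quick sanity check of the inversion scheme, written as a standalone user-space program. The bit positions and PFN mask are illustrative stand-ins for _PAGE_PRESENT, _PAGE_PROTNONE and PTE_PFN_MASK; the point is that a PROT_NONE entry is stored with its PFN bits flipped and the decode side flips them back, so the inversion is invisible to pte_pfn().

#include <stdint.h>
#include <stdio.h>

#define PRESENT    0x001ULL			/* stand-in for _PAGE_PRESENT */
#define PROTNONE   0x100ULL			/* stand-in for _PAGE_PROTNONE */
#define PFN_MASK   0x000ffffffffff000ULL	/* stand-in for PTE_PFN_MASK */
#define PAGE_SHIFT 12

static uint64_t protnone_mask(uint64_t val)
{
	return (val && !(val & PRESENT)) ? ~0ULL : 0;	/* same test as above */
}

int main(void)
{
	uint64_t pfn  = 0x1234;
	uint64_t prot = PROTNONE;		/* PROT_NONE: present bit clear */
	/* Encode (what pfn_pte() does): flip the PFN for a PROT_NONE entry. */
	uint64_t pte  = (((pfn << PAGE_SHIFT) ^ protnone_mask(prot)) & PFN_MASK) | prot;
	/* Decode (what pte_pfn() does): flip it back before masking. */
	uint64_t back = ((pte ^ protnone_mask(pte)) & PFN_MASK) >> PAGE_SHIFT;

	printf("pte=%#llx pfn back=%#llx\n", (unsigned long long)pte,
	       (unsigned long long)back);	/* prints pfn back=0x1234 */
	return 0;
}
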
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5af0401..5736306 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -165,19 +165,34 @@
 	return pte_flags(pte) & _PAGE_SPECIAL;
 }
 
+/* Entries that were set to PROT_NONE are inverted */
+
+static inline u64 protnone_mask(u64 val);
+
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+	phys_addr_t pfn = pte_val(pte);
+	pfn ^= protnone_mask(pfn);
+	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+	phys_addr_t pfn = pmd_val(pmd);
+	pfn ^= protnone_mask(pfn);
+	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
 }
 
 static inline unsigned long pud_pfn(pud_t pud)
 {
-	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+	phys_addr_t pfn = pud_val(pud);
+	pfn ^= protnone_mask(pfn);
+	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+}
+
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
@@ -340,11 +355,6 @@
 	return pmd_set_flags(pmd, _PAGE_RW);
 }
 
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
-}
-
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline int pte_soft_dirty(pte_t pte)
 {
@@ -394,19 +404,58 @@
 
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
-		     massage_pgprot(pgprot));
+	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+	pfn ^= protnone_mask(pgprot_val(pgprot));
+	pfn &= PTE_PFN_MASK;
+	return __pte(pfn | massage_pgprot(pgprot));
 }
 
 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
-		     massage_pgprot(pgprot));
+	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+	pfn ^= protnone_mask(pgprot_val(pgprot));
+	pfn &= PHYSICAL_PMD_PAGE_MASK;
+	return __pmd(pfn | massage_pgprot(pgprot));
 }
 
+static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+{
+	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+	pfn ^= protnone_mask(pgprot_val(pgprot));
+	pfn &= PHYSICAL_PUD_PAGE_MASK;
+	return __pud(pfn | massage_pgprot(pgprot));
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return pfn_pmd(pmd_pfn(pmd),
+		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
+}
+
+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+	pudval_t v = native_pud_val(pud);
+
+	return __pud(v | set);
+}
+
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+	pudval_t v = native_pud_val(pud);
+
+	return __pud(v & ~clear);
+}
+
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+	return pud_set_flags(pud, _PAGE_PSE);
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pteval_t val = pte_val(pte);
+	pteval_t val = pte_val(pte), oldval = val;
 
 	/*
 	 * Chop off the NX bit (if present), and add the NX portion of
@@ -414,17 +463,17 @@
 	 */
 	val &= _PAGE_CHG_MASK;
 	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
-
+	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
 	return __pte(val);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	pmdval_t val = pmd_val(pmd);
+	pmdval_t val = pmd_val(pmd), oldval = val;
 
 	val &= _HPAGE_CHG_MASK;
 	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
-
+	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
 	return __pmd(val);
 }
 
@@ -577,8 +626,7 @@
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pmd_page(pmd)		\
-	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
+#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
 
 /*
  * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -646,8 +694,7 @@
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pud_page(pud)		\
-	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
+#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
 
 /* Find an entry in the second-level page table.. */
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -687,7 +734,7 @@
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pgd_page(pgd)		pfn_to_page(pgd_pfn(pgd))
 
 /* to find an entry in a page-table-directory. */
 static inline unsigned long pud_index(unsigned long address)
@@ -1010,6 +1057,15 @@
 #endif
 }
 
+
+#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
+extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+	return boot_cpu_has_bug(X86_BUG_L1TF);
+}
+
 #include <asm-generic/pgtable.h>
 #endif	/* __ASSEMBLY__ */
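Editor's note: the new __HAVE_ARCH_PFN_MODIFY_ALLOWED hook lets generic mm code refuse to create attacker-controlled PFN mappings above the L1TF limit. A hedged sketch of how a caller is expected to consult it; the wrapper name is illustrative, the real call sites live in the generic mm code rather than here.

/* Sketch: gate a requested PFN mapping on the L1TF check. */
static int pfn_mapping_ok(unsigned long pfn, pgprot_t prot)
{
	if (arch_has_pfn_modify_check() && !pfn_modify_allowed(pfn, prot))
		return -EPERM;	/* PFN reaches into the upper half of the cache's PA space */
	return 0;
}
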
 
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ce97c8c6..d5c4df9 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -13,13 +13,14 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
+#include <asm/fixmap.h>
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
 extern pgd_t init_level4_pgt[];
 
 #define swapper_pg_dir init_level4_pgt
@@ -166,29 +167,49 @@
 /*
  * Encode and de-code a swap entry
  *
- * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
- * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
- * | OFFSET (14->63) | TYPE (9-13)  |0|X|X|X| X| X|X|X|0| <- swp entry
+ * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
+ * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
+ * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
  *
  * G (8) is aliased and used as a PROT_NONE indicator for
  * !present ptes.  We need to start storing swap entries above
  * there.  We also need to avoid using A and D because of an
  * erratum where they can be incorrectly set by hardware on
  * non-present PTEs.
+ *
+ * SD (1) in swp entry is used to store soft dirty bit, which helps us
+ * remember soft dirty over page migration
+ *
+ * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
+ * but also L and G.
+ *
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
  */
-#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
-#define SWP_TYPE_BITS 5
-/* Place the offset above the type: */
-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
+#define SWP_TYPE_BITS		5
+
+#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
 
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
 
-#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
-					 & ((1U << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
-#define __swp_entry(type, offset)	((swp_entry_t) { \
-					 ((type) << (SWP_TYPE_FIRST_BIT)) \
-					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
+/* Extract the high bits for type */
+#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+/* Shift up (to get rid of type), then down to get value */
+#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+/*
+ * Shift the offset up "too far" by TYPE bits, then down again
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
+ */
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+	| ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
 
@@ -215,6 +236,8 @@
 extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
 extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
 
+#include <asm/pgtable-invert.h>
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_64_H */
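Editor's note: a worked example of the encoding described above, as a small user-space program using the same constants (5 type bits at the top of the entry, the offset stored bit-inverted below them so that non-present swap PTEs have their high "physical" bits set).

#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	9	/* _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
	/* Same expression as __swp_entry() above. */
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
	       (type << (64 - SWP_TYPE_BITS));
}

int main(void)
{
	uint64_t e = swp_entry(3, 0x42);

	/* Same expressions as __swp_type() and __swp_offset() above. */
	printf("type=%llu offset=%#llx\n",
	       (unsigned long long)(e >> (64 - SWP_TYPE_BITS)),
	       (unsigned long long)(~e << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT));
	return 0;	/* prints type=3 offset=0x42 */
}
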
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f1c8ac4..dfdb7e2 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -97,15 +97,15 @@
 /*
  * Tracking soft dirty bit when a page goes to a swap is tricky.
  * We need a bit which can be stored in pte _and_ not conflict
- * with swap entry format. On x86 bits 6 and 7 are *not* involved
- * into swap entry computation, but bit 6 is used for nonlinear
- * file mapping, so we borrow bit 7 for soft dirty tracking.
+ * with swap entry format. On x86 bits 1-4 are *not* involved
+ * in swap entry computation, but bit 7 is used for thp migration,
+ * so we borrow bit 1 for soft dirty tracking.
  *
  * Please note that this bit must be treated as swap dirty page
- * mark if and only if the PTE has present bit clear!
+ * mark if and only if the PTE/PMD has present bit clear!
  */
 #ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
+#define _PAGE_SWP_SOFT_DIRTY	_PAGE_RW
 #else
 #define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ec15ca2..ee8c629 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -136,6 +136,8 @@
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
+	/* Address space bits used by the cache internally */
+	u8			x86_cache_bits;
 };
 
 #define X86_VENDOR_INTEL	0
@@ -173,6 +175,11 @@
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
+static inline unsigned long long l1tf_pfn_limit(void)
+{
+	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+}
+
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
@@ -855,4 +862,16 @@
 
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
+
+enum l1tf_mitigations {
+	L1TF_MITIGATION_OFF,
+	L1TF_MITIGATION_FLUSH_NOWARN,
+	L1TF_MITIGATION_FLUSH,
+	L1TF_MITIGATION_FLUSH_NOSMT,
+	L1TF_MITIGATION_FULL,
+	L1TF_MITIGATION_FULL_FORCE
+};
+
+extern enum l1tf_mitigations l1tf_mitigation;
+
 #endif /* _ASM_X86_PROCESSOR_H */
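Editor's note: l1tf_pfn_limit() is simply half of the physical address space the cache can tag, expressed as a PFN. A small illustrative check for the 44 cache bits used by the affected CPUs (user-space arithmetic, not kernel code):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static uint64_t pfn_limit(unsigned int cache_bits)
{
	return 1ULL << (cache_bits - 1 - PAGE_SHIFT);	/* same formula as above */
}

int main(void)
{
	/* 44 cache bits: the limit is PFN 2^31, i.e. MAX_PA/2 = 8 TiB. */
	assert(pfn_limit(44) == (1ULL << 31));
	assert((pfn_limit(44) << PAGE_SHIFT) == (1ULL << 43));
	return 0;
}
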
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 026ea82..d25fb6b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -156,7 +156,6 @@
 	wbinvd();
 	return 0;
 }
-#define smp_num_siblings	1
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2d8788a..83252c2 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -101,6 +101,7 @@
 #define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
 #define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
 #define TIF_X32			30	/* 32-bit native x86-64 binary */
+#define TIF_FSCHECK		31	/* Check FS is USER_DS on return */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -124,6 +125,7 @@
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_ADDR32		(1 << TIF_ADDR32)
 #define _TIF_X32		(1 << TIF_X32)
+#define _TIF_FSCHECK		(1 << TIF_FSCHECK)
 
 /*
  * work to do in syscall_trace_enter().  Also includes TIF_NOHZ for
@@ -137,7 +139,7 @@
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK						\
 	((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |	\
-	_TIF_NOHZ)
+	_TIF_NOHZ | _TIF_FSCHECK)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index cf75871..1fbb174 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -129,13 +129,17 @@
 }
 
 int topology_update_package_map(unsigned int apicid, unsigned int cpu);
-extern int topology_phys_to_logical_pkg(unsigned int pkg);
+int topology_phys_to_logical_pkg(unsigned int pkg);
+bool topology_is_primary_thread(unsigned int cpu);
+bool topology_smt_supported(void);
 #else
 #define topology_max_packages()			(1)
 static inline int
 topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
 static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
 static inline int topology_max_smt_threads(void) { return 1; }
+static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
+static inline bool topology_smt_supported(void) { return false; }
 #endif
 
 static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1..342e597 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@
 		__field(struct fpu *, fpu)
 		__field(bool, fpregs_active)
 		__field(bool, fpstate_active)
-		__field(int, counter)
 		__field(u64, xfeatures)
 		__field(u64, xcomp_bv)
 		),
@@ -23,17 +22,15 @@
 		__entry->fpu		= fpu;
 		__entry->fpregs_active	= fpu->fpregs_active;
 		__entry->fpstate_active	= fpu->fpstate_active;
-		__entry->counter	= fpu->counter;
 		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
 			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
 		}
 	),
-	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+	TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
 			__entry->fpu,
 			__entry->fpregs_active,
 			__entry->fpstate_active,
-			__entry->counter,
 			__entry->xfeatures,
 			__entry->xcomp_bv
 	)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 0c87840..76a12ab 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -8,6 +8,7 @@
 #include <linux/kasan-checks.h>
 #include <linux/thread_info.h>
 #include <linux/string.h>
+#include <linux/sched.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
@@ -31,7 +32,12 @@
 
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current->thread.addr_limit)
-#define set_fs(x)	(current->thread.addr_limit = (x))
+static inline void set_fs(mm_segment_t fs)
+{
+	current->thread.addr_limit = fs;
+	/* On user-mode return, check fs is correct */
+	set_thread_flag(TIF_FSCHECK);
+}
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
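Editor's note: the TIF_FSCHECK flag set above is meant to be consumed on the return-to-user path. A hedged sketch of that consumer follows; the helper name and the exact failure policy are illustrative, not necessarily what the rest of the series implements.

/* Sketch: verify the address limit before returning to user mode. */
static inline void fscheck_on_user_return(void)
{
	if (!test_thread_flag(TIF_FSCHECK))
		return;

	/* A leftover KERNEL_DS would let user-directed copies reach kernel memory. */
	BUG_ON(!segment_eq(get_fs(), USER_DS));
	clear_thread_flag(TIF_FSCHECK);
}
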
 
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9cbfbef..72cacb0 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -499,4 +499,15 @@
 	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
 };
 
+enum vmx_l1d_flush_state {
+	VMENTER_L1D_FLUSH_AUTO,
+	VMENTER_L1D_FLUSH_NEVER,
+	VMENTER_L1D_FLUSH_COND,
+	VMENTER_L1D_FLUSH_ALWAYS,
+	VMENTER_L1D_FLUSH_EPT_DISABLED,
+	VMENTER_L1D_FLUSH_NOT_REQUIRED,
+};
+
+extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+
 #endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4c9c615..a9ba968 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -56,6 +56,7 @@
 obj-y			+= tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y			+= pci-iommu_table.o
 obj-y			+= resource.o
+obj-y			+= irqflags.o
 
 obj-y				+= process.o
 obj-y				+= fpu/
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 76cf21f..4f2af1e 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -34,6 +34,7 @@
 #include <linux/dmi.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/irq.h>
 
 #include <asm/trace/irq_vectors.h>
 #include <asm/irq_remapping.h>
@@ -55,6 +56,7 @@
 #include <asm/mce.h>
 #include <asm/tsc.h>
 #include <asm/hypervisor.h>
+#include <asm/irq_regs.h>
 
 unsigned int num_processors;
 
@@ -2041,6 +2043,23 @@
 	[0 ... NR_CPUS - 1] = -1,
 };
 
+#ifdef CONFIG_SMP
+/**
+ * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
+ * @apicid:	APIC ID to check
+ */
+bool apic_id_is_primary_thread(unsigned int apicid)
+{
+	u32 mask;
+
+	if (smp_num_siblings == 1)
+		return true;
+	/* Isolate the SMT bit(s) in the APICID and check for 0 */
+	mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
+	return !(apicid & mask);
+}
+#endif
+
 /*
  * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
  * and cpuid_to_apicid[] synchronized.
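Editor's note: worked example of the mask logic in apic_id_is_primary_thread(): with two siblings, fls(2) - 1 == 1, so the mask is 0x1 and only even APIC IDs are primary; with four siblings the mask is 0x3. The same arithmetic as a standalone check, using __builtin_clz() in place of the kernel's fls():

#include <assert.h>

/* Sketch of the SMT-bit isolation done by apic_id_is_primary_thread(). */
static int is_primary(unsigned int apicid, unsigned int siblings)
{
	unsigned int mask;

	if (siblings == 1)
		return 1;
	mask = (1U << (32 - __builtin_clz(siblings) - 1)) - 1;	/* fls(siblings) - 1 */
	return !(apicid & mask);
}

int main(void)
{
	assert(is_primary(0, 2) && !is_primary(1, 2) && is_primary(2, 2));
	assert(is_primary(4, 4) && !is_primary(5, 4) && !is_primary(6, 4));
	return 0;
}
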
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
index ae50d34..89d6e96 100644
--- a/arch/x86/kernel/apic/htirq.c
+++ b/arch/x86/kernel/apic/htirq.c
@@ -16,6 +16,8 @@
 #include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/htirq.h>
+#include <linux/irq.h>
+
 #include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
 #include <asm/apic.h>
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index cf89928..d34629d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -32,6 +32,7 @@
 
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 015bbf3..cfd17a3 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -12,6 +12,7 @@
  */
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/hpet.h>
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 4922ab6..c6bd3f9 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -11,6 +11,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/slab.h>
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 51287cd..313a85a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -239,6 +239,7 @@
 #include <asm/olpc.h>
 #include <asm/paravirt.h>
 #include <asm/reboot.h>
+#include <asm/nospec-branch.h>
 
 #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
 extern int (*console_blank_hook)(int);
@@ -613,11 +614,13 @@
 	gdt[0x40 / 8] = bad_bios_desc;
 
 	apm_irq_save(flags);
+	firmware_restrict_branch_speculation_start();
 	APM_DO_SAVE_SEGS;
 	apm_bios_call_asm(call->func, call->ebx, call->ecx,
 			  &call->eax, &call->ebx, &call->ecx, &call->edx,
 			  &call->esi);
 	APM_DO_RESTORE_SEGS;
+	firmware_restrict_branch_speculation_end();
 	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
@@ -689,10 +692,12 @@
 	gdt[0x40 / 8] = bad_bios_desc;
 
 	apm_irq_save(flags);
+	firmware_restrict_branch_speculation_start();
 	APM_DO_SAVE_SEGS;
 	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
 					 &call->eax);
 	APM_DO_RESTORE_SEGS;
+	firmware_restrict_branch_speculation_end();
 	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 4c2be99..4c2648b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -296,13 +296,34 @@
 }
 #endif
 
+static void amd_get_topology_early(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_TOPOEXT))
+		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+}
+
+/*
+ * Fix up cpu_core_id for pre-F17h systems to be in the
+ * [0 .. cores_per_node - 1] range. Not really needed but
+ * kept so as not to break existing setups.
+ */
+static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
+{
+	u32 cus_per_node;
+
+	if (c->x86 >= 0x17)
+		return;
+
+	cus_per_node = c->x86_max_cores / nodes_per_socket;
+	c->cpu_core_id %= cus_per_node;
+}
+
 /*
  * Fixup core topology information for
  * (1) AMD multi-node processors
  *     Assumption: Number of cores in each internal node is the same.
  * (2) AMD processors supporting compute units
  */
-#ifdef CONFIG_SMP
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
 	u8 node_id;
@@ -315,7 +336,6 @@
 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 
 		node_id  = ecx & 0xff;
-		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
 
 		if (c->x86 == 0x15)
 			c->cu_id = ebx & 0xff;
@@ -353,18 +373,11 @@
 	} else
 		return;
 
-	/* fixup multi-node processor information */
 	if (nodes_per_socket > 1) {
-		u32 cus_per_node;
-
 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-		cus_per_node = c->x86_max_cores / nodes_per_socket;
-
-		/* core id has to be in the [0 .. cores_per_node - 1] range */
-		c->cpu_core_id %= cus_per_node;
+		legacy_fixup_core_id(c);
 	}
 }
-#endif
 
 /*
  * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
@@ -372,7 +385,6 @@
  */
 static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
 	unsigned bits;
 	int cpu = smp_processor_id();
 
@@ -384,16 +396,11 @@
 	/* use socket ID also for last level cache */
 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 	amd_get_topology(c);
-#endif
 }
 
 u16 amd_get_nb_id(int cpu)
 {
-	u16 id = 0;
-#ifdef CONFIG_SMP
-	id = per_cpu(cpu_llc_id, cpu);
-#endif
-	return id;
+	return per_cpu(cpu_llc_id, cpu);
 }
 EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
@@ -567,6 +574,8 @@
 
 static void early_init_amd(struct cpuinfo_x86 *c)
 {
+	u64 value;
+
 	early_init_amd_mc(c);
 
 	/*
@@ -633,6 +642,23 @@
 	 */
 	if (cpu_has_amd_erratum(c, amd_erratum_400))
 		set_cpu_bug(c, X86_BUG_AMD_E400);
+
+
+	/* Re-enable TopologyExtensions if switched off by BIOS */
+	if (c->x86 == 0x15 &&
+	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
+	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+
+		if (msr_set_bit(0xc0011005, 54) > 0) {
+			rdmsrl(0xc0011005, value);
+			if (value & BIT_64(54)) {
+				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+			}
+		}
+	}
+
+	amd_get_topology_early(c);
 }
 
 static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -724,19 +750,6 @@
 {
 	u64 value;
 
-	/* re-enable TopologyExtensions if switched off by BIOS */
-	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
-	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-
-		if (msr_set_bit(0xc0011005, 54) > 0) {
-			rdmsrl(0xc0011005, value);
-			if (value & BIT_64(54)) {
-				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
-			}
-		}
-	}
-
 	/*
 	 * The way access filter has a performance penalty on some workloads.
 	 * Disable it on the affected CPUs.
@@ -799,15 +812,8 @@
 
 	cpu_detect_cache_sizes(c);
 
-	/* Multi core CPU? */
-	if (c->extended_cpuid_level >= 0x80000008) {
-		amd_detect_cmp(c);
-		srat_detect_node(c);
-	}
-
-#ifdef CONFIG_X86_32
-	detect_ht(c);
-#endif
+	amd_detect_cmp(c);
+	srat_detect_node(c);
 
 	init_amd_cacheinfo(c);
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 86af9b1..8103ada 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -21,14 +21,17 @@
 #include <asm/processor-flags.h>
 #include <asm/fpu/internal.h>
 #include <asm/msr.h>
+#include <asm/vmx.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/intel-family.h>
+#include <asm/e820.h>
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);
 
 /*
  * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -54,6 +57,12 @@
 {
 	identify_boot_cpu();
 
+	/*
+	 * identify_boot_cpu() initialized SMT support information, let the
+	 * core code know.
+	 */
+	cpu_smt_check_topology_early();
+
 	if (!IS_ENABLED(CONFIG_SMP)) {
 		pr_info("CPU: ");
 		print_cpu_info(&boot_cpu_data);
@@ -80,6 +89,8 @@
 	 */
 	ssb_select_mitigation();
 
+	l1tf_select_mitigation();
+
 #ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
@@ -310,23 +321,6 @@
 	return cmd;
 }
 
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-	    boot_cpu_data.x86 == 6) {
-		switch (boot_cpu_data.x86_model) {
-		case INTEL_FAM6_SKYLAKE_MOBILE:
-		case INTEL_FAM6_SKYLAKE_DESKTOP:
-		case INTEL_FAM6_SKYLAKE_X:
-		case INTEL_FAM6_KABYLAKE_MOBILE:
-		case INTEL_FAM6_KABYLAKE_DESKTOP:
-			return true;
-		}
-	}
-	return false;
-}
-
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -387,22 +381,15 @@
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If neither SMEP nor PTI are available, there is a risk of
-	 * hitting userspace addresses in the RSB after a context switch
-	 * from a shallow call stack to a deeper one. To prevent this fill
-	 * the entire RSB, even when using IBRS.
+	 * If spectre v2 protection has been enabled, unconditionally fill
+	 * RSB during a context switch; this protects against two independent
+	 * issues:
 	 *
-	 * Skylake era CPUs have a separate issue with *underflow* of the
-	 * RSB, when they will predict 'ret' targets from the generic BTB.
-	 * The proper mitigation for this is IBRS. If IBRS is not supported
-	 * or deactivated in favour of retpolines the RSB fill on context
-	 * switch is required.
+	 *	- RSB underflow (and switch to BTB) on Skylake+
+	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
 	 */
-	if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
-	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
-		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
-	}
+	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
 	/* Initialize Indirect Branch Prediction Barrier if supported */
 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
@@ -653,8 +640,160 @@
 		x86_amd_ssb_disable();
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"L1TF: " fmt
+
+/* Default mitigation for L1TF-affected CPUs */
+enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+EXPORT_SYMBOL_GPL(l1tf_mitigation);
+#endif
+enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the topmost address bit for the inversion of
+ * non-present PTEs. When the installed memory reaches into the topmost
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+	if (c->x86 != 6)
+		return;
+
+	switch (c->x86_model) {
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		if (c->x86_cache_bits < 44)
+			c->x86_cache_bits = 44;
+		break;
+	}
+}
+
+static void __init l1tf_select_mitigation(void)
+{
+	u64 half_pa;
+
+	if (!boot_cpu_has_bug(X86_BUG_L1TF))
+		return;
+
+	override_cache_bits(&boot_cpu_data);
+
+	switch (l1tf_mitigation) {
+	case L1TF_MITIGATION_OFF:
+	case L1TF_MITIGATION_FLUSH_NOWARN:
+	case L1TF_MITIGATION_FLUSH:
+		break;
+	case L1TF_MITIGATION_FLUSH_NOSMT:
+	case L1TF_MITIGATION_FULL:
+		cpu_smt_disable(false);
+		break;
+	case L1TF_MITIGATION_FULL_FORCE:
+		cpu_smt_disable(true);
+		break;
+	}
+
+#if CONFIG_PGTABLE_LEVELS == 2
+	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+	return;
+#endif
+
+	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+	if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+				half_pa);
+		pr_info("However, doing so will make a part of your RAM unusable.\n");
+		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+		return;
+	}
+
+	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
+
+static int __init l1tf_cmdline(char *str)
+{
+	if (!boot_cpu_has_bug(X86_BUG_L1TF))
+		return 0;
+
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		l1tf_mitigation = L1TF_MITIGATION_OFF;
+	else if (!strcmp(str, "flush,nowarn"))
+		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
+	else if (!strcmp(str, "flush"))
+		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
+	else if (!strcmp(str, "flush,nosmt"))
+		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+	else if (!strcmp(str, "full"))
+		l1tf_mitigation = L1TF_MITIGATION_FULL;
+	else if (!strcmp(str, "full,force"))
+		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
+
+	return 0;
+}
+early_param("l1tf", l1tf_cmdline);
+
+#undef pr_fmt
+
 #ifdef CONFIG_SYSFS
 
+#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static const char *l1tf_vmx_states[] = {
+	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
+	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
+	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
+	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
+	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
+	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
+};
+
+static ssize_t l1tf_show_state(char *buf)
+{
+	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+
+	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
+	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
+	     cpu_smt_control == CPU_SMT_ENABLED))
+		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
+			       l1tf_vmx_states[l1tf_vmx_mitigation]);
+
+	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
+		       l1tf_vmx_states[l1tf_vmx_mitigation],
+		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+}
+#else
+static ssize_t l1tf_show_state(char *buf)
+{
+	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+}
+#endif
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -680,6 +819,10 @@
 	case X86_BUG_SPEC_STORE_BYPASS:
 		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
 
+	case X86_BUG_L1TF:
+		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+			return l1tf_show_state(buf);
+		break;
 	default:
 		break;
 	}
@@ -706,4 +849,9 @@
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
 }
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+}
 #endif
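Editor's note: for reference, the parser above accepts l1tf=off, flush,nowarn, flush, flush,nosmt, full and full,force on the kernel command line. For example, to keep the conditional VMX flush but force SMT off on an affected host, boot with:

	l1tf=flush,nosmt

The resulting state is then reported through cpu_show_l1tf(); per the format strings in l1tf_show_state(), a typical line reads "Mitigation: PTE Inversion; VMX: conditional cache flushes, SMT disabled" in the CPU vulnerabilities sysfs directory.
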
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7a4279d..dc0850bb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -61,6 +61,13 @@
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* Number of siblings per CPU package */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* Last level cache ID of each logical CPU */
+DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
+
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
@@ -606,33 +613,36 @@
 		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
-void detect_ht(struct cpuinfo_x86 *c)
+int detect_ht_early(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-	static bool printed;
 
 	if (!cpu_has(c, X86_FEATURE_HT))
-		return;
+		return -1;
 
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		goto out;
+		return -1;
 
 	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
-		return;
+		return -1;
 
 	cpuid(1, &eax, &ebx, &ecx, &edx);
 
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-	if (smp_num_siblings == 1) {
+	if (smp_num_siblings == 1)
 		pr_info_once("CPU0: Hyper-Threading is disabled\n");
-		goto out;
-	}
+#endif
+	return 0;
+}
 
-	if (smp_num_siblings <= 1)
-		goto out;
+void detect_ht(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	int index_msb, core_bits;
+
+	if (detect_ht_early(c) < 0)
+		return;
 
 	index_msb = get_count_order(smp_num_siblings);
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
@@ -645,15 +655,6 @@
 
 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 				       ((1 << core_bits) - 1);
-
-out:
-	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
-		pr_info("CPU: Physical Processor ID: %d\n",
-			c->phys_proc_id);
-		pr_info("CPU: Processor Core ID: %d\n",
-			c->cpu_core_id);
-		printed = 1;
-	}
 #endif
 }
 
@@ -881,6 +882,7 @@
 			}
 		}
 #endif
+	c->x86_cache_bits = c->x86_phys_bits;
 }
 
 static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
@@ -925,6 +927,21 @@
 	{}
 };
 
+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+	/* in addition to cpu_no_speculation */
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MOOREFIELD	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_DENVERTON	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GEMINI_LAKE	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
+	{}
+};
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
@@ -950,6 +967,11 @@
 		return;
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+	if (x86_match_cpu(cpu_no_l1tf))
+		return;
+
+	setup_force_cpu_bug(X86_BUG_L1TF);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 3b19d82..2275900 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -46,6 +46,8 @@
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
+extern int detect_ht_early(struct cpuinfo_x86 *c);
 
 extern void x86_spec_ctrl_setup_ap(void);
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 93781e3..cee0fec 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -109,6 +109,9 @@
 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 		return false;
 
+	if (c->x86 != 6)
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
@@ -283,6 +286,13 @@
 	}
 
 	check_mpx_erratum(c);
+
+	/*
+	 * Get the number of SMT siblings early from the extended topology
+	 * leaf, if available. Otherwise try the legacy SMT detection.
+	 */
+	if (detect_extended_topology_early(c) < 0)
+		detect_ht_early(c);
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index c49e146..7e6163c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2397,9 +2397,6 @@
 	if (check_interval == old_check_interval)
 		return ret;
 
-	if (check_interval < 1)
-		check_interval = 1;
-
 	mutex_lock(&mce_sysfs_mutex);
 	mce_restart();
 	mutex_unlock(&mce_sysfs_mutex);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 0afaf00..b53a657 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -384,6 +384,24 @@
 /* fake device for request_firmware */
 static struct platform_device	*microcode_pdev;
 
+static int check_online_cpus(void)
+{
+	unsigned int cpu;
+
+	/*
+	 * Make sure all CPUs are online.  It's fine for SMT to be disabled if
+	 * all the primary threads are still online.
+	 */
+	for_each_present_cpu(cpu) {
+		if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
+			pr_err("Not all CPUs online, aborting microcode update.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int reload_for_cpu(int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -418,7 +436,13 @@
 		return size;
 
 	get_online_cpus();
+
+	ret = check_online_cpus();
+	if (ret)
+		goto put;
+
 	mutex_lock(&microcode_mutex);
+
 	for_each_online_cpu(cpu) {
 		tmp_ret = reload_for_cpu(cpu);
 		if (tmp_ret != 0)
@@ -431,6 +455,8 @@
 	if (!ret)
 		perf_check_microcode();
 	mutex_unlock(&microcode_mutex);
+
+put:
 	put_online_cpus();
 
 	if (!ret)
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index cd53135..6b5a850 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -26,16 +26,13 @@
  * exists, use it for populating initial_apicid and cpu topology
  * detection.
  */
-void detect_extended_topology(struct cpuinfo_x86 *c)
+int detect_extended_topology_early(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	unsigned int eax, ebx, ecx, edx, sub_index;
-	unsigned int ht_mask_width, core_plus_mask_width;
-	unsigned int core_select_mask, core_level_siblings;
-	static bool printed;
+	unsigned int eax, ebx, ecx, edx;
 
 	if (c->cpuid_level < 0xb)
-		return;
+		return -1;
 
 	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
 
@@ -43,7 +40,7 @@
 	 * check if the cpuid leaf 0xb is actually implemented.
 	 */
 	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
-		return;
+		return -1;
 
 	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
 
@@ -51,10 +48,30 @@
 	 * initial apic id, which also represents 32-bit extended x2apic id.
 	 */
 	c->initial_apicid = edx;
+	smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+#endif
+	return 0;
+}
+
+/*
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
+ */
+void detect_extended_topology(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned int eax, ebx, ecx, edx, sub_index;
+	unsigned int ht_mask_width, core_plus_mask_width;
+	unsigned int core_select_mask, core_level_siblings;
+
+	if (detect_extended_topology_early(c) < 0)
+		return;
 
 	/*
 	 * Populate HT related information from sub-leaf level 0.
 	 */
+	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
 	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
 	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 
@@ -85,15 +102,5 @@
 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
-
-	if (!printed) {
-		pr_info("CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-		if (c->x86_max_cores > 1)
-			pr_info("CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-		printed = 1;
-	}
-	return;
 #endif
 }
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 85f854b..3576ece 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -15,6 +15,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/kasan.h>
 
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
@@ -229,7 +230,10 @@
 	 * We're not going to return, but we might be on an IST stack or
 	 * have very little stack space left.  Rewind the stack and kill
 	 * the task.
+	 * Before we rewind the stack, we have to tell KASAN that we're going to
+	 * reuse the task stack and that existing poisons are invalid.
 	 */
+	kasan_unpoison_task_stack(current);
 	rewind_stack_do_exit(signr);
 }
 NOKPROBE_SYMBOL(oops_end);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 96d80df..fc96511 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -10,6 +10,7 @@
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
 #include <asm/traps.h>
+#include <asm/irq_regs.h>
 
 #include <linux/hardirq.h>
 #include <linux/pkeys.h>
@@ -58,27 +59,9 @@
 	return this_cpu_read(in_kernel_fpu);
 }
 
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
 static bool interrupted_kernel_fpu_idle(void)
 {
-	if (kernel_fpu_disabled())
-		return false;
-
-	if (use_eager_fpu())
-		return true;
-
-	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+	return !kernel_fpu_disabled();
 }
 
 /*
@@ -126,7 +109,6 @@
 		copy_fpregs_to_fpstate(fpu);
 	} else {
 		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
-		__fpregs_activate_hw();
 	}
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -137,8 +119,6 @@
 
 	if (fpu->fpregs_active)
 		copy_kernel_to_fpregs(&fpu->state);
-	else
-		__fpregs_deactivate_hw();
 
 	kernel_fpu_enable();
 }
@@ -200,10 +180,7 @@
 	trace_x86_fpu_before_save(fpu);
 	if (fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
-			if (use_eager_fpu())
-				copy_kernel_to_fpregs(&fpu->state);
-			else
-				fpregs_deactivate(fpu);
+			copy_kernel_to_fpregs(&fpu->state);
 		}
 	}
 	trace_x86_fpu_after_save(fpu);
@@ -248,7 +225,6 @@
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-	dst_fpu->counter = 0;
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
@@ -261,8 +237,7 @@
 	 * Don't let 'init optimized' areas of the XSAVE area
 	 * leak into the child task:
 	 */
-	if (use_eager_fpu())
-		memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
+	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
 
 	/*
 	 * Save current FPU registers directly into the child
@@ -284,10 +259,7 @@
 		memcpy(&src_fpu->state, &dst_fpu->state,
 		       fpu_kernel_xstate_size);
 
-		if (use_eager_fpu())
-			copy_kernel_to_fpregs(&src_fpu->state);
-		else
-			fpregs_deactivate(src_fpu);
+		copy_kernel_to_fpregs(&src_fpu->state);
 	}
 	preempt_enable();
 
@@ -460,7 +432,6 @@
 	trace_x86_fpu_before_restore(fpu);
 	fpregs_activate(fpu);
 	copy_kernel_to_fpregs(&fpu->state);
-	fpu->counter++;
 	trace_x86_fpu_after_restore(fpu);
 	kernel_fpu_enable();
 }
@@ -478,7 +449,6 @@
 void fpu__drop(struct fpu *fpu)
 {
 	preempt_disable();
-	fpu->counter = 0;
 
 	if (fpu->fpregs_active) {
 		/* Ignore delayed exceptions from user space */
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 3ec0d2d..3a93186 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -344,11 +344,9 @@
 		}
 
 		fpu->fpstate_active = 1;
-		if (use_eager_fpu()) {
-			preempt_disable();
-			fpu__restore(fpu);
-			preempt_enable();
-		}
+		preempt_disable();
+		fpu__restore(fpu);
+		preempt_enable();
 
 		return err;
 	} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index abfbb61b..e9d7f46 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -890,15 +890,6 @@
 	 */
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return -EINVAL;
-	/*
-	 * For most XSAVE components, this would be an arduous task:
-	 * brining fpstate up to date with fpregs, updating fpstate,
-	 * then re-populating fpregs.  But, for components that are
-	 * never lazily managed, we can just access the fpregs
-	 * directly.  PKRU is never managed lazily, so we can just
-	 * manipulate it directly.  Make sure it stays that way.
-	 */
-	WARN_ON_ONCE(!use_eager_fpu());
 
 	/* Set the bits we need in PKRU:  */
 	if (init_val & PKEY_DISABLE_ACCESS)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 6bf09f5..5e06ffe 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -26,6 +26,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 9d72cf5..b0d6697 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -23,6 +23,7 @@
 #include "../entry/calling.h"
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -493,13 +494,20 @@
 		KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_fixmap_pgt)
-	.fill	506,8,0
-	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
-	.fill	5,8,0
+	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
+	pgtno = 0
+	.rept (FIXMAP_PMD_NUM)
+	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+		+ _PAGE_TABLE;
+	pgtno = pgtno + 1
+	.endr
+	/* 6 MB reserved space + a 2MB hole */
+	.fill	4,8,0
 
 NEXT_PAGE(level1_fixmap_pgt)
+	.rept (FIXMAP_PMD_NUM)
 	.fill	512,8,0
+	.endr
 
 #undef PMDS
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 9512529..756634f 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1,6 +1,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/export.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 4e3b8a5..26d5451 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/timex.h>
 #include <linux/random.h>
 #include <linux/init.h>
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 8a7ad9f..c6f0ef1 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -10,6 +10,7 @@
 #include <linux/ftrace.h>
 #include <linux/delay.h>
 #include <linux/export.h>
+#include <linux/irq.h>
 
 #include <asm/apic.h>
 #include <asm/io_apic.h>
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 2763573..5aaa39a 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -10,6 +10,7 @@
 
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kernel_stat.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 9ebd0b0..bcd1b82 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
 #include <linux/ftrace.h>
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644
index 0000000..ddeeaac
--- /dev/null
+++ b/arch/x86/kernel/irqflags.S
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <linux/linkage.h>
+
+/*
+ * unsigned long native_save_fl(void)
+ */
+ENTRY(native_save_fl)
+	pushf
+	pop %_ASM_AX
+	ret
+ENDPROC(native_save_fl)
+EXPORT_SYMBOL(native_save_fl)
+
+/*
+ * void native_restore_fl(unsigned long flags)
+ * %eax/%rdi: flags
+ */
+ENTRY(native_restore_fl)
+	push %_ASM_ARG1
+	popf
+	ret
+ENDPROC(native_restore_fl)
+EXPORT_SYMBOL(native_restore_fl)
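Editor's note: the out-of-line versions return the flags in %eax/%rax and take the argument in the first argument register, so existing callers of the former inlines need no change. A minimal usage sketch; the surrounding function is illustrative.

#include <asm/irqflags.h>

/* Sketch: snapshot EFLAGS, do work that may alter IF, then restore it. */
static void restore_flags_example(void)
{
	unsigned long flags = native_save_fl();

	/* ... code that may enable or disable interrupts ... */

	native_restore_fl(flags);
}
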
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index f480b38..eeb77e5 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/timex.h>
 #include <linux/random.h>
 #include <linux/kprobes.h>
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 3407b14..490f9be 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -529,7 +529,7 @@
 static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
 {
 	return verify_pefile_signature(kernel, kernel_len,
-				       NULL,
+				       VERIFY_USE_SECONDARY_KEYRING,
 				       VERIFYING_KEXEC_PE_SIGNATURE);
 }
 #endif
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 516be61..64a70b2 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -61,6 +61,7 @@
 #include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/debugreg.h>
+#include <asm/sections.h>
 
 #include "common.h"
 
@@ -396,7 +397,6 @@
 		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
 		if ((s64) (s32) newdisp != newdisp) {
 			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
-			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
 			return 0;
 		}
 		disp = (u8 *) dest + insn_offset_displacement(&insn);
@@ -612,8 +612,7 @@
 		 * Raise a BUG or we'll continue in an endless reentering loop
 		 * and eventually a stack overflow.
 		 */
-		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
-		       p->addr);
+		pr_err("Unrecoverable kprobe detected.\n");
 		dump_kprobe(p);
 		BUG();
 	default:
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 1808a9c..1009d63 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -39,6 +39,7 @@
 #include <asm/insn.h>
 #include <asm/debugreg.h>
 #include <asm/nospec-branch.h>
+#include <asm/sections.h>
 
 #include "common.h"
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index bbf3d59..29d4656 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -88,10 +88,12 @@
 	struct branch *b = insnbuf;
 	unsigned long delta = (unsigned long)target - (addr+5);
 
-	if (tgt_clobbers & ~site_clobbers)
-		return len;	/* target would clobber too much for this site */
-	if (len < 5)
+	if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+		WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+#endif
 		return len;	/* call too long for patch site */
+	}
 
 	b->opcode = 0xe8; /* call */
 	b->delta = delta;
@@ -106,8 +108,12 @@
 	struct branch *b = insnbuf;
 	unsigned long delta = (unsigned long)target - (addr+5);
 
-	if (len < 5)
+	if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+		WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+#endif
 		return len;	/* call too long for patch site */
+	}
 
 	b->opcode = 0xe9;	/* jmp */
 	b->delta = delta;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index dffe81d..a266181 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -360,6 +360,7 @@
 	start_thread_common(regs, new_ip, new_sp,
 			    __USER_CS, __USER_DS, 0);
 }
+EXPORT_SYMBOL_GPL(start_thread);
 
 #ifdef CONFIG_COMPAT
 void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6b55012..49960ec 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -854,6 +854,12 @@
 	memblock_reserve(__pa_symbol(_text),
 			 (unsigned long)__bss_stop - (unsigned long)_text);
 
+	/*
+	 * Make sure page 0 is always reserved because on systems with
+	 * L1TF its contents can be leaked to user processes.
+	 */
+	memblock_reserve(0, PAGE_SIZE);
+
 	early_reserve_initrd();
 
 	/*
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index ea217ca..2863ad3 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -271,6 +271,7 @@
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
+	kvm_set_cpu_l1tf_flush_l1d();
 }
 
 __visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 10b22fc..ef38bc1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -76,13 +76,7 @@
 #include <asm/realmode.h>
 #include <asm/misc.h>
 #include <asm/spec-ctrl.h>
-
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-EXPORT_SYMBOL(smp_num_siblings);
-
-/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
+#include <asm/hw_irq.h>
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -296,6 +290,23 @@
 }
 
 /**
+ * topology_is_primary_thread - Check whether CPU is the primary SMT thread
+ * @cpu:	CPU to check
+ */
+bool topology_is_primary_thread(unsigned int cpu)
+{
+	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
+}
+
+/**
+ * topology_smt_supported - Check whether SMT is supported by the CPUs
+ */
+bool topology_smt_supported(void)
+{
+	return smp_num_siblings > 1;
+}
+
+/**
  * topology_phys_to_logical_pkg - Map a physical package id to a logical
  *
  * Returns logical package id or -1 if not found
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index d39c091..f8a0518 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -11,6 +11,7 @@
 
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/i8253.h>
 #include <linux/time.h>
 #include <linux/export.h>
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 0fe720d..3f818ce 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -12,6 +12,7 @@
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/param.h>
+#include <asm/tsc.h>
 
 #define MAX_NUM_FREQS	9
 
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 495c776..e78a6b1 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -290,7 +290,7 @@
 	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
 	/* has the side-effect of processing the entire instruction */
 	insn_get_length(insn);
-	if (WARN_ON_ONCE(!insn_complete(insn)))
+	if (!insn_complete(insn))
 		return -ENOEXEC;
 
 	if (is_prefix_bad(insn))
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7e5119c..c17d389 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,6 @@
 #include <linux/export.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
 #include <asm/user.h>
 #include <asm/fpu/xstate.h>
 #include "cpuid.h"
@@ -114,8 +113,7 @@
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-	if (use_eager_fpu())
-		kvm_x86_ops->fpu_activate(vcpu);
+	kvm_x86_ops->fpu_activate(vcpu);
 
 	/*
 	 * The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a16c066..8a4d6bc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -698,7 +698,7 @@
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		page = (void *)__get_free_page(GFP_KERNEL);
+		page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 		if (!page)
 			return -ENOMEM;
 		cache->objects[cache->nobjs++] = page;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c4cd128..5f44d63 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -175,6 +175,8 @@
 	uint64_t sysenter_eip;
 	uint64_t tsc_aux;
 
+	u64 msr_decfg;
+
 	u64 next_rip;
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
@@ -1567,6 +1569,7 @@
 	u32 dummy;
 	u32 eax = 1;
 
+	vcpu->arch.microcode_version = 0x01000065;
 	svm->spec_ctrl = 0;
 	svm->virt_spec_ctrl = 0;
 
@@ -2124,6 +2127,8 @@
 	u32 error_code;
 	int r = 1;
 
+	svm->vcpu.arch.l1tf_flush_l1d = true;
+
 	switch (svm->apf_reason) {
 	default:
 		error_code = svm->vmcb->control.exit_info_1;
@@ -3483,6 +3488,22 @@
 	return 0;
 }
 
+static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+	msr->data = 0;
+
+	switch (msr->index) {
+	case MSR_F10H_DECFG:
+		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+		break;
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3565,9 +3586,6 @@
 
 		msr_info->data = svm->virt_spec_ctrl;
 		break;
-	case MSR_IA32_UCODE_REV:
-		msr_info->data = 0x01000065;
-		break;
 	case MSR_F15H_IC_CFG: {
 
 		int family, model;
@@ -3585,6 +3603,9 @@
 			msr_info->data = 0x1E;
 		}
 		break;
+	case MSR_F10H_DECFG:
+		msr_info->data = svm->msr_decfg;
+		break;
 	default:
 		return kvm_get_msr_common(vcpu, msr_info);
 	}
@@ -3773,6 +3794,24 @@
 	case MSR_VM_IGNNE:
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
+	case MSR_F10H_DECFG: {
+		struct kvm_msr_entry msr_entry;
+
+		msr_entry.index = msr->index;
+		if (svm_get_msr_feature(&msr_entry))
+			return 1;
+
+		/* Check the supported bits */
+		if (data & ~msr_entry.data)
+			return 1;
+
+		/* Don't allow the guest to change a bit, #GP */
+		if (!msr->host_initiated && (data ^ msr_entry.data))
+			return 1;
+
+		svm->msr_decfg = data;
+		break;
+	}
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
@@ -4934,8 +4973,6 @@
 
 	clgi();
 
-	local_irq_enable();
-
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
 	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
@@ -4944,6 +4981,8 @@
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
+	local_irq_enable();
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5066,12 +5105,12 @@
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
-
 	reload_tss(vcpu);
 
 	local_irq_disable();
 
+	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
@@ -5502,6 +5541,7 @@
 	.vcpu_unblocking = svm_vcpu_unblocking,
 
 	.update_bp_intercept = update_bp_intercept,
+	.get_msr_feature = svm_get_msr_feature,
 	.get_msr = svm_get_msr,
 	.set_msr = svm_set_msr,
 	.get_segment_base = svm_get_segment_base,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7cb1077..ff7696c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -189,6 +189,156 @@
 
 extern const ulong vmx_return;
 
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
+static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+/* Storage for pre module init parameter parsing */
+static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+
+static const struct {
+	const char *option;
+	bool for_parse;
+} vmentry_l1d_param[] = {
+	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
+	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
+	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
+	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
+	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
+	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
+};
+
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+{
+	struct page *page;
+	unsigned int i;
+
+	if (!enable_ept) {
+		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+		return 0;
+	}
+
+	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+		u64 msr;
+
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+		if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+			return 0;
+		}
+	}
+
+	/* If set to auto use the default l1tf mitigation method */
+	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
+		switch (l1tf_mitigation) {
+		case L1TF_MITIGATION_OFF:
+			l1tf = VMENTER_L1D_FLUSH_NEVER;
+			break;
+		case L1TF_MITIGATION_FLUSH_NOWARN:
+		case L1TF_MITIGATION_FLUSH:
+		case L1TF_MITIGATION_FLUSH_NOSMT:
+			l1tf = VMENTER_L1D_FLUSH_COND;
+			break;
+		case L1TF_MITIGATION_FULL:
+		case L1TF_MITIGATION_FULL_FORCE:
+			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+			break;
+		}
+	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
+		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+	}
+
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+		if (!page)
+			return -ENOMEM;
+		vmx_l1d_flush_pages = page_address(page);
+
+		/*
+		 * Initialize each page with a different pattern in
+		 * order to protect against KSM in the nested
+		 * virtualization case.
+		 */
+		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+			       PAGE_SIZE);
+		}
+	}
+
+	l1tf_vmx_mitigation = l1tf;
+
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+		static_branch_enable(&vmx_l1d_should_flush);
+	else
+		static_branch_disable(&vmx_l1d_should_flush);
+
+	if (l1tf == VMENTER_L1D_FLUSH_COND)
+		static_branch_enable(&vmx_l1d_flush_cond);
+	else
+		static_branch_disable(&vmx_l1d_flush_cond);
+	return 0;
+}
+
+static int vmentry_l1d_flush_parse(const char *s)
+{
+	unsigned int i;
+
+	if (s) {
+		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+			if (vmentry_l1d_param[i].for_parse &&
+			    sysfs_streq(s, vmentry_l1d_param[i].option))
+				return i;
+		}
+	}
+	return -EINVAL;
+}
+
+static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+{
+	int l1tf, ret;
+
+	l1tf = vmentry_l1d_flush_parse(s);
+	if (l1tf < 0)
+		return l1tf;
+
+	if (!boot_cpu_has(X86_BUG_L1TF))
+		return 0;
+
+	/*
+	 * Has vmx_init() run already? If not then this is the pre init
+	 * parameter parsing. In that case just store the value and let
+	 * vmx_init() do the proper setup after enable_ept has been
+	 * established.
+	 */
+	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
+		vmentry_l1d_flush_param = l1tf;
+		return 0;
+	}
+
+	mutex_lock(&vmx_l1d_flush_mutex);
+	ret = vmx_setup_l1d_flush(l1tf);
+	mutex_unlock(&vmx_l1d_flush_mutex);
+	return ret;
+}
+
+static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+{
+	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
+		return sprintf(s, "???\n");
+
+	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+}
+
+static const struct kernel_param_ops vmentry_l1d_flush_ops = {
+	.set = vmentry_l1d_flush_set,
+	.get = vmentry_l1d_flush_get,
+};
+module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
+
 #define NR_AUTOLOAD_MSRS 8
 
 struct vmcs {
@@ -541,6 +691,11 @@
 			(unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+	unsigned int		nr;
+	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu       vcpu;
 	unsigned long         host_rsp;
@@ -573,9 +728,8 @@
 	struct loaded_vmcs   *loaded_vmcs;
 	bool                  __launched; /* temporary, used in vmx_vcpu_run */
 	struct msr_autoload {
-		unsigned nr;
-		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+		struct vmx_msrs guest;
+		struct vmx_msrs host;
 	} msr_autoload;
 	struct {
 		int           loaded;
@@ -1920,9 +2074,20 @@
 	vm_exit_controls_clearbit(vmx, exit);
 }
 
+static int find_msr(struct vmx_msrs *m, unsigned int msr)
+{
+	unsigned int i;
+
+	for (i = 0; i < m->nr; ++i) {
+		if (m->val[i].index == msr)
+			return i;
+	}
+	return -ENOENT;
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
-	unsigned i;
+	int i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
 	switch (msr) {
@@ -1943,18 +2108,21 @@
 		}
 		break;
 	}
+	i = find_msr(&m->guest, msr);
+	if (i < 0)
+		goto skip_guest;
+	--m->guest.nr;
+	m->guest.val[i] = m->guest.val[m->guest.nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
-			break;
-
-	if (i == m->nr)
+skip_guest:
+	i = find_msr(&m->host, msr);
+	if (i < 0)
 		return;
-	--m->nr;
-	m->guest[i] = m->guest[m->nr];
-	m->host[i] = m->host[m->nr];
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+
+	--m->host.nr;
+	m->host.val[i] = m->host.val[m->host.nr];
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -1969,9 +2137,9 @@
 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
-				  u64 guest_val, u64 host_val)
+				  u64 guest_val, u64 host_val, bool entry_only)
 {
-	unsigned i;
+	int i, j = 0;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
 	switch (msr) {
@@ -2006,24 +2174,31 @@
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
-			break;
+	i = find_msr(&m->guest, msr);
+	if (!entry_only)
+		j = find_msr(&m->host, msr);
 
-	if (i == NR_AUTOLOAD_MSRS) {
+	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
-	} else if (i == m->nr) {
-		++m->nr;
-		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 	}
+	if (i < 0) {
+		i = m->guest.nr++;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+	}
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
 
-	m->guest[i].index = msr;
-	m->guest[i].value = guest_val;
-	m->host[i].index = msr;
-	m->host[i].value = host_val;
+	if (entry_only)
+		return;
+
+	if (j < 0) {
+		j = m->host.nr++;
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+	}
+	m->host.val[j].index = msr;
+	m->host.val[j].value = host_val;
 }
 
 static void reload_tss(void)
@@ -2080,7 +2255,7 @@
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER,
-					      guest_efer, host_efer);
+					      guest_efer, host_efer, false);
 		return false;
 	} else {
 		guest_efer &= ~ignore_bits;
@@ -2994,6 +3169,11 @@
 	return !(val & ~valid_bits);
 }
 
+static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+{
+	return 1;
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -3244,7 +3424,7 @@
 		vcpu->arch.ia32_xss = data;
 		if (vcpu->arch.ia32_xss != host_xss)
 			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-				vcpu->arch.ia32_xss, host_xss);
+				vcpu->arch.ia32_xss, host_xss, false);
 		else
 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
@@ -5265,9 +5445,9 @@
 
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -5287,8 +5467,7 @@
 		++vmx->nmsrs;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
+	vmx->arch_capabilities = kvm_get_arch_capabilities();
 
 	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
 
@@ -5317,6 +5496,7 @@
 	u64 cr0;
 
 	vmx->rmode.vm86_active = 0;
+	vcpu->arch.microcode_version = 0x100000000ULL;
 	vmx->spec_ctrl = 0;
 
 	vmx->soft_vnmi_blocked = 0;
@@ -5722,6 +5902,7 @@
 		BUG_ON(enable_ept);
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		trace_kvm_page_fault(cr2, error_code);
+		vcpu->arch.l1tf_flush_l1d = true;
 
 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
@@ -7085,6 +7266,8 @@
 		     HRTIMER_MODE_REL_PINNED);
 	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 
+	vmx->nested.vpid02 = allocate_vpid();
+
 	vmx->nested.vmxon = true;
 
 	skip_emulated_instruction(vcpu);
@@ -8483,6 +8666,76 @@
 	}
 }
 
+/*
+ * Software based L1D cache flush which is used when microcode providing
+ * the cache control MSR is not loaded.
+ *
+ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
+ * flushing it requires reading 64 KiB because the replacement algorithm
+ * is not exactly LRU. This could be sized at runtime via topology
+ * information but as all relevant affected CPUs have 32KiB L1D cache size
+ * there is no point in doing so.
+ */
+static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+{
+	int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
+	/*
+	 * This code is only executed when the flush mode is 'cond' or
+	 * 'always'
+	 */
+	if (static_branch_likely(&vmx_l1d_flush_cond)) {
+		bool flush_l1d;
+
+		/*
+		 * Clear the per-vcpu flush bit, it gets set again
+		 * either from vcpu_run() or from one of the unsafe
+		 * VMEXIT handlers.
+		 */
+		flush_l1d = vcpu->arch.l1tf_flush_l1d;
+		vcpu->arch.l1tf_flush_l1d = false;
+
+		/*
+		 * Clear the per-cpu flush bit, it gets set again from
+		 * the interrupt handlers.
+		 */
+		flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
+		kvm_clear_cpu_l1tf_flush_l1d();
+
+		if (!flush_l1d)
+			return;
+	}
+
+	vcpu->stat.l1d_flush++;
+
+	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+		return;
+	}
+
+	asm volatile(
+		/* First ensure the pages are in the TLB */
+		"xorl	%%eax, %%eax\n"
+		".Lpopulate_tlb:\n\t"
+		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+		"addl	$4096, %%eax\n\t"
+		"cmpl	%%eax, %[size]\n\t"
+		"jne	.Lpopulate_tlb\n\t"
+		"xorl	%%eax, %%eax\n\t"
+		"cpuid\n\t"
+		/* Now fill the cache */
+		"xorl	%%eax, %%eax\n"
+		".Lfill_cache:\n"
+		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+		"addl	$64, %%eax\n\t"
+		"cmpl	%%eax, %[size]\n\t"
+		"jne	.Lfill_cache\n\t"
+		"lfence\n"
+		:: [flush_pages] "r" (vmx_l1d_flush_pages),
+		    [size] "r" (size)
+		: "eax", "ebx", "ecx", "edx");
+}
+
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -8854,7 +9107,7 @@
 			clear_atomic_switch_msr(vmx, msrs[i].msr);
 		else
 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
-					msrs[i].host);
+					msrs[i].host, false);
 }
 
 void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
@@ -8938,6 +9191,9 @@
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
+	if (static_branch_unlikely(&vmx_l1d_should_flush))
+		vmx_l1d_flush(vcpu);
+
 	asm(
 		/* Store host registers */
 		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -9263,10 +9519,8 @@
 			goto free_vmcs;
 	}
 
-	if (nested) {
+	if (nested)
 		nested_vmx_setup_ctls_msrs(vmx);
-		vmx->nested.vpid02 = allocate_vpid();
-	}
 
 	vmx->nested.posted_intr_nv = -1;
 	vmx->nested.current_vmptr = -1ull;
@@ -9284,7 +9538,6 @@
 	return &vmx->vcpu;
 
 free_vmcs:
-	free_vpid(vmx->nested.vpid02);
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
 	kfree(vmx->guest_msrs);
@@ -9298,6 +9551,37 @@
 	return ERR_PTR(err);
 }
 
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+
+static int vmx_vm_init(struct kvm *kvm)
+{
+	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
+		switch (l1tf_mitigation) {
+		case L1TF_MITIGATION_OFF:
+		case L1TF_MITIGATION_FLUSH_NOWARN:
+			/* 'I explicitly don't care' is set */
+			break;
+		case L1TF_MITIGATION_FLUSH:
+		case L1TF_MITIGATION_FLUSH_NOSMT:
+		case L1TF_MITIGATION_FULL:
+			/*
+			 * Warn upon starting the first VM in a potentially
+			 * insecure environment.
+			 */
+			if (cpu_smt_control == CPU_SMT_ENABLED)
+				pr_warn_once(L1TF_MSG_SMT);
+			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
+				pr_warn_once(L1TF_MSG_L1D);
+			break;
+		case L1TF_MITIGATION_FULL_FORCE:
+			/* Flush is enforced */
+			break;
+		}
+	}
+	return 0;
+}
+
 static void __init vmx_check_processor_compat(void *rtn)
 {
 	struct vmcs_config vmcs_conf;
@@ -10093,6 +10377,15 @@
 	vmx_set_constant_host_state(vmx);
 
 	/*
+	 * Set the MSR load/store lists to match L0's settings.
+	 */
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
 	 * entry, but only if the current (host) sp changed from the value
 	 * we wrote last (vmx->host_rsp). This cache is no longer relevant
@@ -10442,6 +10735,9 @@
 
 	vmcs12->launch_state = 1;
 
+	/* Hide L1D cache contents from the nested guest.  */
+	vmx->vcpu.arch.l1tf_flush_l1d = true;
+
 	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
 		return kvm_vcpu_halt(vcpu);
 
@@ -10936,6 +11232,8 @@
 	load_vmcs12_host_state(vcpu, vmcs12);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
@@ -11367,6 +11665,8 @@
 	.cpu_has_accelerated_tpr = report_flexpriority,
 	.has_emulated_msr = vmx_has_emulated_msr,
 
+	.vm_init = vmx_vm_init,
+
 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
 	.vcpu_reset = vmx_vcpu_reset,
@@ -11376,6 +11676,7 @@
 	.vcpu_put = vmx_vcpu_put,
 
 	.update_bp_intercept = update_exception_bitmap,
+	.get_msr_feature = vmx_get_msr_feature,
 	.get_msr = vmx_get_msr,
 	.set_msr = vmx_set_msr,
 	.get_segment_base = vmx_get_segment_base,
@@ -11486,13 +11787,54 @@
 	.setup_mce = vmx_setup_mce,
 };
 
+static void vmx_cleanup_l1d_flush(void)
+{
+	if (vmx_l1d_flush_pages) {
+		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+		vmx_l1d_flush_pages = NULL;
+	}
+	/* Restore state so sysfs ignores VMX */
+	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+}
+
+
+static void vmx_exit(void)
+{
+#ifdef CONFIG_KEXEC_CORE
+	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+	synchronize_rcu();
+#endif
+
+	kvm_exit();
+
+	vmx_cleanup_l1d_flush();
+}
+module_exit(vmx_exit)
+
 static int __init vmx_init(void)
 {
-	int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
-                     __alignof__(struct vcpu_vmx), THIS_MODULE);
+	int r;
+
+	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+		     __alignof__(struct vcpu_vmx), THIS_MODULE);
 	if (r)
 		return r;
 
+	/*
+	 * Must be called after kvm_init() so enable_ept is properly set
+	 * up. Hand in the mitigation parameter value that was stored by
+	 * the pre-module-init parser. If no parameter was given, it will
+	 * contain 'auto' which will be turned into the default 'cond'
+	 * mitigation mode.
+	 */
+	if (boot_cpu_has(X86_BUG_L1TF)) {
+		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+		if (r) {
+			vmx_exit();
+			return r;
+		}
+	}
+
 #ifdef CONFIG_KEXEC_CORE
 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 			   crash_vmclear_local_loaded_vmcss);
@@ -11500,16 +11842,4 @@
 
 	return 0;
 }
-
-static void __exit vmx_exit(void)
-{
-#ifdef CONFIG_KEXEC_CORE
-	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
-	synchronize_rcu();
-#endif
-
-	kvm_exit();
-}
-
 module_init(vmx_init)
-module_exit(vmx_exit)
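
The vmentry_l1d_flush parameter registered above is writable at runtime (mode 0644); writes are funneled through vmentry_l1d_flush_set(), which re-resolves the static keys under vmx_l1d_flush_mutex. A minimal userspace sketch for inspecting the resolved mode; the sysfs path is assumed from the usual kvm_intel module naming and is not spelled out in the patch:

#include <stdio.h>

int main(void)
{
	/* Path assumed from the usual kvm_intel module naming. */
	const char *path = "/sys/module/kvm_intel/parameters/vmentry_l1d_flush";
	char mode[32] = "";
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fgets(mode, sizeof(mode), f))
		printf("vmentry_l1d_flush: %s", mode);	/* e.g. "cond" */
	fclose(f);
	return 0;
}

Writing "never", "cond" or "always" to the same file takes the path through vmentry_l1d_flush_parse() and vmx_setup_l1d_flush() shown above.
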
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ca23af..5013ef1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -180,6 +180,7 @@
 	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
 	{ "irq_injections", VCPU_STAT(irq_injections) },
 	{ "nmi_injections", VCPU_STAT(nmi_injections) },
+	{ "l1d_flush", VCPU_STAT(l1d_flush) },
 	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
 	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
 	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
@@ -1007,6 +1008,71 @@
 
 static unsigned num_emulated_msrs;
 
+/*
+ * List of msr numbers which are used to expose MSR-based features that
+ * can be used by a hypervisor to validate requested CPU features.
+ */
+static u32 msr_based_features[] = {
+	MSR_F10H_DECFG,
+	MSR_IA32_UCODE_REV,
+	MSR_IA32_ARCH_CAPABILITIES,
+};
+
+static unsigned int num_msr_based_features;
+
+u64 kvm_get_arch_capabilities(void)
+{
+	u64 data;
+
+	rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
+
+	/*
+	 * If we're doing cache flushes (either "always" or "cond")
+	 * we will do one whenever the guest does a vmlaunch/vmresume.
+	 * If an outer hypervisor is doing the cache flush for us
+	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
+	 * capability to the guest too, and if EPT is disabled we're not
+	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
+	 * require a nested hypervisor to do a flush of its own.
+	 */
+	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
+		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
+
+	return data;
+}
+EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
+
+static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+	switch (msr->index) {
+	case MSR_IA32_ARCH_CAPABILITIES:
+		msr->data = kvm_get_arch_capabilities();
+		break;
+	case MSR_IA32_UCODE_REV:
+		rdmsrl_safe(msr->index, &msr->data);
+		break;
+	default:
+		if (kvm_x86_ops->get_msr_feature(msr))
+			return 1;
+	}
+	return 0;
+}
+
+static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+	struct kvm_msr_entry msr;
+	int r;
+
+	msr.index = index;
+	r = kvm_get_msr_feature(&msr);
+	if (r)
+		return r;
+
+	*data = msr.data;
+
+	return 0;
+}
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits)
@@ -2121,13 +2187,16 @@
 
 	switch (msr) {
 	case MSR_AMD64_NB_CFG:
-	case MSR_IA32_UCODE_REV:
 	case MSR_IA32_UCODE_WRITE:
 	case MSR_VM_HSAVE_PA:
 	case MSR_AMD64_PATCH_LOADER:
 	case MSR_AMD64_BU_CFG2:
 		break;
 
+	case MSR_IA32_UCODE_REV:
+		if (msr_info->host_initiated)
+			vcpu->arch.microcode_version = data;
+		break;
 	case MSR_EFER:
 		return set_efer(vcpu, data);
 	case MSR_K7_HWCR:
@@ -2402,7 +2471,7 @@
 		msr_info->data = 0;
 		break;
 	case MSR_IA32_UCODE_REV:
-		msr_info->data = 0x100000000ULL;
+		msr_info->data = vcpu->arch.microcode_version;
 		break;
 	case MSR_MTRRcap:
 	case 0x200 ... 0x2ff:
@@ -2545,13 +2614,11 @@
 		    int (*do_msr)(struct kvm_vcpu *vcpu,
 				  unsigned index, u64 *data))
 {
-	int i, idx;
+	int i;
 
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	for (i = 0; i < msrs->nmsrs; ++i)
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	return i;
 }
@@ -2651,6 +2718,7 @@
 	case KVM_CAP_ASSIGN_DEV_IRQ:
 	case KVM_CAP_PCI_2_3:
 #endif
+	case KVM_CAP_GET_MSR_FEATURES:
 		r = 1;
 		break;
 	case KVM_CAP_ADJUST_CLOCK:
@@ -2770,6 +2838,31 @@
 			goto out;
 		r = 0;
 		break;
+	case KVM_GET_MSR_FEATURE_INDEX_LIST: {
+		struct kvm_msr_list __user *user_msr_list = argp;
+		struct kvm_msr_list msr_list;
+		unsigned int n;
+
+		r = -EFAULT;
+		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
+			goto out;
+		n = msr_list.nmsrs;
+		msr_list.nmsrs = num_msr_based_features;
+		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
+			goto out;
+		r = -E2BIG;
+		if (n < msr_list.nmsrs)
+			goto out;
+		r = -EFAULT;
+		if (copy_to_user(user_msr_list->indices, &msr_based_features,
+				 num_msr_based_features * sizeof(u32)))
+			goto out;
+		r = 0;
+		break;
+	}
+	case KVM_GET_MSRS:
+		r = msr_io(NULL, argp, do_get_msr_feature, 1);
+		break;
 	}
 	default:
 		r = -EINVAL;
@@ -3451,12 +3544,18 @@
 		r = 0;
 		break;
 	}
-	case KVM_GET_MSRS:
+	case KVM_GET_MSRS: {
+		int idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = msr_io(vcpu, argp, do_get_msr, 1);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
-	case KVM_SET_MSRS:
+	}
+	case KVM_SET_MSRS: {
+		int idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = msr_io(vcpu, argp, do_set_msr, 0);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
+	}
 	case KVM_TPR_ACCESS_REPORTING: {
 		struct kvm_tpr_access_ctl tac;
 
@@ -4236,6 +4335,19 @@
 		j++;
 	}
 	num_emulated_msrs = j;
+
+	for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+		struct kvm_msr_entry msr;
+
+		msr.index = msr_based_features[i];
+		if (kvm_get_msr_feature(&msr))
+			continue;
+
+		if (j < i)
+			msr_based_features[j] = msr_based_features[i];
+		j++;
+	}
+	num_msr_based_features = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
@@ -4476,6 +4588,9 @@
 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 				unsigned int bytes, struct x86_exception *exception)
 {
+	/* kvm_write_guest_virt_system can pull in tons of pages. */
+	vcpu->arch.l1tf_flush_l1d = true;
+
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 					   PFERR_WRITE_MASK, exception);
 }
@@ -5574,6 +5689,8 @@
 	bool writeback = true;
 	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
 
+	vcpu->arch.l1tf_flush_l1d = true;
+
 	/*
 	 * Clear write_fault_to_shadow_pgtable here to ensure it is
 	 * never reused.
@@ -6929,6 +7046,7 @@
 	struct kvm *kvm = vcpu->kvm;
 
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+	vcpu->arch.l1tf_flush_l1d = true;
 
 	for (;;) {
 		if (kvm_vcpu_running(vcpu)) {
@@ -7513,16 +7631,6 @@
 	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
 	__kernel_fpu_end();
 	++vcpu->stat.fpu_reload;
-	/*
-	 * If using eager FPU mode, or if the guest is a frequent user
-	 * of the FPU, just leave the FPU active for next time.
-	 * Every 255 times fpu_counter rolls over to 0; a guest that uses
-	 * the FPU in bursts will revert to loading it on demand.
-	 */
-	if (!use_eager_fpu()) {
-		if (++vcpu->fpu_counter < 5)
-			kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
-	}
 	trace_kvm_fpu(0);
 }
 
@@ -7899,6 +8007,7 @@
 
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
+	vcpu->arch.l1tf_flush_l1d = true;
 	kvm_x86_ops->sched_in(vcpu, cpu);
 }
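
Taken together, the x86.c hunks above expose a feature-MSR interface on the /dev/kvm system fd: KVM_CAP_GET_MSR_FEATURES advertises it, KVM_GET_MSR_FEATURE_INDEX_LIST enumerates msr_based_features[], and KVM_GET_MSRS on the system fd is routed through do_get_msr_feature(). A minimal userspace sketch, assuming a uapi <linux/kvm.h> recent enough to carry these definitions (0x10a is MSR_IA32_ARCH_CAPABILITIES):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0 || ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0)
		return 1;

	/* KVM_GET_MSRS on the system fd reads feature MSRs, not vCPU state. */
	struct kvm_msrs *msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));

	if (!msrs)
		return 1;
	msrs->nmsrs = 1;
	msrs->entries[0].index = 0x10a;	/* MSR_IA32_ARCH_CAPABILITIES */

	/* The ioctl returns the number of MSRs actually read. */
	if (ioctl(kvm, KVM_GET_MSRS, msrs) == 1)
		printf("ARCH_CAPABILITIES = 0x%llx\n",
		       (unsigned long long)msrs->entries[0].data);

	free(msrs);
	return 0;
}

This mirrors how a VMM would learn whether ARCH_CAP_SKIP_VMENTRY_L1DFLUSH can be forwarded to a guest, per kvm_get_arch_capabilities() above.
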
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 54efa85..0da3945 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -23,6 +23,7 @@
 #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
 #include <asm/vm86.h>			/* struct vm86			*/
 #include <asm/mmu_context.h>		/* vma_pkey()			*/
+#include <asm/sections.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -329,8 +330,6 @@
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
 
-	WARN_ON_ONCE(in_nmi());
-
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ae9b84c..90801a8 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,8 @@
 #include <linux/swap.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>	/* for max_low_pfn */
+#include <linux/swapfile.h>
+#include <linux/swapops.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -780,3 +782,26 @@
 	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
 	__pte2cachemode_tbl[entry] = cache;
 }
+
+#ifdef CONFIG_SWAP
+unsigned long max_swapfile_size(void)
+{
+	unsigned long pages;
+
+	pages = generic_max_swapfile_size();
+
+	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+		unsigned long long l1tf_limit = l1tf_pfn_limit();
+		/*
+		 * We encode swap offsets also with 3 bits below those for pfn
+		 * which makes the usable limit higher.
+		 */
+#if CONFIG_PGTABLE_LEVELS > 2
+		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+#endif
+		pages = min_t(unsigned long long, l1tf_limit, pages);
+	}
+	return pages;
+}
+#endif
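
To put the max_swapfile_size() clamp above into numbers, a rough worked example; the 46-bit physical address width and treating l1tf_pfn_limit() as MAX_PA/2 expressed in 4 KiB pages are assumptions for illustration, not taken from this hunk:

#include <stdio.h>

int main(void)
{
	/* Assumed: 46 physical address bits, PAGE_SHIFT = 12,
	 * l1tf_pfn_limit() ~= MAX_PA/2 in pages. */
	unsigned long long l1tf_limit = 1ULL << (46 - 1 - 12);	/* 2^33 pfns */
	unsigned long long pages = l1tf_limit << 3;		/* 3 spare offset bits */

	/* ~2^36 pages, i.e. roughly 256 TiB of addressable swap */
	printf("L1TF swap page limit: %llu pages\n", pages);
	return 0;
}

So the clamp only bites on very large swap areas; smaller setups keep the limit from generic_max_swapfile_size().
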
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index ec678aa..3f729e2 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -20,6 +20,7 @@
 #include <asm/desc.h>
 #include <asm/cmdline.h>
 #include <asm/vsyscall.h>
+#include <asm/sections.h>
 
 int kaiser_enabled __read_mostly = 1;
 EXPORT_SYMBOL(kaiser_enabled);	/* for inlined TLB flush functions */
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index cadb82b..c695272 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -125,24 +125,29 @@
 
 static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
 {
+	pmd_t new_pmd;
 	pmdval_t v = pmd_val(*pmd);
 	if (clear) {
-		*old = v & _PAGE_PRESENT;
-		v &= ~_PAGE_PRESENT;
-	} else	/* presume this has been called with clear==true previously */
-		v |= *old;
-	set_pmd(pmd, __pmd(v));
+		*old = v;
+		new_pmd = pmd_mknotpresent(*pmd);
+	} else {
+		/* Presume this has been called with clear==true previously */
+		new_pmd = __pmd(*old);
+	}
+	set_pmd(pmd, new_pmd);
 }
 
 static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
 {
 	pteval_t v = pte_val(*pte);
 	if (clear) {
-		*old = v & _PAGE_PRESENT;
-		v &= ~_PAGE_PRESENT;
-	} else	/* presume this has been called with clear==true previously */
-		v |= *old;
-	set_pte_atomic(pte, __pte(v));
+		*old = v;
+		/* Nothing should care about address */
+		pte_clear(&init_mm, 0, pte);
+	} else {
+		/* Presume this has been called with clear==true previously */
+		set_pte_atomic(pte, __pte(*old));
+	}
 }
 
 static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index d2dc043..74609a9 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -121,3 +121,24 @@
 		return "[mpx]";
 	return NULL;
 }
+
+/*
+ * Only allow root to set high MMIO mappings to PROT_NONE.
+ * This prevents an unprivileged user from setting them to PROT_NONE and
+ * inverting them, then pointing them at valid memory for L1TF speculation.
+ *
+ * Note: locked-down kernels may want to disable the root override.
+ */
+bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+	if (!boot_cpu_has_bug(X86_BUG_L1TF))
+		return true;
+	if (!__pte_needs_invert(pgprot_val(prot)))
+		return true;
+	/* If it's real memory always allow */
+	if (pfn_valid(pfn))
+		return true;
+	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+		return false;
+	return true;
+}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index a8f90ce..dc6d990 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -60,7 +60,7 @@
 	eb->nid = nid;
 
 	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
-		emu_nid_to_phys[nid] = nid;
+		emu_nid_to_phys[nid] = pb->nid;
 
 	pb->start += size;
 	if (pb->start >= pb->end) {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index dcd6714..1271bc9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1001,8 +1001,8 @@
 
 		pmd = pmd_offset(pud, start);
 
-		set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
-				   massage_pgprot(pmd_pgprot)));
+		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
+					canon_pgprot(pmd_pgprot))));
 
 		start	  += PMD_SIZE;
 		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
@@ -1074,8 +1074,8 @@
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
 	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
-		set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
-				   massage_pgprot(pud_pgprot)));
+		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
+				   canon_pgprot(pud_pgprot))));
 
 		start	  += PUD_SIZE;
 		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index a3b63e5..8cbed30 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -536,6 +536,15 @@
 {
 	unsigned long address = __fix_to_virt(idx);
 
+#ifdef CONFIG_X86_64
+	/*
+	 * Ensure that the static initial page tables are covering the
+	 * fixmap completely.
+	 */
+	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
 	if (idx >= __end_of_fixed_addresses) {
 		BUG();
 		return;
@@ -653,28 +662,50 @@
 	return 0;
 }
 
+#ifdef CONFIG_X86_64
 /**
  * pud_free_pmd_page - Clear pud entry and free pmd page.
  * @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
  *
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
  */
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	pmd_t *pmd;
+	pmd_t *pmd, *pmd_sv;
+	pte_t *pte;
 	int i;
 
 	if (pud_none(*pud))
 		return 1;
 
 	pmd = (pmd_t *)pud_page_vaddr(*pud);
+	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+	if (!pmd_sv)
+		return 0;
 
-	for (i = 0; i < PTRS_PER_PMD; i++)
-		if (!pmd_free_pte_page(&pmd[i]))
-			return 0;
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd_sv[i] = pmd[i];
+		if (!pmd_none(pmd[i]))
+			pmd_clear(&pmd[i]);
+	}
 
 	pud_clear(pud);
+
+	/* INVLPG to clear all paging-structure caches */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (!pmd_none(pmd_sv[i])) {
+			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+			free_page((unsigned long)pte);
+		}
+	}
+
+	free_page((unsigned long)pmd_sv);
 	free_page((unsigned long)pmd);
 
 	return 1;
@@ -683,11 +714,12 @@
 /**
  * pmd_free_pte_page - Clear pmd entry and free pte page.
  * @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
  *
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
  * Return: 1 if clearing the entry succeeded. 0 otherwise.
  */
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	pte_t *pte;
 
@@ -696,8 +728,30 @@
 
 	pte = (pte_t *)pmd_page_vaddr(*pmd);
 	pmd_clear(pmd);
+
+	/* INVLPG to clear all paging-structure caches */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
 	free_page((unsigned long)pte);
 
 	return 1;
 }
+
+#else /* !CONFIG_X86_64 */
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+	return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This assures that ioremap()
+ * does not update sync'd pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+	return pmd_none(*pmd);
+}
+
+#endif /* CONFIG_X86_64 */
 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 0bbec04..e2d2b3c 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -142,8 +142,7 @@
  * Called from the FPU code when creating a fresh set of FPU
  * registers.  This is called from a very specific context where
  * we know the FPU registers are safe for use and we can use PKRU
- * directly.  The fact that PKRU is only available when we are
- * using eagerfpu mode makes this possible.
+ * directly.
  */
 void copy_init_pkru_to_fpregs(void)
 {
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index dcb2d9d..351a55d 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -45,6 +45,7 @@
 #include <asm/realmode.h>
 #include <asm/time.h>
 #include <asm/pgalloc.h>
+#include <asm/sections.h>
 
 /*
  * We allocate runtime services regions bottom-up, starting from -4G, i.e.
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 393a0c0..dee9939 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -13,6 +13,7 @@
 #include <linux/dmi.h>
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
+#include <asm/sections.h>
 
 #define EFI_MIN_RESERVE 5120
 
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 10bad1e..85e112e 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -18,6 +18,7 @@
 #include <asm/intel-mid.h>
 #include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>
+#include <asm/hw_irq.h>
 
 #define TANGIER_EXT_TIMER0_MSI 12
 
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 0f017518..16d4967 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1283,6 +1283,7 @@
 	struct msg_desc msgdesc;
 
 	ack_APIC_irq();
+	kvm_set_cpu_l1tf_flush_l1d();
 	time_start = get_cycles();
 
 	bcp = &per_cpu(bau_control, smp_processor_id());
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2986a13..db7cf87 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -35,6 +35,7 @@
 #include <linux/frame.h>
 
 #include <linux/kexec.h>
+#include <linux/slab.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c92f75f..ebceaba 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1936,7 +1936,7 @@
 		 * L3_k[511] -> level2_fixmap_pgt */
 		convert_pfn_mfn(level3_kernel_pgt);
 
-		/* L3_k[511][506] -> level1_fixmap_pgt */
+		/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
 		convert_pfn_mfn(level2_fixmap_pgt);
 	}
 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1970,7 +1970,11 @@
 		set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
 		set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 		set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+		for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+			set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+				      PAGE_KERNEL_RO);
+		}
 
 		/* Pin down new L4 */
 		pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index b9fc525..0b29a43 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -477,7 +477,7 @@
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
 {
 	int err, ret = IRQ_NONE;
-	struct pt_regs regs;
+	struct pt_regs regs = {0};
 	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
 	uint8_t xenpmu_flags = get_xenpmu_flags();
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 9f21b0c..36bfafb 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -18,6 +18,7 @@
 #include <asm/setup.h>
 #include <asm/acpi.h>
 #include <asm/numa.h>
+#include <asm/sections.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index 2041abb..34545ec 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -31,16 +31,32 @@
  *
  */
 
-	.macro	__loop_cache_all ar at insn size line_width
+
+	.macro	__loop_cache_unroll ar at insn size line_width max_immed
+
+	.if	(1 << (\line_width)) > (\max_immed)
+	.set	_reps, 1
+	.elseif	(2 << (\line_width)) > (\max_immed)
+	.set	_reps, 2
+	.else
+	.set	_reps, 4
+	.endif
+
+	__loopi	\ar, \at, \size, (_reps << (\line_width))
+	.set	_index, 0
+	.rep	_reps
+	\insn	\ar, _index << (\line_width)
+	.set	_index, _index + 1
+	.endr
+	__endla	\ar, \at, _reps << (\line_width)
+
+	.endm
+
+
+	.macro	__loop_cache_all ar at insn size line_width max_immed
 
 	movi	\ar, 0
-
-	__loopi	\ar, \at, \size, (4 << (\line_width))
-	\insn	\ar, 0 << (\line_width)
-	\insn	\ar, 1 << (\line_width)
-	\insn	\ar, 2 << (\line_width)
-	\insn	\ar, 3 << (\line_width)
-	__endla	\ar, \at, 4 << (\line_width)
+	__loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
 
 	.endm
 
@@ -57,14 +73,9 @@
 	.endm
 
 
-	.macro	__loop_cache_page ar at insn line_width
+	.macro	__loop_cache_page ar at insn line_width max_immed
 
-	__loopi	\ar, \at, PAGE_SIZE, 4 << (\line_width)
-	\insn	\ar, 0 << (\line_width)
-	\insn	\ar, 1 << (\line_width)
-	\insn	\ar, 2 << (\line_width)
-	\insn	\ar, 3 << (\line_width)
-	__endla	\ar, \at, 4 << (\line_width)
+	__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
 
 	.endm
 
@@ -72,7 +83,8 @@
 	.macro	___unlock_dcache_all ar at
 
 #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
+		XCHAL_DCACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -81,7 +93,8 @@
 	.macro	___unlock_icache_all ar at
 
 #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
-	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
+	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
+		XCHAL_ICACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -90,7 +103,8 @@
 	.macro	___flush_invalidate_dcache_all ar at
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
+		XCHAL_DCACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -99,7 +113,8 @@
 	.macro	___flush_dcache_all ar at
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
+		XCHAL_DCACHE_LINEWIDTH 240
 #endif
 
 	.endm
@@ -108,8 +123,8 @@
 	.macro	___invalidate_dcache_all ar at
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
-			 XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
+			 XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -118,8 +133,8 @@
 	.macro	___invalidate_icache_all ar at
 
 #if XCHAL_ICACHE_SIZE
-	__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
-			 XCHAL_ICACHE_LINEWIDTH
+	__loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
+			 XCHAL_ICACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -166,7 +181,7 @@
 	.macro	___flush_invalidate_dcache_page ar as
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -175,7 +190,7 @@
 	.macro ___flush_dcache_page ar as
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -184,7 +199,7 @@
 	.macro	___invalidate_dcache_page ar as
 
 #if XCHAL_DCACHE_SIZE
-	__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
+	__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
 #endif
 
 	.endm
@@ -193,7 +208,7 @@
 	.macro	___invalidate_icache_page ar as
 
 #if XCHAL_ICACHE_SIZE
-	__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
+	__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
 #endif
 
 	.endm
diff --git a/block/bio.c b/block/bio.c
index a4c8c99..8bd6e0d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -155,7 +155,7 @@
 
 unsigned int bvec_nr_vecs(unsigned short idx)
 {
-	return bvec_slabs[idx].nr_vecs;
+	return bvec_slabs[--idx].nr_vecs;
 }
 
 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
@@ -568,8 +568,11 @@
 static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
 {
 #ifdef CONFIG_PFK
-	dst->bi_crypt_key = src->bi_crypt_key;
 	dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
+#ifdef CONFIG_DM_DEFAULT_KEY
+	dst->bi_crypt_key = src->bi_crypt_key;
+	dst->bi_crypt_skip = src->bi_crypt_skip;
+#endif
 	dst->bi_dio_inode = src->bi_dio_inode;
 #endif
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 68ec55a..91a0bf2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -636,7 +636,6 @@
 int blk_queue_enter(struct request_queue *q, bool nowait)
 {
 	while (true) {
-		int ret;
 
 		if (percpu_ref_tryget_live(&q->q_usage_counter))
 			return 0;
@@ -644,13 +643,11 @@
 		if (nowait)
 			return -EBUSY;
 
-		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!atomic_read(&q->mq_freeze_depth) ||
-				blk_queue_dying(q));
+		wait_event(q->mq_freeze_wq,
+			   !atomic_read(&q->mq_freeze_depth) ||
+			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
-		if (ret)
-			return ret;
 	}
 }
 
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
index f3ed7b2..8e7d358 100644
--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -177,7 +177,7 @@
 	u32 vgda_sector = 0;
 	u32 vgda_len = 0;
 	int numlvs = 0;
-	struct pvd *pvd;
+	struct pvd *pvd = NULL;
 	struct lv_info {
 		unsigned short pps_per_lv;
 		unsigned short pps_found;
@@ -231,10 +231,11 @@
 				if (lvip[i].pps_per_lv)
 					foundlvs += 1;
 			}
+			/* pvd loops depend on n[].name and lvip[].pps_per_lv */
+			pvd = alloc_pvd(state, vgda_sector + 17);
 		}
 		put_dev_sector(sect);
 	}
-	pvd = alloc_pvd(state, vgda_sector + 17);
 	if (pvd) {
 		int numpps = be16_to_cpu(pvd->pp_count);
 		int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -281,10 +282,14 @@
 				next_lp_ix += 1;
 		}
 		for (i = 0; i < state->limit; i += 1)
-			if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+			if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+				char tmp[sizeof(n[i].name) + 1]; // null char
+
+				snprintf(tmp, sizeof(tmp), "%s", n[i].name);
 				pr_warn("partition %s (%u pp's found) is "
 					"not contiguous\n",
-					n[i].name, lvip[i].pps_found);
+					tmp, lvip[i].pps_found);
+			}
 		kfree(pvd);
 	}
 	kfree(n);
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index 50979d6..8cde8ea 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/cred.h>
 #include <linux/err.h>
+#include <linux/verification.h>
 #include <keys/asymmetric-type.h>
 #include <keys/system_keyring.h>
 #include <crypto/pkcs7.h>
@@ -207,7 +208,7 @@
 
 	if (!trusted_keys) {
 		trusted_keys = builtin_trusted_keys;
-	} else if (trusted_keys == (void *)1UL) {
+	} else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
 #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
 		trusted_keys = secondary_trusted_keys;
 #else
@@ -240,5 +241,46 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(verify_pkcs7_signature);
-
 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
+
+/**
+ * verify_signature_one - Verify a signature with keys from given keyring
+ * @sig: The signature to be verified
+ * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only,
+ *					(void *)1UL for all trusted keys).
+ * @keyid: key description (not partial)
+ */
+int verify_signature_one(const struct public_key_signature *sig,
+			   struct key *trusted_keys, const char *keyid)
+{
+	key_ref_t ref;
+	struct key *key;
+	int ret;
+
+	if (!sig)
+		return -EBADMSG;
+	if (!trusted_keys) {
+		trusted_keys = builtin_trusted_keys;
+	} else if (trusted_keys == (void *)1UL) {
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+		trusted_keys = secondary_trusted_keys;
+#else
+		trusted_keys = builtin_trusted_keys;
+#endif
+	}
+
+	ref = keyring_search(make_key_ref(trusted_keys, 1),
+				&key_type_asymmetric, keyid);
+	if (IS_ERR(ref)) {
+		pr_err("Asymmetric key (%s) not found in keyring(%s)\n",
+				keyid, trusted_keys->description);
+		return -ENOKEY;
+	}
+
+	key = key_ref_to_ptr(ref);
+	ret = verify_signature(key, sig);
+	key_put(key);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(verify_signature_one);
+
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 0f32afc..64f50b7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1622,6 +1622,15 @@
 	help
 	  This is the LZ4 high compression mode algorithm.
 
+config CRYPTO_ZSTD
+	tristate "Zstd compression algorithm"
+	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
+	select ZSTD_COMPRESS
+	select ZSTD_DECOMPRESS
+	help
+	  This is the zstd algorithm.
+
 comment "Random Number Generation"
 
 config CRYPTO_ANSI_CPRNG
diff --git a/crypto/Makefile b/crypto/Makefile
index 5b08597..8a455d0 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -133,6 +133,7 @@
 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
 obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
+obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index d676fc5..3bc0e76 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -70,11 +70,9 @@
 	return max(start, end_page);
 }
 
-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
-						unsigned int bsize)
+static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
+					unsigned int n)
 {
-	unsigned int n = bsize;
-
 	for (;;) {
 		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
 
@@ -86,17 +84,13 @@
 		n -= len_this_page;
 		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
 	}
-
-	return bsize;
 }
 
-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
-						unsigned int n)
+static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
+					unsigned int n)
 {
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-
-	return n;
 }
 
 static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -106,39 +100,40 @@
 			 struct ablkcipher_walk *walk, int err)
 {
 	struct crypto_tfm *tfm = req->base.tfm;
-	unsigned int nbytes = 0;
+	unsigned int n; /* bytes processed */
+	bool more;
 
-	if (likely(err >= 0)) {
-		unsigned int n = walk->nbytes - err;
+	if (unlikely(err < 0))
+		goto finish;
 
-		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
-			n = ablkcipher_done_fast(walk, n);
-		else if (WARN_ON(err)) {
+	n = walk->nbytes - err;
+	walk->total -= n;
+	more = (walk->total != 0);
+
+	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
+		ablkcipher_done_fast(walk, n);
+	} else {
+		if (WARN_ON(err)) {
+			/* unexpected case; didn't process all bytes */
 			err = -EINVAL;
-			goto err;
-		} else
-			n = ablkcipher_done_slow(walk, n);
-
-		nbytes = walk->total - n;
-		err = 0;
+			goto finish;
+		}
+		ablkcipher_done_slow(walk, n);
 	}
 
-	scatterwalk_done(&walk->in, 0, nbytes);
-	scatterwalk_done(&walk->out, 1, nbytes);
+	scatterwalk_done(&walk->in, 0, more);
+	scatterwalk_done(&walk->out, 1, more);
 
-err:
-	walk->total = nbytes;
-	walk->nbytes = nbytes;
-
-	if (nbytes) {
+	if (more) {
 		crypto_yield(req->base.flags);
 		return ablkcipher_walk_next(req, walk);
 	}
-
+	err = 0;
+finish:
+	walk->nbytes = 0;
 	if (walk->iv != req->info)
 		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
 	kfree(walk->iv_buffer);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
@@ -372,6 +367,7 @@
 	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -446,6 +442,7 @@
 	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
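The two report hunks above (and the matching blkcipher.c hunk further down) force a NUL byte after strncpy() because strncpy() leaves the destination unterminated whenever the source string is at least as long as the buffer. A minimal userspace sketch of that idiom, with an illustrative 8-byte buffer (not part of the patch):

/* demonstrates why the explicit terminator is needed */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char geniv[8];
	const char *src = "exactly8";		/* 8 chars: fills geniv, no NUL copied */

	strncpy(geniv, src, sizeof(geniv));
	geniv[sizeof(geniv) - 1] = '\0';	/* same fix-up the patch applies */
	printf("%s\n", geniv);			/* prints "exactly": truncated but bounded */
	return 0;
}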
diff --git a/crypto/api.c b/crypto/api.c
index e5c1abf..f12d6b9 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -216,7 +216,7 @@
 	type &= mask;
 
 	alg = crypto_alg_lookup(name, type, mask);
-	if (!alg) {
+	if (!alg && !(mask & CRYPTO_NOLOAD)) {
 		request_module("crypto-%s", name);
 
 		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
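With the change above, a lookup whose mask carries CRYPTO_NOLOAD fails instead of calling request_module() when the algorithm is not already registered. A hedged sketch of a caller probing for an algorithm without triggering a module load (assumes the CRYPTO_NOLOAD flag definition that accompanies this hunk elsewhere in the series; the algorithm name is illustrative):

#include <crypto/hash.h>
#include <linux/err.h>

static bool sha256_already_registered(void)
{
	struct crypto_shash *tfm;

	/* CRYPTO_NOLOAD in the mask: look up only, never autoload a module */
	tfm = crypto_alloc_shash("sha256", 0, CRYPTO_NOLOAD);
	if (IS_ERR(tfm))
		return false;
	crypto_free_shash(tfm);
	return true;
}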
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index 1063b64..b2aa925 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -62,7 +62,7 @@
 
 	return verify_pkcs7_signature(NULL, 0,
 				      prep->data, prep->datalen,
-				      (void *)1UL, usage,
+				      VERIFY_USE_SECONDARY_KEYRING, usage,
 				      pkcs7_view_content, prep);
 }
 
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a7e1ac7..c3180eb 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -108,6 +108,7 @@
 				       CRYPTO_TFM_RES_MASK);
 
 out:
+	memzero_explicit(&keys, sizeof(keys));
 	return err;
 
 badkey:
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 18c94e1..49e7e85 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -90,6 +90,7 @@
 					   CRYPTO_TFM_RES_MASK);
 
 out:
+	memzero_explicit(&keys, sizeof(keys));
 	return err;
 
 badkey:
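Both authenc hunks above wipe the local key-splitting structure on every exit path. memzero_explicit() is used rather than memset() because a memset() of a local variable that is never read again may be removed as dead code, leaving key material behind on the stack. A kernel-style sketch of the pattern (struct layout and sizes are illustrative, not the authenc internals):

#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_keys {
	u8 enckey[32];
	u8 authkey[32];
};

static int demo_split_keys(const u8 *key, unsigned int keylen)
{
	struct demo_keys keys;
	int err = -EINVAL;

	if (keylen < sizeof(keys))
		goto out;

	memcpy(keys.enckey, key, sizeof(keys.enckey));
	memcpy(keys.authkey, key + sizeof(keys.enckey), sizeof(keys.authkey));
	/* ... hand the split halves to the child transforms ... */
	err = 0;
out:
	memzero_explicit(&keys, sizeof(keys));	/* survives optimization, unlike memset() */
	return err;
}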
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index a832426..59a0936 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -70,19 +70,18 @@
 	return max(start, end_page);
 }
 
-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
-					       unsigned int bsize)
+static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
+				       unsigned int bsize)
 {
 	u8 *addr;
 
 	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
 	addr = blkcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
-	return bsize;
 }
 
-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
-					       unsigned int n)
+static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
+				       unsigned int n)
 {
 	if (walk->flags & BLKCIPHER_WALK_COPY) {
 		blkcipher_map_dst(walk);
@@ -96,49 +95,48 @@
 
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-
-	return n;
 }
 
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err)
 {
-	unsigned int nbytes = 0;
+	unsigned int n; /* bytes processed */
+	bool more;
 
-	if (likely(err >= 0)) {
-		unsigned int n = walk->nbytes - err;
+	if (unlikely(err < 0))
+		goto finish;
 
-		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
-			n = blkcipher_done_fast(walk, n);
-		else if (WARN_ON(err)) {
+	n = walk->nbytes - err;
+	walk->total -= n;
+	more = (walk->total != 0);
+
+	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
+		blkcipher_done_fast(walk, n);
+	} else {
+		if (WARN_ON(err)) {
+			/* unexpected case; didn't process all bytes */
 			err = -EINVAL;
-			goto err;
-		} else
-			n = blkcipher_done_slow(walk, n);
-
-		nbytes = walk->total - n;
-		err = 0;
+			goto finish;
+		}
+		blkcipher_done_slow(walk, n);
 	}
 
-	scatterwalk_done(&walk->in, 0, nbytes);
-	scatterwalk_done(&walk->out, 1, nbytes);
+	scatterwalk_done(&walk->in, 0, more);
+	scatterwalk_done(&walk->out, 1, more);
 
-err:
-	walk->total = nbytes;
-	walk->nbytes = nbytes;
-
-	if (nbytes) {
+	if (more) {
 		crypto_yield(desc->flags);
 		return blkcipher_walk_next(desc, walk);
 	}
-
+	err = 0;
+finish:
+	walk->nbytes = 0;
 	if (walk->iv != desc->info)
 		memcpy(desc->info, walk->iv, walk->ivsize);
 	if (walk->buffer != walk->page)
 		kfree(walk->buffer);
 	if (walk->page)
 		free_page((unsigned long)walk->page);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(blkcipher_walk_done);
@@ -512,6 +510,7 @@
 	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index bcbd3d4..f1df5d2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4132,6 +4132,22 @@
 				}
 			}
 		}
+	}, {
+		.alg = "zstd",
+		.test = alg_test_comp,
+		.fips_allowed = 1,
+		.suite = {
+			.comp = {
+				.comp = {
+					.vecs = zstd_comp_tv_template,
+					.count = ZSTD_COMP_TEST_VECTORS
+				},
+				.decomp = {
+					.vecs = zstd_decomp_tv_template,
+					.count = ZSTD_DECOMP_TEST_VECTORS
+				}
+			}
+		}
 	}
 };
 
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index daae6c1..1f40d17 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -36031,4 +36031,78 @@
 	},
 };
 
+#define ZSTD_COMP_TEST_VECTORS 2
+#define ZSTD_DECOMP_TEST_VECTORS 2
+
+static struct comp_testvec zstd_comp_tv_template[] = {
+	{
+		.inlen	= 68,
+		.outlen	= 39,
+		.input	= "The algorithm is zstd. "
+			  "The algorithm is zstd. "
+			  "The algorithm is zstd.",
+		.output	= "\x28\xb5\x2f\xfd\x00\x50\xf5\x00\x00\xb8\x54\x68\x65"
+			  "\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x20\x69\x73"
+			  "\x20\x7a\x73\x74\x64\x2e\x20\x01\x00\x55\x73\x36\x01"
+			  ,
+	},
+	{
+		.inlen	= 244,
+		.outlen	= 151,
+		.input	= "zstd, short for Zstandard, is a fast lossless "
+			  "compression algorithm, targeting real-time "
+			  "compression scenarios at zlib-level and better "
+			  "compression ratios. The zstd compression library "
+			  "provides in-memory compression and decompression "
+			  "functions.",
+		.output	= "\x28\xb5\x2f\xfd\x00\x50\x75\x04\x00\x42\x4b\x1e\x17"
+			  "\x90\x81\x31\x00\xf2\x2f\xe4\x36\xc9\xef\x92\x88\x32"
+			  "\xc9\xf2\x24\x94\xd8\x68\x9a\x0f\x00\x0c\xc4\x31\x6f"
+			  "\x0d\x0c\x38\xac\x5c\x48\x03\xcd\x63\x67\xc0\xf3\xad"
+			  "\x4e\x90\xaa\x78\xa0\xa4\xc5\x99\xda\x2f\xb6\x24\x60"
+			  "\xe2\x79\x4b\xaa\xb6\x6b\x85\x0b\xc9\xc6\x04\x66\x86"
+			  "\xe2\xcc\xe2\x25\x3f\x4f\x09\xcd\xb8\x9d\xdb\xc1\x90"
+			  "\xa9\x11\xbc\x35\x44\x69\x2d\x9c\x64\x4f\x13\x31\x64"
+			  "\xcc\xfb\x4d\x95\x93\x86\x7f\x33\x7f\x1a\xef\xe9\x30"
+			  "\xf9\x67\xa1\x94\x0a\x69\x0f\x60\xcd\xc3\xab\x99\xdc"
+			  "\x42\xed\x97\x05\x00\x33\xc3\x15\x95\x3a\x06\xa0\x0e"
+			  "\x20\xa9\x0e\x82\xb9\x43\x45\x01",
+	},
+};
+
+static struct comp_testvec zstd_decomp_tv_template[] = {
+	{
+		.inlen	= 43,
+		.outlen	= 68,
+		.input	= "\x28\xb5\x2f\xfd\x04\x50\xf5\x00\x00\xb8\x54\x68\x65"
+			  "\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x20\x69\x73"
+			  "\x20\x7a\x73\x74\x64\x2e\x20\x01\x00\x55\x73\x36\x01"
+			  "\x6b\xf4\x13\x35",
+		.output	= "The algorithm is zstd. "
+			  "The algorithm is zstd. "
+			  "The algorithm is zstd.",
+	},
+	{
+		.inlen	= 155,
+		.outlen	= 244,
+		.input	= "\x28\xb5\x2f\xfd\x04\x50\x75\x04\x00\x42\x4b\x1e\x17"
+			  "\x90\x81\x31\x00\xf2\x2f\xe4\x36\xc9\xef\x92\x88\x32"
+			  "\xc9\xf2\x24\x94\xd8\x68\x9a\x0f\x00\x0c\xc4\x31\x6f"
+			  "\x0d\x0c\x38\xac\x5c\x48\x03\xcd\x63\x67\xc0\xf3\xad"
+			  "\x4e\x90\xaa\x78\xa0\xa4\xc5\x99\xda\x2f\xb6\x24\x60"
+			  "\xe2\x79\x4b\xaa\xb6\x6b\x85\x0b\xc9\xc6\x04\x66\x86"
+			  "\xe2\xcc\xe2\x25\x3f\x4f\x09\xcd\xb8\x9d\xdb\xc1\x90"
+			  "\xa9\x11\xbc\x35\x44\x69\x2d\x9c\x64\x4f\x13\x31\x64"
+			  "\xcc\xfb\x4d\x95\x93\x86\x7f\x33\x7f\x1a\xef\xe9\x30"
+			  "\xf9\x67\xa1\x94\x0a\x69\x0f\x60\xcd\xc3\xab\x99\xdc"
+			  "\x42\xed\x97\x05\x00\x33\xc3\x15\x95\x3a\x06\xa0\x0e"
+			  "\x20\xa9\x0e\x82\xb9\x43\x45\x01\xaa\x6d\xda\x0d",
+		.output	= "zstd, short for Zstandard, is a fast lossless "
+			  "compression algorithm, targeting real-time "
+			  "compression scenarios at zlib-level and better "
+			  "compression ratios. The zstd compression library "
+			  "provides in-memory compression and decompression "
+			  "functions.",
+	},
+};
 #endif	/* _CRYPTO_TESTMGR_H */
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a81..bb2fc78 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
 /*
- * Modified to interface to the Linux kernel
+ * VMAC: Message Authentication Code using Universal Hashing
+ *
+ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
+ *
  * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2018, Google Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  */
 
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
+/*
+ * Derived from:
+ *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ *	This implementation is hereby placed in the public domain.
+ *	The authors offer no warranty. Use at your own risk.
+ *	Last modified: 17 APR 08, 1700 PDT
+ */
 
+#include <asm/unaligned.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/crypto.h>
@@ -31,10 +36,36 @@
 #include <linux/scatterlist.h>
 #include <asm/byteorder.h>
 #include <crypto/scatterwalk.h>
-#include <crypto/vmac.h>
 #include <crypto/internal/hash.h>
 
 /*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN	64
+#define VMAC_KEY_SIZE	128/* Must be 128, 192 or 256			*/
+#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES	128/* Must be 2^i for some 3 < i < 13; standard = 128 */
+
+/* per-transform (per-key) context */
+struct vmac_tfm_ctx {
+	struct crypto_cipher *cipher;
+	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+	u64 polykey[2*VMAC_TAG_LEN/64];
+	u64 l3key[2*VMAC_TAG_LEN/64];
+};
+
+/* per-request context */
+struct vmac_desc_ctx {
+	union {
+		u8 partial[VMAC_NHBYTES];	/* partial block */
+		__le64 partial_words[VMAC_NHBYTES / 8];
+	};
+	unsigned int partial_size;	/* size of the partial block */
+	bool first_block_processed;
+	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
+};
+
+/*
  * Constants and masks
  */
 #define UINT64_C(x) x##ULL
@@ -318,13 +349,6 @@
 	} while (0)
 #endif
 
-static void vhash_abort(struct vmac_ctx *ctx)
-{
-	ctx->polytmp[0] = ctx->polykey[0] ;
-	ctx->polytmp[1] = ctx->polykey[1] ;
-	ctx->first_block_processed = 0;
-}
-
 static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
 {
 	u64 rh, rl, t, z = 0;
@@ -364,280 +388,209 @@
 	return rl;
 }
 
-static void vhash_update(const unsigned char *m,
-			unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
-			struct vmac_ctx *ctx)
+/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
+static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
+			 struct vmac_desc_ctx *dctx,
+			 const __le64 *mptr, unsigned int blocks)
 {
-	u64 rh, rl, *mptr;
-	const u64 *kptr = (u64 *)ctx->nhkey;
-	int i;
-	u64 ch, cl;
-	u64 pkh = ctx->polykey[0];
-	u64 pkl = ctx->polykey[1];
+	const u64 *kptr = tctx->nhkey;
+	const u64 pkh = tctx->polykey[0];
+	const u64 pkl = tctx->polykey[1];
+	u64 ch = dctx->polytmp[0];
+	u64 cl = dctx->polytmp[1];
+	u64 rh, rl;
 
-	if (!mbytes)
-		return;
-
-	BUG_ON(mbytes % VMAC_NHBYTES);
-
-	mptr = (u64 *)m;
-	i = mbytes / VMAC_NHBYTES;  /* Must be non-zero */
-
-	ch = ctx->polytmp[0];
-	cl = ctx->polytmp[1];
-
-	if (!ctx->first_block_processed) {
-		ctx->first_block_processed = 1;
+	if (!dctx->first_block_processed) {
+		dctx->first_block_processed = true;
 		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
 		rh &= m62;
 		ADD128(ch, cl, rh, rl);
 		mptr += (VMAC_NHBYTES/sizeof(u64));
-		i--;
+		blocks--;
 	}
 
-	while (i--) {
+	while (blocks--) {
 		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
 		rh &= m62;
 		poly_step(ch, cl, pkh, pkl, rh, rl);
 		mptr += (VMAC_NHBYTES/sizeof(u64));
 	}
 
-	ctx->polytmp[0] = ch;
-	ctx->polytmp[1] = cl;
+	dctx->polytmp[0] = ch;
+	dctx->polytmp[1] = cl;
 }
 
-static u64 vhash(unsigned char m[], unsigned int mbytes,
-			u64 *tagl, struct vmac_ctx *ctx)
+static int vmac_setkey(struct crypto_shash *tfm,
+		       const u8 *key, unsigned int keylen)
 {
-	u64 rh, rl, *mptr;
-	const u64 *kptr = (u64 *)ctx->nhkey;
-	int i, remaining;
-	u64 ch, cl;
-	u64 pkh = ctx->polykey[0];
-	u64 pkl = ctx->polykey[1];
+	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+	__be64 out[2];
+	u8 in[16] = { 0 };
+	unsigned int i;
+	int err;
 
-	mptr = (u64 *)m;
-	i = mbytes / VMAC_NHBYTES;
-	remaining = mbytes % VMAC_NHBYTES;
-
-	if (ctx->first_block_processed) {
-		ch = ctx->polytmp[0];
-		cl = ctx->polytmp[1];
-	} else if (i) {
-		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
-		ch &= m62;
-		ADD128(ch, cl, pkh, pkl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-		i--;
-	} else if (remaining) {
-		nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
-		ch &= m62;
-		ADD128(ch, cl, pkh, pkl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-		goto do_l3;
-	} else {/* Empty String */
-		ch = pkh; cl = pkl;
-		goto do_l3;
+	if (keylen != VMAC_KEY_LEN) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
 	}
 
-	while (i--) {
-		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
-		rh &= m62;
-		poly_step(ch, cl, pkh, pkl, rh, rl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-	}
-	if (remaining) {
-		nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
-		rh &= m62;
-		poly_step(ch, cl, pkh, pkl, rh, rl);
-	}
-
-do_l3:
-	vhash_abort(ctx);
-	remaining *= 8;
-	return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
-}
-
-static u64 vmac(unsigned char m[], unsigned int mbytes,
-			const unsigned char n[16], u64 *tagl,
-			struct vmac_ctx_t *ctx)
-{
-	u64 *in_n, *out_p;
-	u64 p, h;
-	int i;
-
-	in_n = ctx->__vmac_ctx.cached_nonce;
-	out_p = ctx->__vmac_ctx.cached_aes;
-
-	i = n[15] & 1;
-	if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
-		in_n[0] = *(u64 *)(n);
-		in_n[1] = *(u64 *)(n+8);
-		((unsigned char *)in_n)[15] &= 0xFE;
-		crypto_cipher_encrypt_one(ctx->child,
-			(unsigned char *)out_p, (unsigned char *)in_n);
-
-		((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
-	}
-	p = be64_to_cpup(out_p + i);
-	h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
-	return le64_to_cpu(p + h);
-}
-
-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
-{
-	u64 in[2] = {0}, out[2];
-	unsigned i;
-	int err = 0;
-
-	err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
 	if (err)
 		return err;
 
 	/* Fill nh key */
-	((unsigned char *)in)[0] = 0x80;
-	for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
-		crypto_cipher_encrypt_one(ctx->child,
-			(unsigned char *)out, (unsigned char *)in);
-		ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
-		ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
-		((unsigned char *)in)[15] += 1;
+	in[0] = 0x80;
+	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
+		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+		tctx->nhkey[i] = be64_to_cpu(out[0]);
+		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
+		in[15]++;
 	}
 
 	/* Fill poly key */
-	((unsigned char *)in)[0] = 0xC0;
-	in[1] = 0;
-	for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
-		crypto_cipher_encrypt_one(ctx->child,
-			(unsigned char *)out, (unsigned char *)in);
-		ctx->__vmac_ctx.polytmp[i] =
-			ctx->__vmac_ctx.polykey[i] =
-				be64_to_cpup(out) & mpoly;
-		ctx->__vmac_ctx.polytmp[i+1] =
-			ctx->__vmac_ctx.polykey[i+1] =
-				be64_to_cpup(out+1) & mpoly;
-		((unsigned char *)in)[15] += 1;
+	in[0] = 0xC0;
+	in[15] = 0;
+	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
+		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
+		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
+		in[15]++;
 	}
 
 	/* Fill ip key */
-	((unsigned char *)in)[0] = 0xE0;
-	in[1] = 0;
-	for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+	in[0] = 0xE0;
+	in[15] = 0;
+	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
 		do {
-			crypto_cipher_encrypt_one(ctx->child,
-				(unsigned char *)out, (unsigned char *)in);
-			ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
-			ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
-			((unsigned char *)in)[15] += 1;
-		} while (ctx->__vmac_ctx.l3key[i] >= p64
-			|| ctx->__vmac_ctx.l3key[i+1] >= p64);
+			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+			tctx->l3key[i] = be64_to_cpu(out[0]);
+			tctx->l3key[i+1] = be64_to_cpu(out[1]);
+			in[15]++;
+		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
 	}
 
-	/* Invalidate nonce/aes cache and reset other elements */
-	ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
-	ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
-	ctx->__vmac_ctx.first_block_processed = 0;
-
-	return err;
-}
-
-static int vmac_setkey(struct crypto_shash *parent,
-		const u8 *key, unsigned int keylen)
-{
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-
-	if (keylen != VMAC_KEY_LEN) {
-		crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-
-	return vmac_set_key((u8 *)key, ctx);
-}
-
-static int vmac_init(struct shash_desc *pdesc)
-{
-	return 0;
-}
-
-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
-		unsigned int len)
-{
-	struct crypto_shash *parent = pdesc->tfm;
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-	int expand;
-	int min;
-
-	expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
-			VMAC_NHBYTES - ctx->partial_size : 0;
-
-	min = len < expand ? len : expand;
-
-	memcpy(ctx->partial + ctx->partial_size, p, min);
-	ctx->partial_size += min;
-
-	if (len < expand)
-		return 0;
-
-	vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
-	ctx->partial_size = 0;
-
-	len -= expand;
-	p += expand;
-
-	if (len % VMAC_NHBYTES) {
-		memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
-			len % VMAC_NHBYTES);
-		ctx->partial_size = len % VMAC_NHBYTES;
-	}
-
-	vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
-
 	return 0;
 }
 
-static int vmac_final(struct shash_desc *pdesc, u8 *out)
+static int vmac_init(struct shash_desc *desc)
 {
-	struct crypto_shash *parent = pdesc->tfm;
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-	vmac_t mac;
-	u8 nonce[16] = {};
+	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	/* vmac() ends up accessing outside the array bounds that
-	 * we specify.  In appears to access up to the next 2-word
-	 * boundary.  We'll just be uber cautious and zero the
-	 * unwritten bytes in the buffer.
-	 */
-	if (ctx->partial_size) {
-		memset(ctx->partial + ctx->partial_size, 0,
-			VMAC_NHBYTES - ctx->partial_size);
+	dctx->partial_size = 0;
+	dctx->first_block_processed = false;
+	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+	return 0;
+}
+
+static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
+{
+	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+	unsigned int n;
+
+	if (dctx->partial_size) {
+		n = min(len, VMAC_NHBYTES - dctx->partial_size);
+		memcpy(&dctx->partial[dctx->partial_size], p, n);
+		dctx->partial_size += n;
+		p += n;
+		len -= n;
+		if (dctx->partial_size == VMAC_NHBYTES) {
+			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
+			dctx->partial_size = 0;
+		}
 	}
-	mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
-	memcpy(out, &mac, sizeof(vmac_t));
-	memzero_explicit(&mac, sizeof(vmac_t));
-	memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
-	ctx->partial_size = 0;
+
+	if (len >= VMAC_NHBYTES) {
+		n = round_down(len, VMAC_NHBYTES);
+		/* TODO: 'p' may be misaligned here */
+		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
+		p += n;
+		len -= n;
+	}
+
+	if (len) {
+		memcpy(dctx->partial, p, len);
+		dctx->partial_size = len;
+	}
+
+	return 0;
+}
+
+static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
+		       struct vmac_desc_ctx *dctx)
+{
+	unsigned int partial = dctx->partial_size;
+	u64 ch = dctx->polytmp[0];
+	u64 cl = dctx->polytmp[1];
+
+	/* L1 and L2-hash the final block if needed */
+	if (partial) {
+		/* Zero-pad to next 128-bit boundary */
+		unsigned int n = round_up(partial, 16);
+		u64 rh, rl;
+
+		memset(&dctx->partial[partial], 0, n - partial);
+		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
+		rh &= m62;
+		if (dctx->first_block_processed)
+			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
+				  rh, rl);
+		else
+			ADD128(ch, cl, rh, rl);
+	}
+
+	/* L3-hash the 128-bit output of L2-hash */
+	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
+}
+
+static int vmac_final(struct shash_desc *desc, u8 *out)
+{
+	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+	static const u8 nonce[16] = {}; /* TODO: this is insecure */
+	union {
+		u8 bytes[16];
+		__be64 pads[2];
+	} block;
+	int index;
+	u64 hash, pad;
+
+	/* Finish calculating the VHASH of the message */
+	hash = vhash_final(tctx, dctx);
+
+	/* Generate pseudorandom pad by encrypting the nonce */
+	memcpy(&block, nonce, 16);
+	index = block.bytes[15] & 1;
+	block.bytes[15] &= ~1;
+	crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
+	pad = be64_to_cpu(block.pads[index]);
+
+	/* The VMAC is the sum of VHASH and the pseudorandom pad */
+	put_unaligned_le64(hash + pad, out);
 	return 0;
 }
 
 static int vmac_init_tfm(struct crypto_tfm *tfm)
 {
-	struct crypto_cipher *cipher;
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+	struct crypto_cipher *cipher;
 
 	cipher = crypto_spawn_cipher(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	ctx->child = cipher;
+	tctx->cipher = cipher;
 	return 0;
 }
 
 static void vmac_exit_tfm(struct crypto_tfm *tfm)
 {
-	struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
-	crypto_free_cipher(ctx->child);
+	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(tctx->cipher);
 }
 
 static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,6 +608,10 @@
 	if (IS_ERR(alg))
 		return PTR_ERR(alg);
 
+	err = -EINVAL;
+	if (alg->cra_blocksize != 16)
+		goto out_put_alg;
+
 	inst = shash_alloc_instance("vmac", alg);
 	err = PTR_ERR(inst);
 	if (IS_ERR(inst))
@@ -670,11 +627,12 @@
 	inst->alg.base.cra_blocksize = alg->cra_blocksize;
 	inst->alg.base.cra_alignmask = alg->cra_alignmask;
 
-	inst->alg.digestsize = sizeof(vmac_t);
-	inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
 	inst->alg.base.cra_init = vmac_init_tfm;
 	inst->alg.base.cra_exit = vmac_exit_tfm;
 
+	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
+	inst->alg.digestsize = VMAC_TAG_LEN / 8;
 	inst->alg.init = vmac_init;
 	inst->alg.update = vmac_update;
 	inst->alg.final = vmac_final;
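The rewritten vmac_update() above follows the usual streaming-hash shape: top up a buffered partial block first, hash as many whole VMAC_NHBYTES blocks as possible directly from the caller's buffer, then stash the tail for the next call. A self-contained sketch of that pattern (block size and helper names are illustrative, not the kernel API):

#include <string.h>

#define BLOCK_SIZE 128

struct stream_ctx {
	unsigned char partial[BLOCK_SIZE];
	unsigned int partial_size;
};

static void process_blocks(const unsigned char *p, unsigned int blocks)
{
	/* stand-in for vhash_blocks() */
	(void)p;
	(void)blocks;
}

static void stream_update(struct stream_ctx *ctx, const unsigned char *p,
			  unsigned int len)
{
	unsigned int n;

	if (ctx->partial_size) {
		/* complete the buffered block before touching new data */
		n = BLOCK_SIZE - ctx->partial_size;
		if (n > len)
			n = len;
		memcpy(ctx->partial + ctx->partial_size, p, n);
		ctx->partial_size += n;
		p += n;
		len -= n;
		if (ctx->partial_size == BLOCK_SIZE) {
			process_blocks(ctx->partial, 1);
			ctx->partial_size = 0;
		}
	}

	if (len >= BLOCK_SIZE) {
		/* bulk path: hash whole blocks straight from the caller */
		n = len - (len % BLOCK_SIZE);
		process_blocks(p, n / BLOCK_SIZE);
		p += n;
		len -= n;
	}

	if (len) {
		/* keep the remainder for the next update call */
		memcpy(ctx->partial, p, len);
		ctx->partial_size = len;
	}
}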
diff --git a/crypto/zstd.c b/crypto/zstd.c
new file mode 100644
index 0000000..9bfd28f
--- /dev/null
+++ b/crypto/zstd.c
@@ -0,0 +1,209 @@
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2017-present, Facebook, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/vmalloc.h>
+#include <linux/zstd.h>
+
+
+#define ZSTD_DEF_LEVEL	3
+
+struct zstd_ctx {
+	ZSTD_CCtx *cctx;
+	ZSTD_DCtx *dctx;
+	void *cwksp;
+	void *dwksp;
+};
+
+static ZSTD_parameters zstd_params(void)
+{
+	return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
+}
+
+static int zstd_comp_init(struct zstd_ctx *ctx)
+{
+	int ret = 0;
+	const ZSTD_parameters params = zstd_params();
+	const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
+
+	ctx->cwksp = vzalloc(wksp_size);
+	if (!ctx->cwksp) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
+	if (!ctx->cctx) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+out:
+	return ret;
+out_free:
+	vfree(ctx->cwksp);
+	goto out;
+}
+
+static int zstd_decomp_init(struct zstd_ctx *ctx)
+{
+	int ret = 0;
+	const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
+
+	ctx->dwksp = vzalloc(wksp_size);
+	if (!ctx->dwksp) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
+	if (!ctx->dctx) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+out:
+	return ret;
+out_free:
+	vfree(ctx->dwksp);
+	goto out;
+}
+
+static void zstd_comp_exit(struct zstd_ctx *ctx)
+{
+	vfree(ctx->cwksp);
+	ctx->cwksp = NULL;
+	ctx->cctx = NULL;
+}
+
+static void zstd_decomp_exit(struct zstd_ctx *ctx)
+{
+	vfree(ctx->dwksp);
+	ctx->dwksp = NULL;
+	ctx->dctx = NULL;
+}
+
+static int __zstd_init(void *ctx)
+{
+	int ret;
+
+	ret = zstd_comp_init(ctx);
+	if (ret)
+		return ret;
+	ret = zstd_decomp_init(ctx);
+	if (ret)
+		zstd_comp_exit(ctx);
+	return ret;
+}
+
+static int zstd_init(struct crypto_tfm *tfm)
+{
+	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return __zstd_init(ctx);
+}
+
+static void __zstd_exit(void *ctx)
+{
+	zstd_comp_exit(ctx);
+	zstd_decomp_exit(ctx);
+}
+
+static void zstd_exit(struct crypto_tfm *tfm)
+{
+	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	__zstd_exit(ctx);
+}
+
+static int __zstd_compress(const u8 *src, unsigned int slen,
+			   u8 *dst, unsigned int *dlen, void *ctx)
+{
+	size_t out_len;
+	struct zstd_ctx *zctx = ctx;
+	const ZSTD_parameters params = zstd_params();
+
+	out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
+	if (ZSTD_isError(out_len))
+		return -EINVAL;
+	*dlen = out_len;
+	return 0;
+}
+
+static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
+			 unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return __zstd_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __zstd_decompress(const u8 *src, unsigned int slen,
+			     u8 *dst, unsigned int *dlen, void *ctx)
+{
+	size_t out_len;
+	struct zstd_ctx *zctx = ctx;
+
+	out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
+	if (ZSTD_isError(out_len))
+		return -EINVAL;
+	*dlen = out_len;
+	return 0;
+}
+
+static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return __zstd_decompress(src, slen, dst, dlen, ctx);
+}
+
+static struct crypto_alg alg = {
+	.cra_name		= "zstd",
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct zstd_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= zstd_init,
+	.cra_exit		= zstd_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= zstd_compress,
+	.coa_decompress		= zstd_decompress } }
+};
+
+static int __init zstd_mod_init(void)
+{
+	int ret;
+
+	ret = crypto_register_alg(&alg);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static void __exit zstd_mod_fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(zstd_mod_init);
+module_exit(zstd_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Zstd Compression Algorithm");
+MODULE_ALIAS_CRYPTO("zstd");
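Once the "zstd" algorithm above is registered, other kernel code can reach it through the existing synchronous compression API. A hedged sketch of a caller (buffer sizing and error handling are the caller's responsibility; the function name is illustrative):

#include <linux/crypto.h>
#include <linux/err.h>

static int zstd_compress_demo(const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("zstd", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* *dlen is the destination capacity going in, the compressed size coming out */
	err = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}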
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9480d84..5960816 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -138,6 +138,8 @@
 
 source "drivers/xen/Kconfig"
 
+source "drivers/vservices/Kconfig"
+
 source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 06e2bb4..557cba5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,8 @@
 
 obj-$(CONFIG_GENERIC_PHY)	+= phy/
 
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices/
+
 # GPIO must come after pinctrl as gpios may need to mux pins etc
 obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-$(CONFIG_GPIOLIB)		+= gpio/
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 373657f..3cdd2c3 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -187,10 +187,12 @@
 
 static const struct lpss_device_desc byt_pwm_dev_desc = {
 	.flags = LPSS_SAVE_CTX,
+	.prv_offset = 0x800,
 };
 
 static const struct lpss_device_desc bsw_pwm_dev_desc = {
 	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+	.prv_offset = 0x800,
 };
 
 static const struct lpss_device_desc byt_uart_dev_desc = {
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 3874eec..ef32e57 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -201,6 +201,8 @@
 	const u8 *uuid;
 	int rc, i;
 
+	if (cmd_rc)
+		*cmd_rc = -EINVAL;
 	func = cmd;
 	if (cmd == ND_CMD_CALL) {
 		call_pkg = buf;
@@ -288,6 +290,8 @@
 		 * If we return an error (like elsewhere) then caller wouldn't
 		 * be able to rely upon data returned to make calculation.
 		 */
+		if (cmd_rc)
+			*cmd_rc = 0;
 		return 0;
 	}
 
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index bf601d4..b66815f 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -472,9 +472,11 @@
 	}
 
 	control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
-		| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
 		| OSC_PCI_EXPRESS_PME_CONTROL;
 
+	if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+		control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+
 	if (pci_aer_available()) {
 		if (aer_acpi_firmware_first())
 			dev_info(&device->dev,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 145dcf2..0792ec5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1453,7 +1453,8 @@
 	 * Note this must be done before the get power-/wakeup_dev-flags calls.
 	 */
 	if (type == ACPI_BUS_TYPE_DEVICE)
-		acpi_bus_get_status(device);
+		if (acpi_bus_get_status(device) < 0)
+			acpi_set_device_status(device, 0);
 
 	acpi_bus_get_power_flags(device);
 	acpi_bus_get_wakeup_device_flags(device);
@@ -1531,7 +1532,7 @@
 		 * acpi_add_single_object updates this once we've an acpi_device
 		 * so that acpi_bus_get_status' quirk handling can be used.
 		 */
-		*sta = 0;
+		*sta = ACPI_STA_DEFAULT;
 		break;
 	case ACPI_TYPE_PROCESSOR:
 		*type = ACPI_BUS_TYPE_PROCESSOR;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 097d630..f0633169 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -330,6 +330,14 @@
 		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
 		},
 	},
+	{
+	.callback = init_nvs_save_s3,
+	.ident = "Asus 1025C",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
+		},
+	},
 	/*
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
 	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 957eb3c..3b2335e 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -142,7 +142,7 @@
 };
 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug_mask, binder_debug_mask, uint, 0644);
 
 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
 module_param_named(devices, binder_devices_param, charp, S_IRUGO);
@@ -161,7 +161,7 @@
 	return ret;
 }
 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
-	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+	param_get_int, &binder_stop_on_user_error, 0644);
 
 #define binder_debug(mask, x...) \
 	do { \
@@ -250,7 +250,7 @@
 	unsigned int cur = atomic_inc_return(&log->cur);
 
 	if (cur >= ARRAY_SIZE(log->entry))
-		log->full = 1;
+		log->full = true;
 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
 	WRITE_ONCE(e->debug_id_done, 0);
 	/*
@@ -465,8 +465,9 @@
 };
 
 enum binder_deferred_state {
-	BINDER_DEFERRED_FLUSH        = 0x01,
-	BINDER_DEFERRED_RELEASE      = 0x02,
+	BINDER_DEFERRED_PUT_FILES    = 0x01,
+	BINDER_DEFERRED_FLUSH        = 0x02,
+	BINDER_DEFERRED_RELEASE      = 0x04,
 };
 
 /**
@@ -503,6 +504,9 @@
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
  *                        (invariant after initialized)
+ * @files                 files_struct for process
+ *                        (protected by @files_lock)
+ * @files_lock            mutex to protect @files
  * @deferred_work_node:   element for binder_deferred_list
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
@@ -547,6 +551,8 @@
 	struct list_head waiting_threads;
 	int pid;
 	struct task_struct *tsk;
+	struct files_struct *files;
+	struct mutex files_lock;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
 	bool is_dead;
@@ -941,33 +947,27 @@
 static void binder_free_proc(struct binder_proc *proc);
 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
-struct files_struct *binder_get_files_struct(struct binder_proc *proc)
-{
-	return get_files_struct(proc->tsk);
-}
-
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
-	struct files_struct *files;
 	unsigned long rlim_cur;
 	unsigned long irqs;
 	int ret;
 
-	files = binder_get_files_struct(proc);
-	if (files == NULL)
-		return -ESRCH;
-
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		ret = -ESRCH;
+		goto err;
+	}
 	if (!lock_task_sighand(proc->tsk, &irqs)) {
 		ret = -EMFILE;
 		goto err;
 	}
-
 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
 	unlock_task_sighand(proc->tsk, &irqs);
 
-	ret = __alloc_fd(files, 0, rlim_cur, flags);
+	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
 err:
-	put_files_struct(files);
+	mutex_unlock(&proc->files_lock);
 	return ret;
 }
 
@@ -977,12 +977,10 @@
 static void task_fd_install(
 	struct binder_proc *proc, unsigned int fd, struct file *file)
 {
-	struct files_struct *files = binder_get_files_struct(proc);
-
-	if (files) {
-		__fd_install(files, fd, file);
-		put_files_struct(files);
-	}
+	mutex_lock(&proc->files_lock);
+	if (proc->files)
+		__fd_install(proc->files, fd, file);
+	mutex_unlock(&proc->files_lock);
 }
 
 /*
@@ -990,21 +988,22 @@
  */
 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 {
-	struct files_struct *files = binder_get_files_struct(proc);
 	int retval;
 
-	if (files == NULL)
-		return -ESRCH;
-
-	retval = __close_fd(files, fd);
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		retval = -ESRCH;
+		goto err;
+	}
+	retval = __close_fd(proc->files, fd);
 	/* can't restart close syscall because file table entry was cleared */
 	if (unlikely(retval == -ERESTARTSYS ||
 		     retval == -ERESTARTNOINTR ||
 		     retval == -ERESTARTNOHAND ||
 		     retval == -ERESTART_RESTARTBLOCK))
 		retval = -EINTR;
-	put_files_struct(files);
-
+err:
+	mutex_unlock(&proc->files_lock);
 	return retval;
 }
 
@@ -2214,8 +2213,8 @@
 	struct binder_object_header *hdr;
 	size_t object_size = 0;
 
-	if (offset > buffer->data_size - sizeof(*hdr) ||
-	    buffer->data_size < sizeof(*hdr) ||
+	if (buffer->data_size < sizeof(*hdr) ||
+	    offset > buffer->data_size - sizeof(*hdr) ||
 	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
 
@@ -2355,7 +2354,7 @@
 	int debug_id = buffer->debug_id;
 
 	binder_debug(BINDER_DEBUG_TRANSACTION,
-		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
+		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
 		     proc->pid, buffer->debug_id,
 		     buffer->data_size, buffer->offsets_size, failed_at);
 
@@ -2804,7 +2803,7 @@
 		if (node->has_async_transaction) {
 			pending_async = true;
 		} else {
-			node->has_async_transaction = 1;
+			node->has_async_transaction = true;
 		}
 	}
 
@@ -3669,7 +3668,7 @@
 				w = binder_dequeue_work_head_ilocked(
 						&buf_node->async_todo);
 				if (!w) {
-					buf_node->has_async_transaction = 0;
+					buf_node->has_async_transaction = false;
 				} else {
 					binder_enqueue_work_ilocked(
 							w, &proc->todo);
@@ -3891,7 +3890,7 @@
 				}
 			}
 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
-				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
 				     proc->pid, thread->pid, (u64)cookie,
 				     death);
 			if (death == NULL) {
@@ -4097,6 +4096,7 @@
 			binder_inner_proc_unlock(proc);
 			if (put_user(e->cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
+			cmd = e->cmd;
 			e->cmd = BR_OK;
 			ptr += sizeof(uint32_t);
 
@@ -4717,6 +4717,42 @@
 	return ret;
 }
 
+static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
+		struct binder_node_info_for_ref *info)
+{
+	struct binder_node *node;
+	struct binder_context *context = proc->context;
+	__u32 handle = info->handle;
+
+	if (info->strong_count || info->weak_count || info->reserved1 ||
+	    info->reserved2 || info->reserved3) {
+		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
+				  proc->pid);
+		return -EINVAL;
+	}
+
+	/* This ioctl may only be used by the context manager */
+	mutex_lock(&context->context_mgr_node_lock);
+	if (!context->binder_context_mgr_node ||
+		context->binder_context_mgr_node->proc != proc) {
+		mutex_unlock(&context->context_mgr_node_lock);
+		return -EPERM;
+	}
+	mutex_unlock(&context->context_mgr_node_lock);
+
+	node = binder_get_node_from_ref(proc, handle, true, NULL);
+	if (!node)
+		return -EINVAL;
+
+	info->strong_count = node->local_strong_refs +
+		node->internal_strong_refs;
+	info->weak_count = node->local_weak_refs;
+
+	binder_put_node(node);
+
+	return 0;
+}
+
 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 				struct binder_node_debug_info *info) {
 	struct rb_node *n;
@@ -4810,6 +4846,25 @@
 		}
 		break;
 	}
+	case BINDER_GET_NODE_INFO_FOR_REF: {
+		struct binder_node_info_for_ref info;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
+		if (ret < 0)
+			goto err;
+
+		if (copy_to_user(ubuf, &info, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		break;
+	}
 	case BINDER_GET_NODE_DEBUG_INFO: {
 		struct binder_node_debug_info info;
 
@@ -4865,6 +4920,7 @@
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
 	binder_alloc_vma_close(&proc->alloc);
+	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -4901,16 +4957,22 @@
 		failure_string = "bad vm_flags";
 		goto err_bad_arg;
 	}
-	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
+	vma->vm_flags &= ~VM_MAYWRITE;
+
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
 
 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
-
-	return ret;
+	if (ret)
+		return ret;
+	mutex_lock(&proc->files_lock);
+	proc->files = get_files_struct(current);
+	mutex_unlock(&proc->files_lock);
+	return 0;
 
 err_bad_arg:
-	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
 	return ret;
 }
@@ -4920,7 +4982,7 @@
 	struct binder_proc *proc;
 	struct binder_device *binder_dev;
 
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
 		     current->group_leader->pid, current->pid);
 
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -4930,6 +4992,7 @@
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
+	mutex_init(&proc->files_lock);
 	INIT_LIST_HEAD(&proc->todo);
 	if (binder_supported_policy(current->policy)) {
 		proc->default_priority.sched_policy = current->policy;
@@ -4965,7 +5028,7 @@
 		 * anyway print all contexts that a given PID has, so this
 		 * is not a problem.
 		 */
-		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
 			binder_debugfs_dir_entry_proc,
 			(void *)(unsigned long)proc->pid,
 			&binder_proc_fops);
@@ -5086,6 +5149,8 @@
 	struct rb_node *n;
 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
 
+	BUG_ON(proc->files);
+
 	mutex_lock(&binder_procs_lock);
 	hlist_del(&proc->proc_node);
 	mutex_unlock(&binder_procs_lock);
@@ -5167,6 +5232,8 @@
 static void binder_deferred_func(struct work_struct *work)
 {
 	struct binder_proc *proc;
+	struct files_struct *files;
+
 	int defer;
 
 	do {
@@ -5183,11 +5250,23 @@
 		}
 		mutex_unlock(&binder_deferred_lock);
 
+		files = NULL;
+		if (defer & BINDER_DEFERRED_PUT_FILES) {
+			mutex_lock(&proc->files_lock);
+			files = proc->files;
+			if (files)
+				proc->files = NULL;
+			mutex_unlock(&proc->files_lock);
+		}
+
 		if (defer & BINDER_DEFERRED_FLUSH)
 			binder_deferred_flush(proc);
 
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
+
+		if (files)
+			put_files_struct(files);
 	} while (proc);
 }
 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5216,7 +5295,7 @@
 	spin_lock(&t->lock);
 	to_proc = t->to_proc;
 	seq_printf(m,
-		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
+		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
 		   prefix, t->debug_id, t,
 		   t->from ? t->from->proc->pid : 0,
 		   t->from ? t->from->pid : 0,
@@ -5241,7 +5320,7 @@
 	}
 	if (buffer->target_node)
 		seq_printf(m, " node %d", buffer->target_node->debug_id);
-	seq_printf(m, " size %zd:%zd data %p\n",
+	seq_printf(m, " size %zd:%zd data %pK\n",
 		   buffer->data_size, buffer->offsets_size,
 		   buffer->data);
 }
@@ -5776,11 +5855,13 @@
 static int __init binder_init(void)
 {
 	int ret;
-	char *device_name, *device_names;
+	char *device_name, *device_names, *device_tmp;
 	struct binder_device *device;
 	struct hlist_node *tmp;
 
-	binder_alloc_shrinker_init();
+	ret = binder_alloc_shrinker_init();
+	if (ret)
+		return ret;
 
 	atomic_set(&binder_transaction_log.cur, ~0U);
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5792,27 +5873,27 @@
 
 	if (binder_debugfs_dir_entry_root) {
 		debugfs_create_file("state",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_state_fops);
 		debugfs_create_file("stats",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_stats_fops);
 		debugfs_create_file("transactions",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_transactions_fops);
 		debugfs_create_file("transaction_log",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log,
 				    &binder_transaction_log_fops);
 		debugfs_create_file("failed_transaction_log",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log_failed,
 				    &binder_transaction_log_fops);
@@ -5829,7 +5910,8 @@
 	}
 	strcpy(device_names, binder_devices_param);
 
-	while ((device_name = strsep(&device_names, ","))) {
+	device_tmp = device_names;
+	while ((device_name = strsep(&device_tmp, ","))) {
 		ret = init_binder_device(device_name);
 		if (ret)
 			goto err_init_binder_device_failed;
@@ -5843,6 +5925,9 @@
 		hlist_del(&device->hlist);
 		kfree(device);
 	}
+
+	kfree(device_names);
+
 err_alloc_device_names_failed:
 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
 
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index abb56a9..bec6c0a 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -219,7 +219,7 @@
 		mm = alloc->vma_vm_mm;
 
 	if (mm) {
-		down_write(&mm->mmap_sem);
+		down_read(&mm->mmap_sem);
 		vma = alloc->vma;
 	}
 
@@ -288,7 +288,7 @@
 		/* vm_insert_page does not seem to increment the refcount */
 	}
 	if (mm) {
-		up_write(&mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
 	return 0;
@@ -321,17 +321,18 @@
 	}
 err_no_vma:
 	if (mm) {
-		up_write(&mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;
 }
 
-struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
-						  size_t data_size,
-						  size_t offsets_size,
-						  size_t extra_buffers_size,
-						  int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+				struct binder_alloc *alloc,
+				size_t data_size,
+				size_t offsets_size,
+				size_t extra_buffers_size,
+				int is_async)
 {
 	struct rb_node *n = alloc->free_buffers.rb_node;
 	struct binder_buffer *buffer;
@@ -1008,8 +1009,14 @@
 	INIT_LIST_HEAD(&alloc->buffers);
 }
 
-void binder_alloc_shrinker_init(void)
+int binder_alloc_shrinker_init(void)
 {
-	list_lru_init(&binder_alloc_lru);
-	register_shrinker(&binder_shrinker);
+	int ret = list_lru_init(&binder_alloc_lru);
+
+	if (ret == 0) {
+		ret = register_shrinker(&binder_shrinker);
+		if (ret)
+			list_lru_destroy(&binder_alloc_lru);
+	}
+	return ret;
 }
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 0b14530..9ef64e5 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -130,7 +130,7 @@
 						  size_t extra_buffers_size,
 						  int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
-void binder_alloc_shrinker_init(void);
+extern int binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 4d4b5f6..faa91f8 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1260,6 +1260,59 @@
 	return strcmp(buf, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/* Various Lenovo 50 series have LPM issues with older BIOSen */
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+			},
+			.driver_data = "20180406", /* 1.31 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+			},
+			.driver_data = "20180420", /* 1.28 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+			},
+			.driver_data = "20180315", /* 1.33 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+			},
+			/*
+			 * Note date based on release notes, 2.35 has been
+			 * reported to be good, but I've been unable to get
+			 * a hold of the reporter to get the DMI BIOS date.
+			 * TODO: fix this.
+			 */
+			.driver_data = "20180310", /* 2.35 */
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	int year, month, date;
+	char buf[9];
+
+	if (!dmi)
+		return false;
+
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+	return strcmp(buf, dmi->driver_data) < 0;
+}
+
 static bool ahci_broken_online(struct pci_dev *pdev)
 {
 #define ENCODE_BUSDEVFN(bus, slot, func)			\
@@ -1626,6 +1679,12 @@
 			"quirky BIOS, skipping spindown on poweroff\n");
 	}
 
+	if (ahci_broken_lpm(pdev)) {
+		pi.flags |= ATA_FLAG_NO_LPM;
+		dev_warn(&pdev->dev,
+			 "BIOS update required for Link Power Management support\n");
+	}
+
 	if (ahci_broken_suspend(pdev)) {
 		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
 		dev_warn(&pdev->dev,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0d028ea..f233ce6 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1124,10 +1125,12 @@
 
 	/* get the slot number from the message */
 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-	if (pmp < EM_MAX_SLOTS)
+	if (pmp < EM_MAX_SLOTS) {
+		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
 		emp = &pp->em_priv[pmp];
-	else
+	} else {
 		return -EINVAL;
+	}
 
 	/* mask off the activity bits if we are in sw_activity
 	 * mode, user should turn off sw_activity before setting
@@ -2129,6 +2132,8 @@
 		deto = 20;
 	}
 
+	/* Clear the dito, mdat and deto fields before setting new values */
+	devslp &= ~GENMASK_ULL(24, 2);
 	devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
 		   (mdat << PORT_DEVSLP_MDAT_OFFSET) |
 		   (deto << PORT_DEVSLP_DETO_OFFSET) |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 82c59a1..73d636d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2385,6 +2385,9 @@
 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
 		dev->horkage |= ATA_HORKAGE_NOLPM;
 
+	if (ap->flags & ATA_FLAG_NO_LPM)
+		dev->horkage |= ATA_HORKAGE_NOLPM;
+
 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6475a13..90c3877 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2282,12 +2282,16 @@
 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
 
-		/* SENSE_VALID trumps dev/unknown error and revalidation */
+		/*
+		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
+		 * layers will determine whether the command is worth retrying
+		 * based on the sense data and device class/type. Otherwise,
+		 * determine directly if the command is worth retrying using its
+		 * error mask and flags.
+		 */
 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
-
-		/* determine whether the command is worth retrying */
-		if (ata_eh_worth_retry(qc))
+		else if (ata_eh_worth_retry(qc))
 			qc->flags |= ATA_QCFLAG_RETRY;
 
 		/* accumulate error info */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index fb2c00f..a3d60cc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3772,10 +3772,20 @@
 		 */
 		goto invalid_param_len;
 	}
-	if (block > dev->n_sectors)
-		goto out_of_range;
 
 	all = cdb[14] & 0x1;
+	if (all) {
+		/*
+		 * Ignore the block address (zone ID) as defined by ZBC.
+		 */
+		block = 0;
+	} else if (block >= dev->n_sectors) {
+		/*
+		 * Block must be a valid zone ID (a zone start LBA).
+		 */
+		fp = 2;
+		goto invalid_fld;
+	}
 
 	if (ata_ncq_enabled(qc->dev) &&
 	    ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3804,10 +3814,6 @@
  invalid_fld:
 	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
 	return 1;
- out_of_range:
-	/* "Logical Block Address out of range" */
-	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
-	return 1;
 invalid_param_len:
 	/* "Parameter list length error" */
 	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index d0fac64..a0b88f1 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1483,6 +1483,8 @@
 					return -EFAULT;
 				if (pool < 0 || pool > ZATM_LAST_POOL)
 					return -EINVAL;
+				pool = array_index_nospec(pool,
+							  ZATM_LAST_POOL + 1);
 				if (copy_from_user(&info,
 				    &((struct zatm_pool_req __user *) arg)->info,
 				    sizeof(info))) return -EFAULT;
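The libahci and zatm hunks above apply the same Spectre-v1 hardening recipe: bounds-check the untrusted index first, then pass it through array_index_nospec() so a mispredicted branch cannot steer a speculative out-of-bounds read. A minimal sketch with an illustrative table:

#include <linux/nospec.h>
#include <linux/errno.h>

#define DEMO_SLOTS 8
static int demo_table[DEMO_SLOTS];

static int demo_lookup(unsigned int idx)
{
	if (idx >= DEMO_SLOTS)
		return -EINVAL;
	idx = array_index_nospec(idx, DEMO_SLOTS);	/* clamped even under speculation */
	return demo_table[idx];
}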
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4bb8016..e808d44 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2077,6 +2077,9 @@
 {
 	struct device *dev, *parent;
 
+	wait_for_device_probe();
+	device_block_probing();
+
 	spin_lock(&devices_kset->list_lock);
 	/*
 	 * Walk the devices list backward, shutting down each in turn.
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f9e1010..7ff7ae0 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -616,16 +616,24 @@
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
 	&dev_attr_spectre_v1.attr,
 	&dev_attr_spectre_v2.attr,
 	&dev_attr_spec_store_bypass.attr,
+	&dev_attr_l1tf.attr,
 	NULL
 };
 
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f95593a..a7baf3c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -384,14 +384,6 @@
 			goto probe_failed;
 	}
 
-	/*
-	 * Ensure devices are listed in devices_kset in correct order
-	 * It's important to move Dev to the end of devices_kset before
-	 * calling .probe, because it could be recursive and parent Dev
-	 * should always go first
-	 */
-	devices_kset_move_last(dev);
-
 	if (dev->bus->probe) {
 		ret = dev->bus->probe(dev);
 		if (ret)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 8e2e475..5a42ae4 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -185,7 +185,7 @@
 int of_pm_clk_add_clks(struct device *dev)
 {
 	struct clk **clks;
-	unsigned int i, count;
+	int i, count;
 	int ret;
 
 	if (!dev || !dev->of_node)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dc259d2..574d08f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1362,8 +1362,10 @@
 
 	dpm_wait_for_children(dev, async);
 
-	if (async_error)
+	if (async_error) {
+		dev->power.direct_complete = false;
 		goto Complete;
+	}
 
 	/*
 	 * If a device configured to wake up the system from sleep states
@@ -1378,6 +1380,7 @@
 		pm_get_active_wakeup_sources(suspend_abort,
 			MAX_SUSPEND_ABORT_LEN);
 		log_suspend_abort_reason(suspend_abort);
+		dev->power.direct_complete = false;
 		async_error = -EBUSY;
 		goto Complete;
 	}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b..64d95c9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -538,3 +538,23 @@
 	  module will be called rsxx.
 
 endif # BLK_DEV
+
+config VSERVICES_BLOCK_SERVER
+	tristate "Virtual Services block server"
+	depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_SERVER
+	default y
+	select VSERVICES_PROTOCOL_BLOCK_SERVER
+	help
+	  Select this option if you want support for server side Virtual
+	  Services block devices. This allows any Linux block device to be
+	  virtualized and exported as a virtual service.
+
+config VSERVICES_BLOCK_CLIENT
+	tristate "Virtual Services Block client device"
+	depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_CLIENT
+	default y
+	select VSERVICES_PROTOCOL_BLOCK_CLIENT
+	help
+	  Select this option if you want support for client side Virtual
+	  Services block devices. The virtual block devices are typically
+	  named /dev/vblock0, /dev/vblock1, etc.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1e9661e..fe9229f1 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -44,3 +44,8 @@
 
 skd-y		:= skd_main.o
 swim_mod-y	:= swim.o swim_asm.o
+
+obj-$(CONFIG_VSERVICES_BLOCK_SERVER)     += vs_block_server.o
+CFLAGS_vs_block_server.o += -Werror
+obj-$(CONFIG_VSERVICES_BLOCK_CLIENT)     += vs_block_client.o
+CFLAGS_vs_block_client.o += -Werror
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e3d8e4c..a321d7d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3459,6 +3459,9 @@
 					  (struct floppy_struct **)&outparam);
 		if (ret)
 			return ret;
+		memcpy(&inparam.g, outparam,
+				offsetof(struct floppy_struct, name));
+		outparam = &inparam.g;
 		break;
 	case FDMSGON:
 		UDP->flags |= FTD_MSG;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ff1c4d7..9f840d9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -640,6 +640,36 @@
 			__func__, lo->lo_number, lo->lo_file_name, rc);
 }
 
+static inline int is_loop_device(struct file *file)
+{
+	struct inode *i = file->f_mapping->host;
+
+	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+}
+
+static int loop_validate_file(struct file *file, struct block_device *bdev)
+{
+	struct inode	*inode = file->f_mapping->host;
+	struct file	*f = file;
+
+	/* Avoid recursion */
+	while (is_loop_device(f)) {
+		struct loop_device *l;
+
+		if (f->f_mapping->host->i_bdev == bdev)
+			return -EBADF;
+
+		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+		if (l->lo_state == Lo_unbound) {
+			return -EINVAL;
+		}
+		f = l->lo_backing_file;
+	}
+	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+		return -EINVAL;
+	return 0;
+}
+
 /*
  * loop_change_fd switched the backing store of a loopback device to
  * a new file. This is useful for operating system installers to free up
@@ -669,14 +699,15 @@
 	if (!file)
 		goto out;
 
+	error = loop_validate_file(file, bdev);
+	if (error)
+		goto out_putf;
+
 	inode = file->f_mapping->host;
 	old_file = lo->lo_backing_file;
 
 	error = -EINVAL;
 
-	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
-		goto out_putf;
-
 	/* size of the new backing store needs to be the same */
 	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 		goto out_putf;
@@ -697,13 +728,6 @@
 	return error;
 }
 
-static inline int is_loop_device(struct file *file)
-{
-	struct inode *i = file->f_mapping->host;
-
-	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
-}
-
 /* loop sysfs attributes */
 
 static ssize_t loop_attr_show(struct device *dev, char *page,
@@ -800,16 +824,17 @@
 	.attrs= loop_attrs,
 };
 
-static int loop_sysfs_init(struct loop_device *lo)
+static void loop_sysfs_init(struct loop_device *lo)
 {
-	return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
-				  &loop_attribute_group);
+	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
+						&loop_attribute_group);
 }
 
 static void loop_sysfs_exit(struct loop_device *lo)
 {
-	sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
-			   &loop_attribute_group);
+	if (lo->sysfs_inited)
+		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
+				   &loop_attribute_group);
 }
 
 static void loop_config_discard(struct loop_device *lo)
@@ -861,7 +886,7 @@
 static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 		       struct block_device *bdev, unsigned int arg)
 {
-	struct file	*file, *f;
+	struct file	*file;
 	struct inode	*inode;
 	struct address_space *mapping;
 	unsigned lo_blocksize;
@@ -881,29 +906,13 @@
 	if (lo->lo_state != Lo_unbound)
 		goto out_putf;
 
-	/* Avoid recursion */
-	f = file;
-	while (is_loop_device(f)) {
-		struct loop_device *l;
-
-		if (f->f_mapping->host->i_bdev == bdev)
-			goto out_putf;
-
-		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
-		if (l->lo_state == Lo_unbound) {
-			error = -EINVAL;
-			goto out_putf;
-		}
-		f = l->lo_backing_file;
-	}
+	error = loop_validate_file(file, bdev);
+	if (error)
+		goto out_putf;
 
 	mapping = file->f_mapping;
 	inode = mapping->host;
 
-	error = -EINVAL;
-	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
-		goto out_putf;
-
 	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
 	    !file->f_op->write_iter)
 		lo_flags |= LO_FLAGS_READ_ONLY;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index fb2237c..60f0fd2 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -59,6 +59,7 @@
 	struct kthread_worker	worker;
 	struct task_struct	*worker_task;
 	bool			use_dio;
+	bool			sysfs_inited;
 
 	struct request_queue	*lo_queue;
 	struct blk_mq_tag_set	tag_set;
diff --git a/drivers/block/vs_block_client.c b/drivers/block/vs_block_client.c
new file mode 100644
index 0000000..974f8b9
--- /dev/null
+++ b/drivers/block/vs_block_client.c
@@ -0,0 +1,956 @@
+/*
+ * drivers/block/vs_block_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice client driver
+ *
+ * Function vs_block_client_vs_alloc() is partially derived from
+ * drivers/block/brd.c (brd_alloc())
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/version.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/session.h>
+#include <vservices/wait.h>
+
+/*
+ * BLK_DEF_MAX_SECTORS was replaced with the hard-coded number 1024 in 3.19,
+ * and restored in 4.3
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+        (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#define BLK_DEF_MAX_SECTORS 1024
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+#define bio_sector(bio) (bio)->bi_iter.bi_sector
+#define bio_size(bio) (bio)->bi_iter.bi_size
+#else
+#define bio_sector(bio) (bio)->bi_sector
+#define bio_size(bio) (bio)->bi_size
+#endif
+
+#define CLIENT_BLKDEV_NAME		"vblock"
+
+#define PERDEV_MINORS 256
+
+struct block_client;
+
+struct vs_block_device {
+	/*
+	 * The client that created this block device. A reference is held
+	 * to the client until the block device is released, so this pointer
+	 * should always be valid. However, the client may have reset since
+	 * then, so it should only be used if, after locking it, its blkdev
+	 * pointer points back to this block device.
+	 */
+	struct block_client		*client;
+
+	int				id;
+	struct gendisk			*disk;
+	struct request_queue		*queue;
+
+	struct kref			kref;
+};
+
+struct block_client {
+	struct vs_client_block_state	client;
+	struct vs_service_device	*service;
+
+	/* Tasklet & queue for bouncing buffers out of read acks */
+	struct tasklet_struct		rx_tasklet;
+	struct list_head		rx_queue;
+	struct spinlock			rx_queue_lock;
+
+	/*
+	 * The current virtual block device. This gets replaced when we do
+	 * a reset since other parts of the kernel (e.g. vfs) may still
+	 * be accessing the disk.
+	 */
+	struct vs_block_device		*blkdev;
+
+	/* Shared work item for disk creation */
+	struct work_struct		disk_creation_work;
+
+	struct kref			kref;
+};
+
+#define state_to_block_client(state) \
+	container_of(state, struct block_client, client)
+
+static int block_client_major;
+
+/* Unique identifier allocation for virtual block devices */
+static DEFINE_IDA(vs_block_ida);
+static DEFINE_MUTEX(vs_block_ida_lock);
+
+static int
+block_client_vs_to_linux_error(vservice_block_block_io_error_t vs_err)
+{
+	switch (vs_err) {
+	case VSERVICE_BLOCK_INVALID_INDEX:
+		return -EILSEQ;
+	case VSERVICE_BLOCK_MEDIA_FAILURE:
+		return -EIO;
+	case VSERVICE_BLOCK_MEDIA_TIMEOUT:
+		return -ETIMEDOUT;
+	case VSERVICE_BLOCK_UNSUPPORTED_COMMAND:
+		return -ENOTSUPP;
+	case VSERVICE_BLOCK_SERVICE_RESET:
+		return -ENXIO;
+	default:
+		WARN_ON(vs_err);
+		return 0;
+	}
+
+	return 0;
+}
+
+static void vs_block_client_kfree(struct kref *kref)
+{
+	struct block_client *client =
+		container_of(kref, struct block_client, kref);
+
+	vs_put_service(client->service);
+	kfree(client);
+}
+
+static void vs_block_client_put(struct block_client *client)
+{
+	kref_put(&client->kref, vs_block_client_kfree);
+}
+
+static void vs_block_device_kfree(struct kref *kref)
+{
+	struct vs_block_device *blkdev =
+		container_of(kref, struct vs_block_device, kref);
+
+	/* Delete the disk and clean up its queue */
+	del_gendisk(blkdev->disk);
+	blk_cleanup_queue(blkdev->queue);
+	put_disk(blkdev->disk);
+
+	mutex_lock(&vs_block_ida_lock);
+	ida_remove(&vs_block_ida, blkdev->id);
+	mutex_unlock(&vs_block_ida_lock);
+
+	if (blkdev->client)
+		vs_block_client_put(blkdev->client);
+
+	kfree(blkdev);
+}
+
+static void vs_block_device_put(struct vs_block_device *blkdev)
+{
+	kref_put(&blkdev->kref, vs_block_device_kfree);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+static void
+#else
+static int
+#endif
+vs_block_client_blkdev_release(struct gendisk *disk, fmode_t mode)
+{
+	struct vs_block_device *blkdev = disk->private_data;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+	if (WARN_ON(!blkdev))
+		return;
+#else
+	if (WARN_ON(!blkdev))
+		return -ENXIO;
+#endif
+
+	vs_block_device_put(blkdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+	return 0;
+#endif
+}
+
+static int vs_block_client_blkdev_open(struct block_device *bdev, fmode_t mode)
+{
+	struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+	struct block_client *client;
+	int err = -ENXIO;
+
+	if (!blkdev || !kref_get_unless_zero(&blkdev->kref))
+		goto fail_get_blkdev;
+
+	client = blkdev->client;
+	if (WARN_ON(!client))
+		goto fail_lock_client;
+
+	if (!vs_state_lock_safe(&client->client)) {
+		err = -ENODEV;
+		goto fail_lock_client;
+	}
+
+	if (blkdev != client->blkdev) {
+		/* The client has reset, this blkdev is no longer usable */
+		err = -ENXIO;
+		goto fail_check_client;
+	}
+
+	if ((mode & FMODE_WRITE) && client->client.readonly) {
+		dev_dbg(&client->service->dev,
+			"attempt to open read-only disk for writing\n");
+		err = -EROFS;
+		goto fail_check_client;
+	}
+
+	vs_state_unlock(&client->client);
+
+	return 0;
+
+fail_check_client:
+	vs_state_unlock(&client->client);
+fail_lock_client:
+	vs_block_device_put(blkdev);
+fail_get_blkdev:
+	return err;
+}
+
+static int vs_block_client_blkdev_getgeo(struct block_device *bdev,
+		struct hd_geometry *geo)
+{
+	/* Sane default values for the disk geometry. */
+	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+	geo->heads = 4;
+	geo->sectors = 16;
+
+	return 0;
+}
+
+/*
+ * Indirectly determine linux block layer sector size and ensure that our
+ * sector size matches.
+ */
+static int vs_block_client_check_sector_size(struct block_client *client,
+		struct bio *bio)
+{
+	unsigned int expected_bytes;
+
+	if (unlikely(!bio_sectors(bio))) {
+		dev_err(&client->service->dev, "zero-length bio");
+		return -EIO;
+	}
+
+	expected_bytes = bio_sectors(bio) * client->client.sector_size;
+	if (unlikely(bio_size(bio) != expected_bytes)) {
+		dev_err(&client->service->dev,
+				"bio has %zd bytes, which is unexpected "
+				"for %d sectors of %zd bytes each",
+				(size_t)bio_size(bio), bio_sectors(bio),
+				(size_t)client->client.sector_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct block_device_operations block_client_ops = {
+	.getgeo		= vs_block_client_blkdev_getgeo,
+	.open		= vs_block_client_blkdev_open,
+	.release	= vs_block_client_blkdev_release,
+	.owner		= THIS_MODULE,
+};
+
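+/*
+ * Translate a WRITE bio into a vservices write request: wait for send
+ * quota, copy each bio segment into the request's pbuf, and send it with
+ * flush/commit/nodelay flags derived from the bio's flags.
+ */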
+static int block_client_send_write_req(struct block_client *client,
+		struct bio *bio)
+{
+	struct vs_client_block_state *state = &client->client;
+	struct vs_mbuf *mbuf;
+	struct vs_pbuf pbuf;
+	struct bio_vec *bvec;
+	int err;
+	bool flush, nodelay, commit;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	struct bvec_iter iter;
+	struct bio_vec bvec_local;
+#else
+	int i;
+#endif
+
+	err = vs_block_client_check_sector_size(client, bio);
+	if (err < 0)
+		goto fail;
+
+	do {
+		/* Wait until it's possible to send a write request */
+		err = vs_wait_state_nointr(state,
+				vs_client_block_io_req_write_can_send(state));
+		if (err == -ECANCELED)
+			err = -ENXIO;
+		if (err < 0)
+			goto fail;
+
+		/* Wait for quota, while sending a write remains possible */
+		mbuf = vs_wait_alloc_nointr(state,
+				vs_client_block_io_req_write_can_send(state),
+				vs_client_block_io_alloc_req_write(
+					state, &pbuf, GFP_KERNEL));
+		err = IS_ERR(mbuf) ? PTR_ERR(mbuf) : 0;
+
+		/* Retry if sending is no longer possible */
+	} while (err == -ECANCELED);
+
+	if (err < 0)
+		goto fail;
+
+	vs_pbuf_resize(&pbuf, 0);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	bvec = &bvec_local;
+	bio_for_each_segment(bvec_local, bio, iter)
+#else
+	bio_for_each_segment(bvec, bio, i)
+#endif
+	{
+		unsigned long flags;
+		void *buf = bvec_kmap_irq(bvec, &flags);
+		flush_kernel_dcache_page(bvec->bv_page);
+		err = vs_pbuf_append(&pbuf, buf, bvec->bv_len);
+		bvec_kunmap_irq(buf, &flags);
+		if (err < 0) {
+			dev_err(&client->service->dev,
+				"pbuf copy failed with err %d\n", err);
+			err = -EIO;
+			goto fail_free_write;
+		}
+	}
+
+	if (unlikely(vs_pbuf_size(&pbuf) != bio_size(bio))) {
+		dev_err(&client->service->dev,
+			"pbuf size is wrong: %zd, should be %zd\n",
+			vs_pbuf_size(&pbuf), (size_t)bio_size(bio));
+		err = -EIO;
+		goto fail_free_write;
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+	flush = (bio_flags(bio) & REQ_PREFLUSH);
+	commit = (bio_flags(bio) & REQ_FUA);
+	nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+	flush = (bio->bi_rw & REQ_FLUSH);
+	commit = (bio->bi_rw & REQ_FUA);
+	nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+	err = vs_client_block_io_req_write(state, bio, bio_sector(bio),
+			bio_sectors(bio), nodelay, flush, commit, pbuf, mbuf);
+
+	if (err) {
+		dev_err(&client->service->dev,
+				"write req failed with err %d\n", err);
+		goto fail_free_write;
+	}
+
+	return 0;
+
+fail_free_write:
+	vs_client_block_io_free_req_write(state, &pbuf, mbuf);
+fail:
+	return err;
+}
+
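+/*
+ * Translate a READ bio into a vservices read request. The payload arrives
+ * later in the read ack and is copied into the bio by the RX tasklet.
+ */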
+static int block_client_send_read_req(struct block_client *client,
+		struct bio *bio)
+{
+	struct vs_client_block_state *state = &client->client;
+	int err;
+	bool flush, nodelay;
+
+	err = vs_block_client_check_sector_size(client, bio);
+	if (err < 0)
+		return err;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+	flush = (bio_flags(bio) & REQ_PREFLUSH);
+	nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+	flush = (bio->bi_rw & REQ_FLUSH);
+	nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+	do {
+		/* Wait until it's possible to send a read request */
+		err = vs_wait_state_nointr(state,
+				vs_client_block_io_req_read_can_send(state));
+		if (err == -ECANCELED)
+			err = -ENXIO;
+		if (err < 0)
+			break;
+
+		/* Wait for quota, while sending a read remains possible */
+		err = vs_wait_send_nointr(state,
+			vs_client_block_io_req_read_can_send(state),
+			vs_client_block_io_req_read(state, bio,
+				bio_sector(bio), bio_sectors(bio),
+				nodelay, flush, GFP_KERNEL));
+	} while (err == -ECANCELED);
+
+	return err;
+}
+
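+/*
+ * make_request handler for the virtual block device: dispatch each bio as
+ * a vservices read or write request, and complete the bio with an error if
+ * the client has reset or the request could not be sent.
+ */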
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+static blk_qc_t
+#else
+static void
+#endif
+vs_block_client_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct block_device *bdev = bio->bi_bdev;
+	struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+	struct block_client *client;
+	int err = 0;
+
+	client = blkdev->client;
+	if (!client || !kref_get_unless_zero(&client->kref)) {
+		err = -ENODEV;
+		goto fail_get_client;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	blk_queue_split(q, &bio, q->bio_split);
+#endif
+
+	if (!vs_state_lock_safe(&client->client)) {
+		err = -ENODEV;
+		goto fail_lock_client;
+	}
+
+	if (client->blkdev != blkdev) {
+		/* Client has reset, this block device is no longer usable */
+		err = -EIO;
+		goto fail_check_client;
+	}
+
+	if (bio_data_dir(bio) == WRITE)
+		err = block_client_send_write_req(client, bio);
+	else
+		err = block_client_send_read_req(client, bio);
+
+fail_check_client:
+	if (err == -ENOLINK)
+		err = -EIO;
+	else
+		vs_state_unlock(&client->client);
+fail_lock_client:
+	vs_block_client_put(client);
+fail_get_client:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	if (err < 0) {
+		bio->bi_error = err;
+		bio_endio(bio);
+	}
+#else
+	if (err < 0)
+		bio_endio(bio, err);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+	return BLK_QC_T_NONE;
+#endif
+}
+
+static int vs_block_client_get_blkdev_id(struct block_client *client)
+{
+	int id;
+	int ret;
+
+retry:
+	ret = ida_pre_get(&vs_block_ida, GFP_KERNEL);
+	if (ret == 0)
+		return -ENOMEM;
+
+	mutex_lock(&vs_block_ida_lock);
+	ret = ida_get_new(&vs_block_ida, &id);
+	mutex_unlock(&vs_block_ida_lock);
+
+	if (ret == -EAGAIN)
+		goto retry;
+
+	return id;
+}
+
+static int vs_block_client_disk_add(struct block_client *client)
+{
+	struct vs_block_device *blkdev;
+	unsigned int max_hw_sectors;
+	int err;
+
+	dev_dbg(&client->service->dev, "device add\n");
+
+	blkdev = kzalloc(sizeof(*blkdev), GFP_KERNEL);
+	if (!blkdev) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	kref_init(&blkdev->kref);
+	blkdev->id = vs_block_client_get_blkdev_id(client);
+	if (blkdev->id < 0) {
+		err = blkdev->id;
+		goto fail_free_blkdev;
+	}
+
+	if ((blkdev->id * PERDEV_MINORS) >> MINORBITS) {
+		err = -ENODEV;
+		goto fail_remove_ida;
+	}
+
+	blkdev->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!blkdev->queue) {
+		dev_err(&client->service->dev,
+				"Error initializing blk queue\n");
+		err = -ENOMEM;
+		goto fail_remove_ida;
+	}
+
+	blk_queue_make_request(blkdev->queue, vs_block_client_make_request);
+	blk_queue_bounce_limit(blkdev->queue, BLK_BOUNCE_ANY);
+	blk_queue_dma_alignment(blkdev->queue, 0);
+
+	/*
+	 * Mark this as a paravirtualised device. This is just an alias
+	 * of QUEUE_FLAG_NONROT, which stops the I/O schedulers from waiting
+	 * for the disk to spin.
+	 */
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, blkdev->queue);
+
+	blkdev->queue->queuedata = blkdev;
+
+	blkdev->client = client;
+	kref_get(&client->kref);
+
+	max_hw_sectors = min_t(sector_t, BLK_DEF_MAX_SECTORS,
+			client->client.segment_size /
+			client->client.sector_size);
+	blk_queue_max_hw_sectors(blkdev->queue, max_hw_sectors);
+
+	blkdev->disk = alloc_disk(PERDEV_MINORS);
+	if (!blkdev->disk) {
+		dev_err(&client->service->dev, "Error allocating disk\n");
+		err = -ENOMEM;
+		goto fail_free_blk_queue;
+	}
+
+	if (client->client.readonly) {
+		dev_dbg(&client->service->dev, "set device as readonly\n");
+		set_disk_ro(blkdev->disk, true);
+	}
+
+	blkdev->disk->major = block_client_major;
+	blkdev->disk->first_minor = blkdev->id * PERDEV_MINORS;
+	blkdev->disk->fops         = &block_client_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	blkdev->disk->driverfs_dev = &client->service->dev;
+#endif
+	blkdev->disk->private_data = blkdev;
+	blkdev->disk->queue        = blkdev->queue;
+	blkdev->disk->flags       |= GENHD_FL_EXT_DEVT;
+
+	/*
+	 * The block device name is vblock<x>, where x is a unique
+	 * identifier. Userspace should rename or symlink the device
+	 * appropriately, typically by processing the add uevent.
+	 *
+	 * If a virtual block device is reset then it may re-open with a
+	 * different identifier if something still holds a reference to
+	 * the old device (such as a userspace application having an open
+	 * file handle).
+	 */
+	snprintf(blkdev->disk->disk_name, sizeof(blkdev->disk->disk_name),
+			"%s%d", CLIENT_BLKDEV_NAME, blkdev->id);
+	set_capacity(blkdev->disk, client->client.device_sectors);
+
+	/*
+	 * We need to hold a reference on blkdev across add_disk(), to make
+	 * sure a concurrent reset does not immediately release the blkdev
+	 * and call del_gendisk().
+	 */
+	kref_get(&blkdev->kref);
+
+	vs_service_state_lock(client->service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base)) {
+		vs_service_state_unlock(client->service);
+		err = -ENXIO;
+		goto fail_free_blk_queue;
+	}
+	client->blkdev = blkdev;
+	vs_service_state_unlock(client->service);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	device_add_disk(&client->service->dev, blkdev->disk);
+#else
+	add_disk(blkdev->disk);
+#endif
+	dev_dbg(&client->service->dev, "added block disk '%s'\n",
+			blkdev->disk->disk_name);
+
+	/* Release the reference taken above. */
+	vs_block_device_put(blkdev);
+
+	return 0;
+
+fail_free_blk_queue:
+	blk_cleanup_queue(blkdev->queue);
+fail_remove_ida:
+	mutex_lock(&vs_block_ida_lock);
+	ida_remove(&vs_block_ida, blkdev->id);
+	mutex_unlock(&vs_block_ida_lock);
+fail_free_blkdev:
+	kfree(blkdev);
+fail:
+	return err;
+}
+
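+/*
+ * Deferred disk creation. Allocating and adding the gendisk may sleep, so
+ * the opened handler queues this work item rather than creating the disk
+ * directly.
+ */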
+static void vs_block_client_disk_creation_work(struct work_struct *work)
+{
+	struct block_client *client = container_of(work,
+			struct block_client, disk_creation_work);
+	struct vs_block_device *blkdev;
+	bool running;
+
+	vs_service_state_lock(client->service);
+	blkdev = client->blkdev;
+	running = VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base);
+
+	dev_dbg(&client->service->dev,
+			"disk changed: blkdev = %pK, running = %d\n",
+			client->blkdev, running);
+	if (!blkdev && running) {
+		dev_dbg(&client->service->dev, "adding block disk\n");
+		vs_service_state_unlock(client->service);
+		vs_block_client_disk_add(client);
+	} else {
+		vs_service_state_unlock(client->service);
+	}
+}
+
+static void vs_block_client_rx_tasklet(unsigned long data);
+
+static struct vs_client_block_state *
+vs_block_client_alloc(struct vs_service_device *service)
+{
+	struct block_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client) {
+		dev_err(&service->dev, "Error allocating client struct\n");
+		return NULL;
+	}
+
+	vs_get_service(service);
+	client->service = service;
+
+	INIT_LIST_HEAD(&client->rx_queue);
+	spin_lock_init(&client->rx_queue_lock);
+	tasklet_init(&client->rx_tasklet, vs_block_client_rx_tasklet,
+			(unsigned long)client);
+	tasklet_disable(&client->rx_tasklet);
+
+	INIT_WORK(&client->disk_creation_work,
+			vs_block_client_disk_creation_work);
+	kref_init(&client->kref);
+
+	dev_dbg(&service->dev, "New block client %pK\n", client);
+
+	return &client->client;
+}
+
+static void vs_block_client_release(struct vs_client_block_state *state)
+{
+	struct block_client *client = state_to_block_client(state);
+
+	flush_work(&client->disk_creation_work);
+
+	vs_block_client_put(client);
+}
+
+/* FIXME: Jira ticket SDK-2459 - anjaniv */
+static void vs_block_client_closed(struct vs_client_block_state *state)
+{
+	struct block_client *client = state_to_block_client(state);
+
+	/*
+	 * Stop the RX bounce tasklet and clean up its queue. We can wait for
+	 * it to stop safely because it doesn't need to acquire the state
+	 * lock, only the RX queue lock, which we acquire after it is disabled.
+	 */
+	tasklet_disable(&client->rx_tasklet);
+	spin_lock(&client->rx_queue_lock);
+	while (!list_empty(&client->rx_queue)) {
+		struct vs_mbuf *mbuf = list_first_entry(&client->rx_queue,
+				struct vs_mbuf, queue);
+		struct vs_pbuf pbuf;
+		list_del(&mbuf->queue);
+		vs_client_block_io_getbufs_ack_read(state, &pbuf, mbuf);
+		vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+	}
+	spin_unlock(&client->rx_queue_lock);
+
+	if (client->blkdev) {
+		struct vs_block_device *blkdev = client->blkdev;
+		char service_remove[] = "REMOVING_SERVICE=1";
+		/* + 9 because "DEVNAME=" is 8 chars plus 1 for '\0' */
+		char devname[sizeof(blkdev->disk->disk_name) + 9];
+		char *envp[] = { service_remove, devname, NULL };
+
+		dev_dbg(&client->service->dev, "removing block disk\n");
+
+		/*
+		 * Send a change event with DEVNAME to allow the block helper
+		 * script to remove any server sessions which use either
+		 * v${SERVICE_NAME} or ${DEVNAME}.  The remove event generated
+		 * by the session driver doesn't include DEVNAME so the only
+		 * way for userspace to map SERVICE_NAME to DEVNAME is by the
+		 * symlink added when the client service was created.  If that
+		 * symlink has been deleted, there's no other way to connect
+		 * the two names.
+		 */
+		snprintf(devname, sizeof(devname), "DEVNAME=%s",
+				blkdev->disk->disk_name);
+		kobject_uevent_env(&client->service->dev.kobj, KOBJ_CHANGE,
+				envp);
+
+		/*
+		 * We are done with the device now. The block device will only
+		 * get removed once there are no more users (e.g. userspace
+		 * applications).
+		 */
+		client->blkdev = NULL;
+		vs_block_device_put(blkdev);
+	}
+}
+
+static void vs_block_client_opened(struct vs_client_block_state *state)
+{
+	struct block_client *client = state_to_block_client(state);
+
+#if !defined(CONFIG_LBDAF) && !defined(CONFIG_64BIT)
+	if (state->device_sectors >> (sizeof(sector_t) * 8)) {
+		dev_err(&client->service->dev,
+				"Client doesn't support full capacity large block devices\n");
+		vs_client_block_close(state);
+		return;
+	}
+#endif
+
+	/* Unblock the RX bounce tasklet. */
+	tasklet_enable(&client->rx_tasklet);
+
+	/*
+	 * The block device allocation needs to sleep, so we defer it to a
+	 * work queue.
+	 */
+	queue_work(client->service->work_queue, &client->disk_creation_work);
+}
+
+static int vs_block_client_ack_read(struct vs_client_block_state *state,
+		void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+	struct block_client *client = state_to_block_client(state);
+	struct bio *bio = tag;
+	struct bio_vec *bvec;
+	int err = 0;
+	size_t bytes_read = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	struct bio_vec bvec_local;
+	struct bvec_iter iter;
+#else
+	int i;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+	bvec = &bvec_local;
+	bio_for_each_segment(bvec_local, bio, iter)
+#else
+	bio_for_each_segment(bvec, bio, i)
+#endif
+	{
+		unsigned long flags;
+		void *buf;
+		if (vs_pbuf_size(&pbuf) < bytes_read + bvec->bv_len) {
+			dev_err(&client->service->dev,
+					"bio read overrun: %zu into %zu byte response, but need %zd bytes\n",
+					bytes_read, vs_pbuf_size(&pbuf),
+					(size_t)bvec->bv_len);
+			err = -EIO;
+			break;
+		}
+		buf = bvec_kmap_irq(bvec, &flags);
+		memcpy(buf, vs_pbuf_data(&pbuf) + bytes_read, bvec->bv_len);
+		flush_kernel_dcache_page(bvec->bv_page);
+		bvec_kunmap_irq(buf, &flags);
+		bytes_read += bvec->bv_len;
+	}
+
+	vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	if (err < 0)
+		bio->bi_error = err;
+	bio_endio(bio);
+#else
+	bio_endio(bio, err);
+#endif
+
+	return 0;
+}
+
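+/*
+ * RX bounce tasklet: take one queued read ack off the RX queue, copy its
+ * payload into the waiting bio, and reschedule while more acks are pending.
+ */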
+static void vs_block_client_rx_tasklet(unsigned long data)
+{
+	struct block_client *client = (struct block_client *)data;
+	struct vs_mbuf *mbuf;
+	struct vs_pbuf pbuf;
+
+	spin_lock(&client->rx_queue_lock);
+
+	/* The list shouldn't be empty. */
+	if (WARN_ON(list_empty(&client->rx_queue))) {
+		spin_unlock(&client->rx_queue_lock);
+		return;
+	}
+
+	/* Get the next mbuf, and reschedule ourselves if there are more. */
+	mbuf = list_first_entry(&client->rx_queue, struct vs_mbuf, queue);
+	list_del(&mbuf->queue);
+	if (!list_empty(&client->rx_queue))
+		tasklet_schedule(&client->rx_tasklet);
+
+	spin_unlock(&client->rx_queue_lock);
+
+	/* Process the ack. */
+	vs_client_block_io_getbufs_ack_read(&client->client, &pbuf, mbuf);
+	vs_block_client_ack_read(&client->client, mbuf->priv, pbuf, mbuf);
+}
+
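+/*
+ * Read ack handler, which may run in atomic context (rx_atomic is set for
+ * this driver). Queue the mbuf and defer the payload copy to the RX
+ * tasklet.
+ */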
+static int vs_block_client_queue_ack_read(struct vs_client_block_state *state,
+		void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+	struct block_client *client = state_to_block_client(state);
+
+	spin_lock(&client->rx_queue_lock);
+	list_add_tail(&mbuf->queue, &client->rx_queue);
+	mbuf->priv = tag;
+	spin_unlock(&client->rx_queue_lock);
+
+	tasklet_schedule(&client->rx_tasklet);
+
+	wake_up(&state->service->quota_wq);
+
+	return 0;
+}
+
+static int vs_block_client_ack_write(struct vs_client_block_state *state,
+		void *tag)
+{
+	struct bio *bio = tag;
+
+	if (WARN_ON(!bio))
+		return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	bio_endio(bio);
+#else
+	bio_endio(bio, 0);
+#endif
+
+	wake_up(&state->service->quota_wq);
+
+	return 0;
+}
+
+static int vs_block_client_nack_io(struct vs_client_block_state *state,
+		void *tag, vservice_block_block_io_error_t err)
+{
+	struct bio *bio = tag;
+
+	if (WARN_ON(!bio))
+		return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	bio->bi_error = block_client_vs_to_linux_error(err);
+	bio_endio(bio);
+#else
+	bio_endio(bio, block_client_vs_to_linux_error(err));
+#endif
+
+	wake_up(&state->service->quota_wq);
+
+	return 0;
+}
+
+static struct vs_client_block block_client_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_block_client_alloc,
+	.release		= vs_block_client_release,
+	.opened			= vs_block_client_opened,
+	.closed			= vs_block_client_closed,
+	.io = {
+		.ack_read	= vs_block_client_queue_ack_read,
+		.nack_read	= vs_block_client_nack_io,
+		.ack_write	= vs_block_client_ack_write,
+		.nack_write	= vs_block_client_nack_io,
+	}
+};
+
+static int __init vs_block_client_init(void)
+{
+	int err;
+
+	block_client_major = register_blkdev(0, CLIENT_BLKDEV_NAME);
+	if (block_client_major < 0) {
+		pr_err("Err registering blkdev\n");
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	err = vservice_block_client_register(&block_client_driver,
+			"block_client_driver");
+	if (err)
+		goto fail_unregister_blkdev;
+
+	return 0;
+
+fail_unregister_blkdev:
+	unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+fail:
+	return err;
+}
+
+static void __exit vs_block_client_exit(void)
+{
+	vservice_block_client_unregister(&block_client_driver);
+	unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+}
+
+module_init(vs_block_client_init);
+module_exit(vs_block_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/block/vs_block_server.c b/drivers/block/vs_block_server.c
new file mode 100644
index 0000000..9d20f6a
--- /dev/null
+++ b/drivers/block/vs_block_server.c
@@ -0,0 +1,1179 @@
+/*
+ * drivers/block/vs_block_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice server driver
+ *
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#define VS_BLOCK_BLKDEV_DEFAULT_MODE FMODE_READ
+#define VS_BLOCK_BLK_DEF_SECTOR_SIZE 512
+
+/*
+ * Metadata for a request. Note that the bio must be embedded at the end of
+ * this structure, because it is allocated from a bioset.
+ */
+struct block_server_request {
+	struct block_server	*server;
+	u32			tagid;
+	u32			size;
+	int			op_err;
+	struct list_head	list;
+	struct vs_pbuf		pbuf;
+	struct vs_mbuf		*mbuf;
+	bool			bounced;
+	bool			submitted;
+
+	struct bio		bio;
+};
+
+struct block_server {
+	struct vs_server_block_state	server;
+	struct vs_service_device	*service;
+
+	struct block_device		*bdev;
+	struct bio_set			*bioset;
+
+	unsigned int			sector_size;
+	bool				started;
+
+	/* Bounced writes are deferred to keep the memcpy off the service queue */
+	struct list_head		bounce_req_queue;
+	struct work_struct		bounce_req_work;
+	spinlock_t			bounce_req_lock;
+
+	/* Count of outstanding requests submitted to block layer */
+	atomic_t			submitted_req_count;
+	wait_queue_head_t		submitted_req_wq;
+
+	/* Completions are deferred because end_io may be in atomic context */
+	struct list_head		completed_req_queue;
+	struct work_struct		completed_req_work;
+	spinlock_t			completed_req_lock;
+};
+
+#define state_to_block_server(state) \
+	container_of(state, struct block_server, server)
+
+#define dev_to_block_server(dev) \
+	state_to_block_server(dev_get_drvdata(dev))
+
+static inline vservice_block_block_io_error_t
+block_server_linux_to_vs_error(int err)
+{
+	/*
+	 * This list is not exhaustive. For all other errors, we return
+	 * unsupported_command.
+	 */
+	switch (err) {
+	case -ECOMM:
+	case -EIO:
+	case -ENOMEM:
+		return VSERVICE_BLOCK_MEDIA_FAILURE;
+	case -ETIME:
+	case -ETIMEDOUT:
+		return VSERVICE_BLOCK_MEDIA_TIMEOUT;
+	case -EILSEQ:
+		return VSERVICE_BLOCK_INVALID_INDEX;
+	default:
+		if (err)
+			return VSERVICE_BLOCK_UNSUPPORTED_COMMAND;
+		return 0;
+	}
+
+	return 0;
+}
+
+static inline u32 vs_req_num_sectors(struct block_server *server,
+		struct block_server_request *req)
+{
+	return req->size / server->sector_size;
+}
+
+static inline u64 vs_req_sector_index(struct block_server_request *req)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	return req->bio.bi_iter.bi_sector;
+#else
+	return req->bio.bi_sector;
+#endif
+}
+
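+/*
+ * Called on close or reset: fail writes still waiting for bounce buffers,
+ * wait for bios already submitted to the block layer, then discard any
+ * queued completions.
+ */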
+static void vs_block_server_closed(struct vs_server_block_state *state)
+{
+	struct block_server *server = state_to_block_server(state);
+	struct block_server_request *req;
+
+	/*
+	 * Fail all requests that haven't been sent to the block layer yet.
+	 */
+	spin_lock(&server->bounce_req_lock);
+	while (!list_empty(&server->bounce_req_queue)) {
+		req = list_first_entry(&server->bounce_req_queue,
+				struct block_server_request, list);
+		list_del(&req->list);
+		spin_unlock(&server->bounce_req_lock);
+		bio_io_error(&req->bio);
+		spin_lock(&server->bounce_req_lock);
+	}
+	spin_unlock(&server->bounce_req_lock);
+
+	/*
+	 * Wait until all outstanding requests to the block layer are
+	 * complete.
+	 */
+	wait_event(server->submitted_req_wq,
+			!atomic_read(&server->submitted_req_count));
+
+	/*
+	 * Discard all the completed requests.
+	 */
+	spin_lock_irq(&server->completed_req_lock);
+	while (!list_empty(&server->completed_req_queue)) {
+		req = list_first_entry(&server->completed_req_queue,
+				struct block_server_request, list);
+		list_del(&req->list);
+		if (req->mbuf) {
+			spin_unlock_irq(&server->completed_req_lock);
+			if (bio_data_dir(&req->bio) == WRITE)
+				vs_server_block_io_free_req_write(state,
+						&req->pbuf, req->mbuf);
+			else
+				vs_server_block_io_free_ack_read(state,
+						&req->pbuf, req->mbuf);
+			spin_lock_irq(&server->completed_req_lock);
+		}
+		bio_put(&req->bio);
+	}
+	spin_unlock_irq(&server->completed_req_lock);
+}
+
+static ssize_t
+vs_block_server_readonly_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int err;
+	unsigned long val;
+
+	vs_service_state_lock(server->service);
+	if (server->started) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		goto unlock;
+
+	if (bdev_read_only(server->bdev) && !val) {
+		dev_info(dev,
+				"Cannot set %s to read/write: read-only device\n",
+				server->service->name);
+		err = -EINVAL;
+		goto unlock;
+	}
+
+	server->server.readonly = val;
+	err = count;
+
+unlock:
+	vs_service_state_unlock(server->service);
+
+	return err;
+}
+
+static ssize_t
+vs_block_server_readonly_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int cnt;
+
+	vs_service_state_lock(server->service);
+	cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->server.readonly);
+	vs_service_state_unlock(server->service);
+
+	return cnt;
+}
+
+static ssize_t
+vs_block_server_start_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int err;
+	unsigned long val;
+
+	vs_service_state_lock(server->service);
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		goto unlock;
+
+	if (!val && server->started) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	if (val && !server->started) {
+		server->started = true;
+
+		if (server->server.state.base.statenum ==
+				VSERVICE_BASE_STATE_CLOSED__OPEN)
+			vs_server_block_open_complete(&server->server,
+					VS_SERVER_RESP_SUCCESS);
+	}
+
+	err = count;
+unlock:
+	vs_service_state_unlock(server->service);
+
+	return err;
+}
+
+static ssize_t
+vs_block_server_start_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct block_server *server = dev_to_block_server(dev);
+	int cnt;
+
+	vs_service_state_lock(server->service);
+	cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->started);
+	vs_service_state_unlock(server->service);
+
+	return cnt;
+}
+
+static DEVICE_ATTR(start, S_IWUSR | S_IRUSR, vs_block_server_start_show,
+	vs_block_server_start_store);
+static DEVICE_ATTR(readonly, S_IWUSR | S_IRUSR, vs_block_server_readonly_show,
+	vs_block_server_readonly_store);
+
+static struct attribute *vs_block_server_dev_attrs[] = {
+	&dev_attr_start.attr,
+	&dev_attr_readonly.attr,
+	NULL,
+};
+
+static const struct attribute_group vs_block_server_attr_group = {
+	.attrs = vs_block_server_dev_attrs
+};
+
+/*
+ * Invoked by vs_server_block_handle_req_open() after an open request is
+ * received, to perform server-specific initialisation.
+ *
+ * The "delayed start" feature is enforced here: the open is only completed
+ * immediately if the server has already been started via the sysfs "start"
+ * attribute.
+ */
+static vs_server_response_type_t
+vs_block_server_open(struct vs_server_block_state * _state)
+{
+	struct block_server *server = state_to_block_server(_state);
+
+	return (server->started) ? VS_SERVER_RESP_SUCCESS :
+				   VS_SERVER_RESP_EXPLICIT_COMPLETE;
+}
+
+static int
+vs_block_server_complete_req_read(struct block_server_request *req)
+{
+	struct block_server *server = req->server;
+	struct vs_server_block_state *state = &server->server;
+	int err = -EIO;
+
+	if (req->op_err) {
+		err = req->op_err;
+		dev_dbg(&server->service->dev,
+				"read nack, err %d sector 0x%llx num 0x%x\n",
+				err, vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+
+		if (req->mbuf)
+			vs_server_block_io_free_ack_read(state, &req->pbuf,
+					req->mbuf);
+
+		err = vs_server_block_io_send_nack_read(state, req->tagid,
+				block_server_linux_to_vs_error(err),
+				GFP_KERNEL);
+	} else {
+		if (req->bounced && !req->mbuf) {
+			req->mbuf = vs_server_block_io_alloc_ack_read(
+					&server->server, &req->pbuf,
+					GFP_KERNEL);
+			if (IS_ERR(req->mbuf)) {
+				err = PTR_ERR(req->mbuf);
+				req->mbuf = NULL;
+			}
+		}
+
+		if (req->bounced && req->mbuf) {
+			int i;
+			struct bio_vec *bv;
+			void *data = req->pbuf.data;
+
+			if (vs_pbuf_resize(&req->pbuf, req->size) < 0) {
+				bio_io_error(&req->bio);
+				return 0;
+			}
+
+			bio_for_each_segment_all(bv, &req->bio, i) {
+				memcpy(data, page_address(bv->bv_page) +
+						bv->bv_offset, bv->bv_len);
+				data += bv->bv_len;
+				__free_page(bv->bv_page);
+			}
+			req->bounced = false;
+		}
+
+		if (req->mbuf) {
+			dev_vdbg(&server->service->dev,
+					"read ack, sector 0x%llx num 0x%x\n",
+					vs_req_sector_index(req),
+					vs_req_num_sectors(server, req));
+
+			err = vs_server_block_io_send_ack_read(state,
+					req->tagid, req->pbuf, req->mbuf);
+
+			if (err && (err != -ENOBUFS)) {
+				vs_server_block_io_free_ack_read(state,
+						&req->pbuf, req->mbuf);
+				req->mbuf = NULL;
+			}
+		} else {
+			WARN_ON(!err || !req->bounced);
+		}
+	}
+
+	if (err && (err != -ENOBUFS))
+		dev_dbg(&server->service->dev,
+				"error %d sending read reply\n", err);
+	else if (err == -ENOBUFS)
+		dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+	return err;
+}
+
+static int
+vs_block_server_complete_req_write(struct block_server_request *req)
+{
+	struct block_server *server = req->server;
+	struct vs_server_block_state *state = &server->server;
+	int err;
+
+	WARN_ON(req->mbuf);
+
+	if (req->op_err) {
+		dev_dbg(&server->service->dev,
+				"write nack, err %d sector 0x%llx num 0x%x\n",
+				req->op_err, vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+
+		err = vs_server_block_io_send_nack_write(state, req->tagid,
+				block_server_linux_to_vs_error(req->op_err),
+				GFP_KERNEL);
+	} else {
+		dev_vdbg(&server->service->dev,
+				"write ack, sector 0x%llx num 0x%x\n",
+				vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+
+		err = vs_server_block_io_send_ack_write(state, req->tagid,
+				GFP_KERNEL);
+	}
+
+	if (err && (err != -ENOBUFS))
+		dev_dbg(&server->service->dev,
+				"error %d sending write reply\n", err);
+	else if (err == -ENOBUFS)
+		dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+	return err;
+}
+
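+/*
+ * Send the ack or nack for a completed request. Returns -ENOBUFS when we
+ * are out of quota; the caller then re-queues the request until tx_ready
+ * reschedules the completion work.
+ */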
+static int vs_block_server_complete_req(struct block_server *server,
+		struct block_server_request *req)
+{
+	int err;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	req->bio.bi_iter.bi_idx = 0;
+#else
+	req->bio.bi_idx = 0;
+#endif
+	if (!vs_state_lock_safe(&server->server))
+		return -ENOLINK;
+
+	if (bio_data_dir(&req->bio) == WRITE)
+		err = vs_block_server_complete_req_write(req);
+	else
+		err = vs_block_server_complete_req_read(req);
+
+	vs_state_unlock(&server->server);
+
+	if (err == -ENOBUFS)
+		dev_vdbg(&server->service->dev, "bio %pK response out of quota, will retry\n", &req->bio);
+
+	return err;
+}
+
+static void vs_block_server_complete_requests_work(struct work_struct *work)
+{
+	struct block_server *server = container_of(work, struct block_server,
+			completed_req_work);
+	struct block_server_request *req;
+
+	vs_service_send_batch_start(server->service, false);
+
+	/*
+	 * Send ack/nack responses for each completed request. If a request
+	 * cannot be sent because we are over-quota then this function will
+	 * return with a non-empty list, and the tx_ready handler will
+	 * reschedule us when we are back under quota. In all other cases
+	 * this function will return with an empty list.
+	 */
+	spin_lock_irq(&server->completed_req_lock);
+	while (!list_empty(&server->completed_req_queue)) {
+		int err;
+		req = list_first_entry(&server->completed_req_queue,
+				struct block_server_request, list);
+		dev_vdbg(&server->service->dev, "complete bio %pK\n", &req->bio);
+		list_del(&req->list);
+		spin_unlock_irq(&server->completed_req_lock);
+
+		err = vs_block_server_complete_req(server, req);
+		if (err == -ENOBUFS) {
+			dev_vdbg(&server->service->dev, "defer bio %pK\n", &req->bio);
+			/*
+			 * Couldn't send the completion; re-queue the request
+			 * and exit. We'll start again when more quota becomes
+			 * available.
+			 */
+			spin_lock_irq(&server->completed_req_lock);
+			list_add_tail(&req->list,
+					&server->completed_req_queue);
+			break;
+		}
+
+		dev_vdbg(&server->service->dev, "free bio %pK err %d\n", &req->bio, err);
+		bio_put(&req->bio);
+
+		spin_lock_irq(&server->completed_req_lock);
+	}
+	spin_unlock_irq(&server->completed_req_lock);
+
+	vs_service_send_batch_end(server->service, true);
+}
+
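+/*
+ * Quota has become available again; reschedule the completion work so any
+ * deferred ack/nack replies can be sent.
+ */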
+static int vs_block_server_tx_ready(struct vs_server_block_state *state)
+{
+	struct block_server *server = state_to_block_server(state);
+
+	schedule_work(&server->completed_req_work);
+
+	return 0;
+}
+
+static bool vs_block_can_map_pbuf(struct request_queue *q,
+		struct vs_pbuf *pbuf, size_t size)
+{
+	/* The pbuf must satisfy the driver's alignment requirements. */
+	if (!blk_rq_aligned(q, (unsigned long)pbuf->data, size))
+		return false;
+
+	/*
+	 * bios can only contain pages. Sometimes the pbuf is in an IO region
+	 * that has no struct page (e.g. a channel primary buffer), in which
+	 * case we can't map it into a bio.
+	 */
+	/* FIXME: Redmine issue #930 - philip. */
+	if (!pfn_valid(__pa(pbuf->data) >> PAGE_SHIFT))
+		return false;
+
+	return true;
+}
+
+static int vs_block_bio_map_pbuf(struct bio *bio, struct vs_pbuf *pbuf)
+{
+	int offset = offset_in_page((unsigned long)pbuf->data);
+	void *ptr = pbuf->data;
+	int size = pbuf->size;
+
+	while (size > 0) {
+		unsigned bytes = min_t(unsigned, PAGE_SIZE - offset, size);
+
+		if (bio_add_page(bio, virt_to_page(ptr), bytes,
+					offset) < bytes)
+			return -EIO;
+
+		ptr += bytes;
+		size -= bytes;
+		offset = 0;
+	}
+
+	return 0;
+}
+
+/* Read request handling */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_read_done(struct bio *bio, int err)
+#else
+static void vs_block_server_read_done(struct bio *bio)
+#endif
+{
+	unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	int err = bio->bi_error;
+#endif
+	struct block_server_request *req = container_of(bio,
+			struct block_server_request, bio);
+	struct block_server *server = req->server;
+	req->op_err = err;
+
+	spin_lock_irqsave(&server->completed_req_lock, flags);
+	if (req->mbuf)
+		list_add(&req->list, &server->completed_req_queue);
+	else
+		list_add_tail(&req->list, &server->completed_req_queue);
+	spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+	if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+		wake_up_all(&server->submitted_req_wq);
+
+	schedule_work(&server->completed_req_work);
+}
+
+/*
+ * TODO: this may need to split and chain the bio if it exceeds the physical
+ * segment limit of the device. It is not clear whose responsibility that
+ * is; the request queue might do it for us (if there is one).
+ */
+#define vs_block_make_request(bio) generic_make_request(bio)
+
+static int vs_block_submit_read(struct block_server *server,
+		struct block_server_request *req, gfp_t gfp)
+{
+	struct request_queue *q = bdev_get_queue(server->bdev);
+	struct bio *bio = &req->bio;
+	int size = req->size;
+	int err = 0;
+
+	if (req->mbuf && vs_block_can_map_pbuf(q, &req->pbuf, size)) {
+		/*
+		 * The mbuf is valid and the driver can directly access the
+		 * pbuf, so we don't need a bounce buffer. Map the pbuf
+		 * directly into the bio.
+		*/
+		if (vs_pbuf_resize(&req->pbuf, size) < 0)
+			err = -EIO;
+		if (!err)
+			err = vs_block_bio_map_pbuf(bio, &req->pbuf);
+	} else {
+		/* We need a bounce buffer. First set up the bvecs. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+		bio->bi_iter.bi_size = size;
+#else
+		bio->bi_size = size;
+#endif
+
+		while (size > 0) {
+			struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+			BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+			bvec->bv_page = NULL; /* Allocated below */
+			bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+			bvec->bv_offset = 0;
+
+			bio->bi_vcnt++;
+			size -= bvec->bv_len;
+		}
+
+		err = bio_alloc_pages(bio, gfp);
+		if (!err) {
+			blk_recount_segments(q, bio);
+			req->bounced = true;
+		}
+	}
+
+	if (err) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+		bio->bi_error = err;
+		bio_endio(bio);
+#else
+		bio_endio(bio, err);
+#endif
+	} else {
+		dev_vdbg(&server->service->dev,
+				"submit read req sector %#llx count %#x\n",
+				vs_req_sector_index(req),
+				vs_req_num_sectors(server, req));
+		req->submitted = true;
+		atomic_inc(&server->submitted_req_count);
+		vs_block_make_request(bio);
+	}
+
+	return 0;
+}
+
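+/*
+ * Handle an incoming read request: allocate a bio from the bioset, try to
+ * allocate the read ack mbuf up front, and fall back to a bounce buffer if
+ * the ack allocation is over quota.
+ */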
+static int vs_block_server_io_req_read(struct vs_server_block_state *state,
+		u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+		bool flush)
+{
+	struct block_server *server = state_to_block_server(state);
+	struct bio *bio;
+	struct block_server_request *req;
+	unsigned size = num_sects * server->sector_size;
+	unsigned op_flags = 0;
+
+	/*
+	 * This nr_pages calculation assumes that the pbuf data is offset from
+	 * the start of the size-aligned message buffer by more than 0 but
+	 * less than one sector, which is always true for the current message
+	 * layout generated by mill when we assume 512-byte sectors.
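+	 *
+	 * For example, with 4 KiB pages an 8-sector (4 KiB) read has
+	 * size = 4096 and nr_pages = 1 + (4096 >> PAGE_SHIFT) = 2, leaving
+	 * room for the payload to straddle a page boundary.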
+	 */
+	unsigned nr_pages = 1 + (size >> PAGE_SHIFT);
+
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, server->bioset);
+	if (!bio)
+		return -ENOMEM;
+	dev_vdbg(&server->service->dev, "alloc r bio %pK\n", bio);
+	req = container_of(bio, struct block_server_request, bio);
+
+	req->server = server;
+	req->tagid = tagid;
+	req->op_err = 0;
+	req->mbuf = NULL;
+	req->size = size;
+	req->bounced = false;
+	req->submitted = false;
+
+	if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+		op_flags |= REQ_PREFLUSH;
+#else
+		op_flags |= REQ_FLUSH;
+#endif
+	}
+	if (nodelay) {
+		op_flags |= REQ_SYNC;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+	bio->bi_sector = (sector_t)sector_index;
+#endif
+	bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+	bio_set_op_attrs(bio, REQ_OP_READ, op_flags);
+#else
+	bio->bi_rw = READ | op_flags;
+#endif
+	bio->bi_end_io = vs_block_server_read_done;
+
+	req->mbuf = vs_server_block_io_alloc_ack_read(state, &req->pbuf,
+			GFP_KERNEL);
+	if (IS_ERR(req->mbuf) && (PTR_ERR(req->mbuf) == -ENOBUFS)) {
+		/* Fall back to a bounce buffer */
+		req->mbuf = NULL;
+	} else if (IS_ERR(req->mbuf)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+		bio->bi_error = PTR_ERR(req->mbuf);
+		bio_endio(bio);
+#else
+		bio_endio(bio, PTR_ERR(req->mbuf));
+#endif
+		return 0;
+	}
+
+	return vs_block_submit_read(server, req, GFP_KERNEL);
+}
+
+/* Write request handling */
+static int vs_block_submit_bounced_write(struct block_server *server,
+		struct block_server_request *req, gfp_t gfp)
+{
+	struct bio *bio = &req->bio;
+	void *data = req->pbuf.data;
+	struct bio_vec *bv;
+	int i;
+
+	if (bio_alloc_pages(bio, gfp | __GFP_NOWARN) < 0)
+		return -ENOMEM;
+	blk_recount_segments(bdev_get_queue(server->bdev), bio);
+	req->bounced = true;
+
+	/* Copy all the data into the bounce buffer */
+	bio_for_each_segment_all(bv, bio, i) {
+		memcpy(page_address(bv->bv_page) + bv->bv_offset, data,
+				bv->bv_len);
+		data += bv->bv_len;
+	}
+
+	vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+			req->mbuf);
+	req->mbuf = NULL;
+
+	dev_vdbg(&server->service->dev,
+			"submit bounced write req sector %#llx count %#x\n",
+			vs_req_sector_index(req),
+			vs_req_num_sectors(server, req));
+	req->submitted = true;
+	atomic_inc(&server->submitted_req_count);
+	vs_block_make_request(bio);
+
+	return 0;
+}
+
+static void vs_block_server_write_bounce_work(struct work_struct *work)
+{
+	struct block_server *server = container_of(work, struct block_server,
+			bounce_req_work);
+	struct block_server_request *req;
+
+	spin_lock(&server->bounce_req_lock);
+	while (!list_empty(&server->bounce_req_queue)) {
+		req = list_first_entry(&server->bounce_req_queue,
+				struct block_server_request, list);
+		dev_vdbg(&server->service->dev, "write bio %pK\n", &req->bio);
+		list_del(&req->list);
+		spin_unlock(&server->bounce_req_lock);
+
+		if (vs_block_submit_bounced_write(server, req,
+					GFP_KERNEL) == -ENOMEM) {
+			spin_lock(&server->bounce_req_lock);
+			list_add(&req->list, &server->bounce_req_queue);
+			spin_unlock(&server->bounce_req_lock);
+			schedule_work(work);
+			return;
+		}
+
+		spin_lock(&server->bounce_req_lock);
+	}
+	spin_unlock(&server->bounce_req_lock);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_write_done(struct bio *bio, int err)
+#else
+static void vs_block_server_write_done(struct bio *bio)
+#endif
+{
+	unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	int err = bio->bi_error;
+#endif
+	struct block_server_request *req = container_of(bio,
+			struct block_server_request, bio);
+	struct block_server *server = req->server;
+
+	if (req->bounced) {
+		int i;
+		struct bio_vec *bv;
+		bio_for_each_segment_all(bv, bio, i)
+			__free_page(bv->bv_page);
+	} else if (req->mbuf) {
+		vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+				req->mbuf);
+		req->mbuf = NULL;
+	}
+
+	if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+		wake_up_all(&server->submitted_req_wq);
+
+	req->op_err = err;
+
+	spin_lock_irqsave(&server->completed_req_lock, flags);
+	list_add_tail(&req->list, &server->completed_req_queue);
+	spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+	schedule_work(&server->completed_req_work);
+}
+
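+/*
+ * Handle an incoming write request: map the pbuf straight into the bio when
+ * the driver's alignment and page constraints allow it, otherwise defer to
+ * the bounce work queue so the page allocation and memcpy happen outside
+ * the state lock.
+ */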
+static int vs_block_server_io_req_write(struct vs_server_block_state *state,
+		u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+		bool flush, bool commit, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+	struct block_server *server = state_to_block_server(state);
+	struct request_queue *q = bdev_get_queue(server->bdev);
+	struct bio *bio;
+	struct block_server_request *req;
+	unsigned long data = (unsigned long)pbuf.data;
+	unsigned long start = data >> PAGE_SHIFT;
+	unsigned long end = (data + pbuf.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int err;
+	unsigned op_flags = 0;
+
+	bio = bio_alloc_bioset(GFP_KERNEL, end - start, server->bioset);
+	if (!bio)
+		return -ENOMEM;
+	dev_vdbg(&server->service->dev, "alloc w bio %pK\n", bio);
+	req = container_of(bio, struct block_server_request, bio);
+
+	req->server = server;
+	req->tagid = tagid;
+	req->op_err = 0;
+	req->mbuf = mbuf;
+	req->pbuf = pbuf;
+	req->size = server->sector_size * num_sects;
+	req->bounced = false;
+	req->submitted = false;
+
+	if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+		op_flags |= REQ_PREFLUSH;
+#else
+		op_flags |= REQ_FLUSH;
+#endif
+	}
+	if (commit) {
+		op_flags |= REQ_FUA;
+	}
+	if (nodelay) {
+		op_flags |= REQ_SYNC;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+	bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+	bio->bi_sector = (sector_t)sector_index;
+#endif
+	bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+	bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
+#else
+	bio->bi_rw = WRITE | op_flags;
+#endif
+	bio->bi_end_io = vs_block_server_write_done;
+
+	if (pbuf.size < req->size) {
+		err = -EINVAL;
+		goto fail_bio;
+	}
+	if (WARN_ON(pbuf.size > req->size))
+		pbuf.size = req->size;
+
+	if (state->readonly) {
+		err = -EROFS;
+		goto fail_bio;
+	}
+
+	if (!vs_block_can_map_pbuf(q, &req->pbuf, req->pbuf.size)) {
+		/* We need a bounce buffer. First set up the bvecs. */
+		int size = pbuf.size;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+		bio->bi_iter.bi_size = size;
+#else
+		bio->bi_size = size;
+#endif
+
+		while (size > 0) {
+			struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+			BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+			bvec->bv_page = NULL; /* Allocated later */
+			bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+			bvec->bv_offset = 0;
+
+			bio->bi_vcnt++;
+			size -= bvec->bv_len;
+		}
+
+		/*
+		 * Defer the rest so we don't have to hold the state lock
+		 * during alloc_page & memcpy
+		 */
+		spin_lock(&server->bounce_req_lock);
+		list_add_tail(&req->list, &server->bounce_req_queue);
+		spin_unlock(&server->bounce_req_lock);
+		schedule_work(&server->bounce_req_work);
+
+		return 0;
+	}
+
+	/* No bounce needed; map the pbuf directly. */
+	err = vs_block_bio_map_pbuf(bio, &pbuf);
+	if (err < 0)
+		goto fail_bio;
+
+	dev_vdbg(&server->service->dev,
+			"submit direct write req sector %#llx count %#x\n",
+			vs_req_sector_index(req),
+			vs_req_num_sectors(server, req));
+	req->submitted = true;
+	atomic_inc(&server->submitted_req_count);
+	vs_block_make_request(bio);
+
+	return 0;
+
+fail_bio:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+	bio->bi_error = err;
+	bio_endio(bio);
+#else
+	bio_endio(bio, err);
+#endif
+	return 0;
+}
+
+static struct block_device *
+vs_block_server_find_by_name(struct block_server *server)
+{
+	struct block_device *bdev = NULL;
+	struct class_dev_iter iter;
+	struct device *dev;
+
+	class_dev_iter_init(&iter, &block_class, NULL, NULL);
+	while (1) {
+		dev = class_dev_iter_next(&iter);
+		if (!dev)
+			break;
+
+		if (strcmp(dev_name(dev), server->service->name) == 0) {
+			bdev = blkdev_get_by_dev(dev->devt,
+					VS_BLOCK_BLKDEV_DEFAULT_MODE, NULL);
+			if (!IS_ERR_OR_NULL(bdev))
+				break;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	if (!dev || IS_ERR_OR_NULL(bdev))
+		return ERR_PTR(-ENODEV);
+
+	dev_dbg(&server->service->dev, "Attached to block device %s (%d:%d)\n",
+			dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+	return bdev;
+}
+
+static struct block_device *
+vs_block_server_find_by_path(struct block_server *server, const char *base_path)
+{
+	struct block_device *bdev;
+	char *bdev_path;
+
+	bdev_path = kasprintf(GFP_KERNEL, "%s/%s", base_path,
+			server->service->name);
+	if (!bdev_path)
+		return ERR_PTR(-ENOMEM);
+
+	bdev = blkdev_get_by_path(bdev_path, VS_BLOCK_BLKDEV_DEFAULT_MODE,
+			NULL);
+	dev_dbg(&server->service->dev, "Attached to block device %s\n",
+			bdev_path);
+
+	kfree(bdev_path);
+
+	if (!bdev)
+		return ERR_PTR(-ENODEV);
+	return bdev;
+}
+
+static struct block_device *
+vs_block_server_attach_block_device(struct block_server *server)
+{
+	const char *paths[] = {
+		"/dev",
+		"/dev/block",
+		"/dev/mapper",
+		"/dev/disk/by-partlabel",
+		"/dev/disk/by-label",
+		"/dev/disk/by-partuuid",
+		"/dev/disk/by-uuid"
+	};
+	struct block_device *bdev;
+	int i;
+
+	/*
+	 * Try first to look the block device up by path. This is done because
+	 * the name exposed to user-space in /dev/ is not necessarily the name
+	 * being used inside the kernel for the device.
+	 */
+	for (i = 0; i < ARRAY_SIZE(paths); i++) {
+		bdev = vs_block_server_find_by_path(server, paths[i]);
+		if (!IS_ERR(bdev))
+			break;
+	}
+	if (i == ARRAY_SIZE(paths)) {
+		/*
+		 * Couldn't find the block device in any of the usual places.
+		 * Try to match it against the kernel's device name. If the
+		 * name of the service and the name of a device in the block
+		 * class match then attempt to look the block device up by the
+		 * dev_t (major/minor) value.
+		 */
+		bdev = vs_block_server_find_by_name(server);
+	}
+	if (IS_ERR(bdev))
+		return bdev;
+
+	server->sector_size		= VS_BLOCK_BLK_DEF_SECTOR_SIZE;
+	server->server.segment_size	= round_down(
+		vs_service_max_mbuf_size(server->service) -
+		sizeof(vs_message_id_t), server->sector_size);
+	server->server.sector_size	= server->sector_size;
+	server->server.device_sectors	= bdev->bd_part->nr_sects;
+	if (bdev_read_only(bdev))
+		server->server.readonly = true;
+	server->server.flushable = true;
+	server->server.committable = true;
+
+	return bdev;
+}
+
+static struct vs_server_block_state *
+vs_block_server_alloc(struct vs_service_device *service)
+{
+	struct block_server *server;
+	int err;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		return NULL;
+
+	server->service = service;
+	server->started = false;
+	INIT_LIST_HEAD(&server->bounce_req_queue);
+	INIT_WORK(&server->bounce_req_work, vs_block_server_write_bounce_work);
+	spin_lock_init(&server->bounce_req_lock);
+	atomic_set(&server->submitted_req_count, 0);
+	init_waitqueue_head(&server->submitted_req_wq);
+	INIT_LIST_HEAD(&server->completed_req_queue);
+	INIT_WORK(&server->completed_req_work,
+			vs_block_server_complete_requests_work);
+	spin_lock_init(&server->completed_req_lock);
+
+	server->bdev = vs_block_server_attach_block_device(server);
+	if (IS_ERR(server->bdev)) {
+		dev_err(&server->service->dev,
+				"No appropriate block device was found to satisfy the service name %s - error %ld\n",
+				server->service->name, PTR_ERR(server->bdev));
+		goto fail_attach_device;
+	}
+
+	dev_set_drvdata(&service->dev, &server->server);
+
+	err = sysfs_create_group(&service->dev.kobj,
+				 &vs_block_server_attr_group);
+	if (err) {
+		dev_err(&service->dev,
+			"Failed to create attribute group for service %s\n",
+			service->name);
+		goto fail_create_group;
+	}
+
+	/*
+	 * We know the upper bound on simultaneously active bios (i.e. the
+	 * smaller of the in quota and the sum of the read and write command
+	 * tag limits), so we can pre-allocate that many, and hopefully never
+	 * fail to allocate one in a request handler.
+	 *
+	 * However, allocation may fail if the number of pages (and thus
+	 * bvecs) in a request exceeds BIO_INLINE_VECS (which is hard-coded to
+	 * 4 in all mainline kernels). That possibility is the only reason we
+	 * can't enable rx_atomic for this driver.
+	 */
+	server->bioset = bioset_create(min_t(unsigned, service->recv_quota,
+				VSERVICE_BLOCK_IO_READ_MAX_PENDING +
+				VSERVICE_BLOCK_IO_WRITE_MAX_PENDING),
+			offsetof(struct block_server_request, bio));
+	if (!server->bioset) {
+		dev_err(&service->dev,
+			"Failed to allocate bioset for service %s\n",
+			service->name);
+		goto fail_create_bioset;
+	}
+
+	dev_dbg(&service->dev, "New block server %pK\n", server);
+
+	return &server->server;
+
+fail_create_bioset:
+	sysfs_remove_group(&server->service->dev.kobj,
+			   &vs_block_server_attr_group);
+fail_create_group:
+	dev_set_drvdata(&service->dev, NULL);
+	blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+fail_attach_device:
+	kfree(server);
+
+	return NULL;
+}
+
+static void vs_block_server_release(struct vs_server_block_state *state)
+{
+	struct block_server *server = state_to_block_server(state);
+
+	cancel_work_sync(&server->bounce_req_work);
+	cancel_work_sync(&server->completed_req_work);
+
+	blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+
+	sysfs_remove_group(&server->service->dev.kobj,
+			   &vs_block_server_attr_group);
+
+	bioset_free(server->bioset);
+
+	kfree(server);
+}
+
+static struct vs_server_block block_server_driver = {
+	.alloc			= vs_block_server_alloc,
+	.release		= vs_block_server_release,
+	.open			= vs_block_server_open,
+	.closed			= vs_block_server_closed,
+	.tx_ready		= vs_block_server_tx_ready,
+	.io = {
+		.req_read	= vs_block_server_io_req_read,
+		.req_write	= vs_block_server_io_req_write,
+	},
+
+	/* Large default quota for batching read/write commands */
+	.in_quota_best		= 32,
+	.out_quota_best		= 32,
+};
+
+static int __init vs_block_server_init(void)
+{
+	return vservice_block_server_register(&block_server_driver,
+			"block_server_driver");
+}
+
+static void __exit vs_block_server_exit(void)
+{
+	vservice_block_server_unregister(&block_server_driver);
+}
+
+module_init(vs_block_server_init);
+module_exit(vs_block_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index b8ecba6..cb53957 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -12,4 +12,26 @@
 	  It has several use cases, for example: /tmp storage, use as swap
 	  disks and maybe many more.
 
-	  See zram.txt for more information.
+	  See Documentation/blockdev/zram.txt for more information.
+
+config ZRAM_WRITEBACK
+       bool "Write back incompressible page to backing device"
+       depends on ZRAM
+       default n
+       help
+	 An incompressible page yields no memory saving when kept in
+	 memory. Instead, write it out to a backing device.
+	 For this feature, the admin should set up a backing device via
+	 /sys/block/zramX/backing_dev.
+
+	 See Documentation/blockdev/zram.txt for more information.
+
+config ZRAM_MEMORY_TRACKING
+	bool "Track zRam block status"
+	depends on ZRAM && DEBUG_FS
+	help
+	  With this feature, the admin can track the state of zRAM's
+	  allocated blocks. The information is available via
+	  /sys/kernel/debug/zram/zramX/block_state.
+
+	  See Documentation/blockdev/zram.txt for more information.
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4b5cd3a..c084a7f 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -32,6 +32,9 @@
 #if IS_ENABLED(CONFIG_CRYPTO_842)
 	"842",
 #endif
+#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
+	"zstd",
+#endif
 	NULL
 };
 
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 396c26e..9adf91f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -31,6 +31,7 @@
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/sysfs.h>
+#include <linux/debugfs.h>
 
 #include "zram_drv.h"
 
@@ -41,90 +42,107 @@
 static int zram_major;
 static const char *default_compressor = "lzo";
 
-/*
- * We don't need to see memory allocation errors more than once every 1
- * second to know that a problem is occurring.
- */
-#define ALLOC_ERROR_LOG_RATE_MS 1000
-
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
+/*
+ * Pages that compress to a size equal to or greater than this are
+ * stored uncompressed in memory.
+ */
+static size_t huge_class_size;
 
-static inline void deprecated_attr_warn(const char *name)
+static void zram_free_page(struct zram *zram, size_t index);
+
+static void zram_slot_lock(struct zram *zram, u32 index)
 {
-	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
-			task_pid_nr(current),
-			current->comm,
-			name,
-			"See zram documentation.");
+	bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
 }
 
-#define ZRAM_ATTR_RO(name)						\
-static ssize_t name##_show(struct device *d,				\
-				struct device_attribute *attr, char *b)	\
-{									\
-	struct zram *zram = dev_to_zram(d);				\
-									\
-	deprecated_attr_warn(__stringify(name));			\
-	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
-		(u64)atomic64_read(&zram->stats.name));			\
-}									\
-static DEVICE_ATTR_RO(name);
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
+}
 
 static inline bool init_done(struct zram *zram)
 {
 	return zram->disksize;
 }
 
+static inline bool zram_allocated(struct zram *zram, u32 index)
+{
+
+	return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) ||
+					zram->table[index].handle;
+}
+
 static inline struct zram *dev_to_zram(struct device *dev)
 {
 	return (struct zram *)dev_to_disk(dev)->private_data;
 }
 
+static unsigned long zram_get_handle(struct zram *zram, u32 index)
+{
+	return zram->table[index].handle;
+}
+
+static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
+{
+	zram->table[index].handle = handle;
+}
+
 /* flag operations require table entry bit_spin_lock() being held */
-static int zram_test_flag(struct zram_meta *meta, u32 index,
+static bool zram_test_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	return meta->table[index].value & BIT(flag);
+	return zram->table[index].value & BIT(flag);
 }
 
-static void zram_set_flag(struct zram_meta *meta, u32 index,
+static void zram_set_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].value |= BIT(flag);
+	zram->table[index].value |= BIT(flag);
 }
 
-static void zram_clear_flag(struct zram_meta *meta, u32 index,
+static void zram_clear_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].value &= ~BIT(flag);
+	zram->table[index].value &= ~BIT(flag);
 }
 
-static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+static inline void zram_set_element(struct zram *zram, u32 index,
+			unsigned long element)
 {
-	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+	zram->table[index].element = element;
 }
 
-static void zram_set_obj_size(struct zram_meta *meta,
+static unsigned long zram_get_element(struct zram *zram, u32 index)
+{
+	return zram->table[index].element;
+}
+
+static size_t zram_get_obj_size(struct zram *zram, u32 index)
+{
+	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+}
+
+static void zram_set_obj_size(struct zram *zram,
 					u32 index, size_t size)
 {
-	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;
 
-	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
+#if PAGE_SIZE != 4096
 static inline bool is_partial_io(struct bio_vec *bvec)
 {
 	return bvec->bv_len != PAGE_SIZE;
 }
-
-static void zram_revalidate_disk(struct zram *zram)
+#else
+static inline bool is_partial_io(struct bio_vec *bvec)
 {
-	revalidate_disk(zram->disk);
-	/* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
-	zram->disk->queue->backing_dev_info->capabilities |=
-		BDI_CAP_STABLE_WRITES;
+	return false;
 }
+#endif
 
 /*
  * Check if request is within bounds and aligned on zram logical blocks.
@@ -152,8 +170,7 @@
 
 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
 {
-	if (*offset + bvec->bv_len >= PAGE_SIZE)
-		(*index)++;
+	*index  += (*offset + bvec->bv_len) / PAGE_SIZE;
 	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
 }
 
@@ -172,36 +189,41 @@
 	} while (old_max != cur_max);
 }
 
-static bool page_zero_filled(void *ptr)
+static inline void zram_fill_page(char *ptr, unsigned long len,
+					unsigned long value)
+{
+	int i;
+	unsigned long *page = (unsigned long *)ptr;
+
+	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
+
+	if (likely(value == 0)) {
+		memset(ptr, 0, len);
+	} else {
+		for (i = 0; i < len / sizeof(*page); i++)
+			page[i] = value;
+	}
+}
+
+static bool page_same_filled(void *ptr, unsigned long *element)
 {
 	unsigned int pos;
 	unsigned long *page;
+	unsigned long val;
 
 	page = (unsigned long *)ptr;
+	val = page[0];
 
-	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
-		if (page[pos])
+	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+		if (val != page[pos])
 			return false;
 	}
 
+	*element = val;
+
 	return true;
 }
 
-static void handle_zero_page(struct bio_vec *bvec)
-{
-	struct page *page = bvec->bv_page;
-	void *user_mem;
-
-	user_mem = kmap_atomic(page);
-	if (is_partial_io(bvec))
-		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	else
-		clear_page(user_mem);
-	kunmap_atomic(user_mem);
-
-	flush_dcache_page(page);
-}
-
 static ssize_t initstate_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -223,47 +245,6 @@
 	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
 }
 
-static ssize_t orig_data_size_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct zram *zram = dev_to_zram(dev);
-
-	deprecated_attr_warn("orig_data_size");
-	return scnprintf(buf, PAGE_SIZE, "%llu\n",
-		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
-}
-
-static ssize_t mem_used_total_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	u64 val = 0;
-	struct zram *zram = dev_to_zram(dev);
-
-	deprecated_attr_warn("mem_used_total");
-	down_read(&zram->init_lock);
-	if (init_done(zram)) {
-		struct zram_meta *meta = zram->meta;
-		val = zs_get_total_pages(meta->mem_pool);
-	}
-	up_read(&zram->init_lock);
-
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
-}
-
-static ssize_t mem_limit_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	u64 val;
-	struct zram *zram = dev_to_zram(dev);
-
-	deprecated_attr_warn("mem_limit");
-	down_read(&zram->init_lock);
-	val = zram->limit_pages;
-	up_read(&zram->init_lock);
-
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
-}
-
 static ssize_t mem_limit_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -282,21 +263,6 @@
 	return len;
 }
 
-static ssize_t mem_used_max_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	u64 val = 0;
-	struct zram *zram = dev_to_zram(dev);
-
-	deprecated_attr_warn("mem_used_max");
-	down_read(&zram->init_lock);
-	if (init_done(zram))
-		val = atomic_long_read(&zram->stats.max_used_pages);
-	up_read(&zram->init_lock);
-
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
-}
-
 static ssize_t mem_used_max_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -310,15 +276,485 @@
 
 	down_read(&zram->init_lock);
 	if (init_done(zram)) {
-		struct zram_meta *meta = zram->meta;
 		atomic_long_set(&zram->stats.max_used_pages,
-				zs_get_total_pages(meta->mem_pool));
+				zs_get_total_pages(zram->mem_pool));
 	}
 	up_read(&zram->init_lock);
 
 	return len;
 }
 
+#ifdef CONFIG_ZRAM_WRITEBACK
+static bool zram_wb_enabled(struct zram *zram)
+{
+	return zram->backing_dev;
+}
+
+static void reset_bdev(struct zram *zram)
+{
+	struct block_device *bdev;
+
+	if (!zram_wb_enabled(zram))
+		return;
+
+	bdev = zram->bdev;
+	if (zram->old_block_size)
+		set_blocksize(bdev, zram->old_block_size);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+	/* hope filp_close flushes all outstanding IO */
+	filp_close(zram->backing_dev, NULL);
+	zram->backing_dev = NULL;
+	zram->old_block_size = 0;
+	zram->bdev = NULL;
+
+	kvfree(zram->bitmap);
+	zram->bitmap = NULL;
+}
+
+static ssize_t backing_dev_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct zram *zram = dev_to_zram(dev);
+	struct file *file = zram->backing_dev;
+	char *p;
+	ssize_t ret;
+
+	down_read(&zram->init_lock);
+	if (!zram_wb_enabled(zram)) {
+		memcpy(buf, "none\n", 5);
+		up_read(&zram->init_lock);
+		return 5;
+	}
+
+	p = file_path(file, buf, PAGE_SIZE - 1);
+	if (IS_ERR(p)) {
+		ret = PTR_ERR(p);
+		goto out;
+	}
+
+	ret = strlen(p);
+	memmove(buf, p, ret);
+	buf[ret++] = '\n';
+out:
+	up_read(&zram->init_lock);
+	return ret;
+}
+
+static ssize_t backing_dev_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	char *file_name;
+	size_t sz;
+	struct file *backing_dev = NULL;
+	struct inode *inode;
+	struct address_space *mapping;
+	unsigned int bitmap_sz, old_block_size = 0;
+	unsigned long nr_pages, *bitmap = NULL;
+	struct block_device *bdev = NULL;
+	int err;
+	struct zram *zram = dev_to_zram(dev);
+	gfp_t kmalloc_flags;
+
+	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!file_name)
+		return -ENOMEM;
+
+	down_write(&zram->init_lock);
+	if (init_done(zram)) {
+		pr_info("Can't setup backing device for initialized device\n");
+		err = -EBUSY;
+		goto out;
+	}
+
+	strlcpy(file_name, buf, PATH_MAX);
+	/* ignore trailing newline */
+	sz = strlen(file_name);
+	if (sz > 0 && file_name[sz - 1] == '\n')
+		file_name[sz - 1] = 0x00;
+
+	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+	if (IS_ERR(backing_dev)) {
+		err = PTR_ERR(backing_dev);
+		backing_dev = NULL;
+		goto out;
+	}
+
+	mapping = backing_dev->f_mapping;
+	inode = mapping->host;
+
+	/* Only block devices are supported at the moment */
+	if (!S_ISBLK(inode->i_mode)) {
+		err = -ENOTBLK;
+		goto out;
+	}
+
+	bdev = bdgrab(I_BDEV(inode));
+	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+	if (err < 0)
+		goto out;
+
+	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+	kmalloc_flags = GFP_KERNEL | __GFP_ZERO;
+	if (bitmap_sz > PAGE_SIZE)
+		kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;
+
+	bitmap = kmalloc_node(bitmap_sz, kmalloc_flags, NUMA_NO_NODE);
+	if (!bitmap && bitmap_sz > PAGE_SIZE)
+		bitmap = vzalloc(bitmap_sz);
+
+	if (!bitmap) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	old_block_size = block_size(bdev);
+	err = set_blocksize(bdev, PAGE_SIZE);
+	if (err)
+		goto out;
+
+	reset_bdev(zram);
+	spin_lock_init(&zram->bitmap_lock);
+
+	zram->old_block_size = old_block_size;
+	zram->bdev = bdev;
+	zram->backing_dev = backing_dev;
+	zram->bitmap = bitmap;
+	zram->nr_pages = nr_pages;
+	up_write(&zram->init_lock);
+
+	pr_info("setup backing device %s\n", file_name);
+	kfree(file_name);
+
+	return len;
+out:
+	if (bitmap)
+		kvfree(bitmap);
+
+	if (bdev)
+		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+
+	if (backing_dev)
+		filp_close(backing_dev, NULL);
+
+	up_write(&zram->init_lock);
+
+	kfree(file_name);
+
+	return err;
+}
+
+static unsigned long get_entry_bdev(struct zram *zram)
+{
+	unsigned long entry;
+
+	spin_lock(&zram->bitmap_lock);
+	/* skip bit 0 to avoid confusion with zram.handle == 0 */
+	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
+	if (entry == zram->nr_pages) {
+		spin_unlock(&zram->bitmap_lock);
+		return 0;
+	}
+
+	set_bit(entry, zram->bitmap);
+	spin_unlock(&zram->bitmap_lock);
+
+	return entry;
+}
+
+static void put_entry_bdev(struct zram *zram, unsigned long entry)
+{
+	int was_set;
+
+	spin_lock(&zram->bitmap_lock);
+	was_set = test_and_clear_bit(entry, zram->bitmap);
+	spin_unlock(&zram->bitmap_lock);
+	WARN_ON_ONCE(!was_set);
+}
+
+static void zram_page_end_io(struct bio *bio)
+{
+	struct page *page = bio->bi_io_vec[0].bv_page;
+
+	page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
+	bio_put(bio);
+}
+
+/*
+ * Returns 1 if the submission is successful.
+ */
+static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *parent)
+{
+	struct bio *bio;
+
+	bio = bio_alloc(GFP_ATOMIC, 1);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+	bio->bi_bdev = zram->bdev;
+	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+		bio_put(bio);
+		return -EIO;
+	}
+
+	if (!parent) {
+		bio->bi_opf = REQ_OP_READ;
+		bio->bi_end_io = zram_page_end_io;
+	} else {
+		bio->bi_opf = parent->bi_opf;
+		bio_chain(bio, parent);
+	}
+
+	submit_bio(bio);
+	return 1;
+}
+
+struct zram_work {
+	struct work_struct work;
+	struct zram *zram;
+	unsigned long entry;
+	struct bio *bio;
+};
+
+#if PAGE_SIZE != 4096
+static void zram_sync_read(struct work_struct *work)
+{
+	struct bio_vec bvec;
+	struct zram_work *zw = container_of(work, struct zram_work, work);
+	struct zram *zram = zw->zram;
+	unsigned long entry = zw->entry;
+	struct bio *bio = zw->bio;
+
+	read_from_bdev_async(zram, &bvec, entry, bio);
+}
+
+/*
+ * The block layer wants only one ->make_request_fn active at a time,
+ * so chaining IO to a parent IO in the same context would deadlock.
+ * To avoid that, the read is done from a worker thread context.
+ */
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+				unsigned long entry, struct bio *bio)
+{
+	struct zram_work work;
+
+	work.zram = zram;
+	work.entry = entry;
+	work.bio = bio;
+
+	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
+	queue_work(system_unbound_wq, &work.work);
+	flush_work(&work.work);
+	destroy_work_on_stack(&work.work);
+
+	return 1;
+}
+#else
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+				unsigned long entry, struct bio *bio)
+{
+	WARN_ON(1);
+	return -EIO;
+}
+#endif
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *parent, bool sync)
+{
+	if (sync)
+		return read_from_bdev_sync(zram, bvec, entry, parent);
+	else
+		return read_from_bdev_async(zram, bvec, entry, parent);
+}
+
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+					u32 index, struct bio *parent,
+					unsigned long *pentry)
+{
+	struct bio *bio;
+	unsigned long entry;
+
+	bio = bio_alloc(GFP_ATOMIC, 1);
+	if (!bio)
+		return -ENOMEM;
+
+	entry = get_entry_bdev(zram);
+	if (!entry) {
+		bio_put(bio);
+		return -ENOSPC;
+	}
+
+	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+	bio->bi_bdev = zram->bdev;
+	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+					bvec->bv_offset)) {
+		bio_put(bio);
+		put_entry_bdev(zram, entry);
+		return -EIO;
+	}
+
+	if (!parent) {
+		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
+		bio->bi_end_io = zram_page_end_io;
+	} else {
+		bio->bi_opf = parent->bi_opf;
+		bio_chain(bio, parent);
+	}
+
+	submit_bio(bio);
+	*pentry = entry;
+
+	return 0;
+}
+
+static void zram_wb_clear(struct zram *zram, u32 index)
+{
+	unsigned long entry;
+
+	zram_clear_flag(zram, index, ZRAM_WB);
+	entry = zram_get_element(zram, index);
+	zram_set_element(zram, index, 0);
+	put_entry_bdev(zram, entry);
+}
+
+#else
+static bool zram_wb_enabled(struct zram *zram) { return false; }
+static inline void reset_bdev(struct zram *zram) {};
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+					u32 index, struct bio *parent,
+					unsigned long *pentry)
+
+{
+	return -EIO;
+}
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+			unsigned long entry, struct bio *parent, bool sync)
+{
+	return -EIO;
+}
+static void zram_wb_clear(struct zram *zram, u32 index) {}
+#endif
+
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+
+static struct dentry *zram_debugfs_root;
+
+static void zram_debugfs_create(void)
+{
+	zram_debugfs_root = debugfs_create_dir("zram", NULL);
+}
+
+static void zram_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(zram_debugfs_root);
+}
+
+static void zram_accessed(struct zram *zram, u32 index)
+{
+	zram->table[index].ac_time = ktime_get_boottime();
+}
+
+static void zram_reset_access(struct zram *zram, u32 index)
+{
+	zram->table[index].ac_time.tv64 = 0;
+}
+
+static ssize_t read_block_state(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char *kbuf;
+	ssize_t index, written = 0;
+	struct zram *zram = file->private_data;
+	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+	struct timespec64 ts;
+
+	gfp_t kmalloc_flags;
+
+	kmalloc_flags = GFP_KERNEL;
+	if (count > PAGE_SIZE)
+		kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;
+
+	kbuf = kmalloc_node(count, kmalloc_flags, NUMA_NO_NODE);
+	if (!kbuf && count > PAGE_SIZE)
+		kbuf = vmalloc(count);
+	if (!kbuf)
+		return -ENOMEM;
+
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		up_read(&zram->init_lock);
+		kvfree(kbuf);
+		return -EINVAL;
+	}
+
+	for (index = *ppos; index < nr_pages; index++) {
+		int copied;
+
+		zram_slot_lock(zram, index);
+		if (!zram_allocated(zram, index))
+			goto next;
+
+		ts = ktime_to_timespec64(zram->table[index].ac_time);
+		copied = snprintf(kbuf + written, count,
+			"%12zd %12lld.%06lu %c%c%c\n",
+			index, (s64)ts.tv_sec,
+			ts.tv_nsec / NSEC_PER_USEC,
+			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
+			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
+			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.');
+
+		if (count < copied) {
+			zram_slot_unlock(zram, index);
+			break;
+		}
+		written += copied;
+		count -= copied;
+next:
+		zram_slot_unlock(zram, index);
+		*ppos += 1;
+	}
+
+	up_read(&zram->init_lock);
+	if (copy_to_user(buf, kbuf, written))
+		written = -EFAULT;
+	kvfree(kbuf);
+
+	return written;
+}
+
+static const struct file_operations proc_zram_block_state_op = {
+	.open = simple_open,
+	.read = read_block_state,
+	.llseek = default_llseek,
+};
+
+static void zram_debugfs_register(struct zram *zram)
+{
+	if (!zram_debugfs_root)
+		return;
+
+	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
+						zram_debugfs_root);
+	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
+				zram, &proc_zram_block_state_op);
+}
+
+static void zram_debugfs_unregister(struct zram *zram)
+{
+	debugfs_remove_recursive(zram->debugfs_dir);
+}
+#else
+static void zram_debugfs_create(void) {};
+static void zram_debugfs_destroy(void) {};
+static void zram_accessed(struct zram *zram, u32 index) {};
+static void zram_reset_access(struct zram *zram, u32 index) {};
+static void zram_debugfs_register(struct zram *zram) {};
+static void zram_debugfs_unregister(struct zram *zram) {};
+#endif
+
 /*
  * We switched to per-cpu streams and this attr is not needed anymore.
  * However, we will keep it around for some time, because:
@@ -357,7 +793,7 @@
 		struct device_attribute *attr, const char *buf, size_t len)
 {
 	struct zram *zram = dev_to_zram(dev);
-	char compressor[CRYPTO_MAX_ALG_NAME];
+	char compressor[ARRAY_SIZE(zram->compressor)];
 	size_t sz;
 
 	strlcpy(compressor, buf, sizeof(compressor));
@@ -376,7 +812,7 @@
 		return -EBUSY;
 	}
 
-	strlcpy(zram->compressor, compressor, sizeof(compressor));
+	strcpy(zram->compressor, compressor);
 	up_write(&zram->init_lock);
 	return len;
 }
@@ -385,7 +821,6 @@
 		struct device_attribute *attr, const char *buf, size_t len)
 {
 	struct zram *zram = dev_to_zram(dev);
-	struct zram_meta *meta;
 
 	down_read(&zram->init_lock);
 	if (!init_done(zram)) {
@@ -393,8 +828,7 @@
 		return -EINVAL;
 	}
 
-	meta = zram->meta;
-	zs_compact(meta->mem_pool);
+	zs_compact(zram->mem_pool);
 	up_read(&zram->init_lock);
 
 	return len;
@@ -431,22 +865,23 @@
 
 	down_read(&zram->init_lock);
 	if (init_done(zram)) {
-		mem_used = zs_get_total_pages(zram->meta->mem_pool);
-		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
+		mem_used = zs_get_total_pages(zram->mem_pool);
+		zs_pool_stats(zram->mem_pool, &pool_stats);
 	}
 
 	orig_size = atomic64_read(&zram->stats.pages_stored);
 	max_used = atomic_long_read(&zram->stats.max_used_pages);
 
 	ret = scnprintf(buf, PAGE_SIZE,
-			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
+			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
 			orig_size << PAGE_SHIFT,
 			(u64)atomic64_read(&zram->stats.compr_data_size),
 			mem_used << PAGE_SHIFT,
 			zram->limit_pages << PAGE_SHIFT,
 			max_used << PAGE_SHIFT,
-			(u64)atomic64_read(&zram->stats.zero_pages),
-			pool_stats.pages_compacted);
+			(u64)atomic64_read(&zram->stats.same_pages),
+			pool_stats.pages_compacted,
+			(u64)atomic64_read(&zram->stats.huge_pages));
 	up_read(&zram->init_lock);
 
 	return ret;
@@ -472,74 +907,38 @@
 static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
-ZRAM_ATTR_RO(num_reads);
-ZRAM_ATTR_RO(num_writes);
-ZRAM_ATTR_RO(failed_reads);
-ZRAM_ATTR_RO(failed_writes);
-ZRAM_ATTR_RO(invalid_io);
-ZRAM_ATTR_RO(notify_free);
-ZRAM_ATTR_RO(zero_pages);
-ZRAM_ATTR_RO(compr_data_size);
 
-static inline bool zram_meta_get(struct zram *zram)
-{
-	if (atomic_inc_not_zero(&zram->refcount))
-		return true;
-	return false;
-}
-
-static inline void zram_meta_put(struct zram *zram)
-{
-	atomic_dec(&zram->refcount);
-}
-
-static void zram_meta_free(struct zram_meta *meta, u64 disksize)
+static void zram_meta_free(struct zram *zram, u64 disksize)
 {
 	size_t num_pages = disksize >> PAGE_SHIFT;
 	size_t index;
 
 	/* Free all pages that are still in this zram device */
-	for (index = 0; index < num_pages; index++) {
-		unsigned long handle = meta->table[index].handle;
+	for (index = 0; index < num_pages; index++)
+		zram_free_page(zram, index);
 
-		if (!handle)
-			continue;
-
-		zs_free(meta->mem_pool, handle);
-	}
-
-	zs_destroy_pool(meta->mem_pool);
-	vfree(meta->table);
-	kfree(meta);
+	zs_destroy_pool(zram->mem_pool);
+	vfree(zram->table);
 }
 
-static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
+static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 {
 	size_t num_pages;
-	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
-
-	if (!meta)
-		return NULL;
 
 	num_pages = disksize >> PAGE_SHIFT;
-	meta->table = vzalloc(num_pages * sizeof(*meta->table));
-	if (!meta->table) {
-		pr_err("Error allocating zram address table\n");
-		goto out_error;
+	zram->table = vzalloc(num_pages * sizeof(*zram->table));
+	if (!zram->table)
+		return false;
+
+	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
+	if (!zram->mem_pool) {
+		vfree(zram->table);
+		return false;
 	}
 
-	meta->mem_pool = zs_create_pool(pool_name);
-	if (!meta->mem_pool) {
-		pr_err("Error creating memory pool\n");
-		goto out_error;
-	}
-
-	return meta;
-
-out_error:
-	vfree(meta->table);
-	kfree(meta);
-	return NULL;
+	if (!huge_class_size)
+		huge_class_size = zs_huge_class_size(zram->mem_pool);
+	return true;
 }
 
 /*
@@ -549,192 +948,194 @@
  */
 static void zram_free_page(struct zram *zram, size_t index)
 {
-	struct zram_meta *meta = zram->meta;
-	unsigned long handle = meta->table[index].handle;
+	unsigned long handle;
 
-	if (unlikely(!handle)) {
-		/*
-		 * No memory is allocated for zero filled pages.
-		 * Simply clear zero page flag.
-		 */
-		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
-			zram_clear_flag(meta, index, ZRAM_ZERO);
-			atomic64_dec(&zram->stats.zero_pages);
-		}
+	zram_reset_access(zram, index);
+
+	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
+		zram_clear_flag(zram, index, ZRAM_HUGE);
+		atomic64_dec(&zram->stats.huge_pages);
+	}
+
+	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
+		zram_wb_clear(zram, index);
+		atomic64_dec(&zram->stats.pages_stored);
 		return;
 	}
 
-	zs_free(meta->mem_pool, handle);
+	/*
+	 * No memory is allocated for same-element-filled pages.
+	 * Simply clear the same-page flag.
+	 */
+	if (zram_test_flag(zram, index, ZRAM_SAME)) {
+		zram_clear_flag(zram, index, ZRAM_SAME);
+		zram_set_element(zram, index, 0);
+		atomic64_dec(&zram->stats.same_pages);
+		atomic64_dec(&zram->stats.pages_stored);
+		return;
+	}
 
-	atomic64_sub(zram_get_obj_size(meta, index),
+	handle = zram_get_handle(zram, index);
+	if (!handle)
+		return;
+
+	zs_free(zram->mem_pool, handle);
+
+	atomic64_sub(zram_get_obj_size(zram, index),
 			&zram->stats.compr_data_size);
 	atomic64_dec(&zram->stats.pages_stored);
 
-	meta->table[index].handle = 0;
-	zram_set_obj_size(meta, index, 0);
+	zram_set_handle(zram, index, 0);
+	zram_set_obj_size(zram, index, 0);
 }
 
-static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+				struct bio *bio, bool partial_io)
 {
-	int ret = 0;
-	unsigned char *cmem;
-	struct zram_meta *meta = zram->meta;
+	int ret;
 	unsigned long handle;
 	unsigned int size;
+	void *src, *dst;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
-	handle = meta->table[index].handle;
-	size = zram_get_obj_size(meta, index);
+	if (zram_wb_enabled(zram)) {
+		zram_slot_lock(zram, index);
+		if (zram_test_flag(zram, index, ZRAM_WB)) {
+			struct bio_vec bvec;
 
-	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-		memset(mem, 0, PAGE_SIZE);
+			zram_slot_unlock(zram, index);
+
+			bvec.bv_page = page;
+			bvec.bv_len = PAGE_SIZE;
+			bvec.bv_offset = 0;
+			return read_from_bdev(zram, &bvec,
+					zram_get_element(zram, index),
+					bio, partial_io);
+		}
+		zram_slot_unlock(zram, index);
+	}
+
+	zram_slot_lock(zram, index);
+	handle = zram_get_handle(zram, index);
+	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
+		unsigned long value;
+		void *mem;
+
+		value = handle ? zram_get_element(zram, index) : 0;
+		mem = kmap_atomic(page);
+		zram_fill_page(mem, PAGE_SIZE, value);
+		kunmap_atomic(mem);
+		zram_slot_unlock(zram, index);
 		return 0;
 	}
 
-	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+	size = zram_get_obj_size(zram, index);
+
+	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
-		memcpy(mem, cmem, PAGE_SIZE);
+		dst = kmap_atomic(page);
+		memcpy(dst, src, PAGE_SIZE);
+		kunmap_atomic(dst);
+		ret = 0;
 	} else {
 		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
-		ret = zcomp_decompress(zstrm, cmem, size, mem);
+		dst = kmap_atomic(page);
+		ret = zcomp_decompress(zstrm, src, size, dst);
+		kunmap_atomic(dst);
 		zcomp_stream_put(zram->comp);
 	}
-	zs_unmap_object(meta->mem_pool, handle);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zs_unmap_object(zram->mem_pool, handle);
+	zram_slot_unlock(zram, index);
 
 	/* Should NEVER happen. Return bio error if it does. */
-	if (unlikely(ret)) {
-		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset)
-{
-	int ret;
-	struct page *page;
-	unsigned char *user_mem, *uncmem = NULL;
-	struct zram_meta *meta = zram->meta;
-	page = bvec->bv_page;
-
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
-	if (unlikely(!meta->table[index].handle) ||
-			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-		handle_zero_page(bvec);
-		return 0;
-	}
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-
-	if (is_partial_io(bvec))
-		/* Use  a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-
-	if (!uncmem) {
-		pr_err("Unable to allocate temp memory\n");
-		ret = -ENOMEM;
-		goto out_cleanup;
-	}
-
-	ret = zram_decompress_page(zram, uncmem, index);
-	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret))
-		goto out_cleanup;
+		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 
-	if (is_partial_io(bvec))
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-				bvec->bv_len);
-
-	flush_dcache_page(page);
-	ret = 0;
-out_cleanup:
-	kunmap_atomic(user_mem);
-	if (is_partial_io(bvec))
-		kfree(uncmem);
 	return ret;
 }
 
-static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
-			   int offset)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+				u32 index, int offset, struct bio *bio)
 {
-	int ret = 0;
-	unsigned int clen;
-	unsigned long handle = 0;
+	int ret;
 	struct page *page;
-	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
-	struct zram_meta *meta = zram->meta;
-	struct zcomp_strm *zstrm = NULL;
-	unsigned long alloced_pages;
-	static unsigned long zram_rs_time;
 
 	page = bvec->bv_page;
 	if (is_partial_io(bvec)) {
-		/*
-		 * This is a partial IO. We need to read the full page
-		 * before to write the changes.
-		 */
-		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
-		if (!uncmem) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		ret = zram_decompress_page(zram, uncmem, index);
-		if (ret)
-			goto out;
+		/* Use a temporary buffer to decompress the page */
+		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+		if (!page)
+			return -ENOMEM;
 	}
 
+	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
+	if (unlikely(ret))
+		goto out;
+
+	if (is_partial_io(bvec)) {
+		void *dst = kmap_atomic(bvec->bv_page);
+		void *src = kmap_atomic(page);
+
+		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
+		kunmap_atomic(src);
+		kunmap_atomic(dst);
+	}
+out:
+	if (is_partial_io(bvec))
+		__free_page(page);
+
+	return ret;
+}
+
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+				u32 index, struct bio *bio)
+{
+	int ret = 0;
+	unsigned long alloced_pages;
+	unsigned long handle = 0;
+	unsigned int comp_len = 0;
+	void *src, *dst, *mem;
+	struct zcomp_strm *zstrm;
+	struct page *page = bvec->bv_page;
+	unsigned long element = 0;
+	enum zram_pageflags flags = 0;
+	bool allow_wb = true;
+
+	mem = kmap_atomic(page);
+	if (page_same_filled(mem, &element)) {
+		kunmap_atomic(mem);
+		/* Free memory associated with this sector now. */
+		flags = ZRAM_SAME;
+		atomic64_inc(&zram->stats.same_pages);
+		goto out;
+	}
+	kunmap_atomic(mem);
+
 compress_again:
-	user_mem = kmap_atomic(page);
-	if (is_partial_io(bvec)) {
-		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
-		       bvec->bv_len);
-		kunmap_atomic(user_mem);
-		user_mem = NULL;
-	} else {
-		uncmem = user_mem;
-	}
-
-	if (page_zero_filled(uncmem)) {
-		if (user_mem)
-			kunmap_atomic(user_mem);
-		/* Free memory associated with this sector now. */
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
-		zram_free_page(zram, index);
-		zram_set_flag(meta, index, ZRAM_ZERO);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-
-		atomic64_inc(&zram->stats.zero_pages);
-		ret = 0;
-		goto out;
-	}
-
 	zstrm = zcomp_stream_get(zram->comp);
-	ret = zcomp_compress(zstrm, uncmem, &clen);
-	if (!is_partial_io(bvec)) {
-		kunmap_atomic(user_mem);
-		user_mem = NULL;
-		uncmem = NULL;
-	}
+	src = kmap_atomic(page);
+	ret = zcomp_compress(zstrm, src, &comp_len);
+	kunmap_atomic(src);
 
 	if (unlikely(ret)) {
+		zcomp_stream_put(zram->comp);
 		pr_err("Compression failed! err=%d\n", ret);
-		goto out;
+		zs_free(zram->mem_pool, handle);
+		return ret;
 	}
 
-	src = zstrm->buffer;
-	if (unlikely(clen > max_zpage_size)) {
-		clen = PAGE_SIZE;
-		if (is_partial_io(bvec))
-			src = uncmem;
+	if (unlikely(comp_len >= huge_class_size)) {
+		if (zram_wb_enabled(zram) && allow_wb) {
+			zcomp_stream_put(zram->comp);
+			ret = write_to_bdev(zram, bvec, index, bio, &element);
+			if (!ret) {
+				flags = ZRAM_WB;
+				ret = 1;
+				goto out;
+			}
+			allow_wb = false;
+			goto compress_again;
+		}
 	}
 
 	/*
@@ -751,7 +1152,7 @@
 	 * from the slow path and handle has already been allocated.
 	 */
 	if (!handle)
-		handle = zs_malloc(meta->mem_pool, clen,
+		handle = zs_malloc(zram->mem_pool, comp_len,
 				__GFP_KSWAPD_RECLAIM |
 				__GFP_NOWARN |
 				__GFP_HIGHMEM |
@@ -759,66 +1160,101 @@
 				__GFP_CMA);
 	if (!handle) {
 		zcomp_stream_put(zram->comp);
-		zstrm = NULL;
-
 		atomic64_inc(&zram->stats.writestall);
-
-		handle = zs_malloc(meta->mem_pool, clen,
+		handle = zs_malloc(zram->mem_pool, comp_len,
 				GFP_NOIO | __GFP_HIGHMEM |
 				__GFP_MOVABLE | __GFP_CMA);
 		if (handle)
 			goto compress_again;
-
-		if (printk_timed_ratelimit(&zram_rs_time,
-					   ALLOC_ERROR_LOG_RATE_MS))
-			pr_err("Error allocating memory for compressed page: %u, size=%u\n",
-			       index, clen);
-		ret = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
-	alloced_pages = zs_get_total_pages(meta->mem_pool);
+	alloced_pages = zs_get_total_pages(zram->mem_pool);
 	update_used_max(zram, alloced_pages);
 
 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
-		zs_free(meta->mem_pool, handle);
-		ret = -ENOMEM;
-		goto out;
+		zcomp_stream_put(zram->comp);
+		zs_free(zram->mem_pool, handle);
+		return -ENOMEM;
 	}
 
-	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
+	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
-	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+	src = zstrm->buffer;
+	if (comp_len == PAGE_SIZE)
 		src = kmap_atomic(page);
-		memcpy(cmem, src, PAGE_SIZE);
+	memcpy(dst, src, comp_len);
+	if (comp_len == PAGE_SIZE)
 		kunmap_atomic(src);
-	} else {
-		memcpy(cmem, src, clen);
-	}
 
 	zcomp_stream_put(zram->comp);
-	zstrm = NULL;
-	zs_unmap_object(meta->mem_pool, handle);
-
+	zs_unmap_object(zram->mem_pool, handle);
+	atomic64_add(comp_len, &zram->stats.compr_data_size);
+out:
 	/*
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_slot_lock(zram, index);
 	zram_free_page(zram, index);
 
-	meta->table[index].handle = handle;
-	zram_set_obj_size(meta, index, clen);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	if (comp_len == PAGE_SIZE) {
+		zram_set_flag(zram, index, ZRAM_HUGE);
+		atomic64_inc(&zram->stats.huge_pages);
+	}
+
+	if (flags) {
+		zram_set_flag(zram, index, flags);
+		zram_set_element(zram, index, element);
+	}  else {
+		zram_set_handle(zram, index, handle);
+		zram_set_obj_size(zram, index, comp_len);
+	}
+	zram_slot_unlock(zram, index);
 
 	/* Update stats */
-	atomic64_add(clen, &zram->stats.compr_data_size);
 	atomic64_inc(&zram->stats.pages_stored);
+	return ret;
+}
+
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+				u32 index, int offset, struct bio *bio)
+{
+	int ret;
+	struct page *page = NULL;
+	void *src;
+	struct bio_vec vec;
+
+	vec = *bvec;
+	if (is_partial_io(bvec)) {
+		void *dst;
+		/*
+		 * This is a partial IO. We need to read the full page
+		 * before to write the changes.
+		 */
+		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
+		if (!page)
+			return -ENOMEM;
+
+		ret = __zram_bvec_read(zram, page, index, bio, true);
+		if (ret)
+			goto out;
+
+		src = kmap_atomic(bvec->bv_page);
+		dst = kmap_atomic(page);
+		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
+		kunmap_atomic(dst);
+		kunmap_atomic(src);
+
+		vec.bv_page = page;
+		vec.bv_len = PAGE_SIZE;
+		vec.bv_offset = 0;
+	}
+
+	ret = __zram_bvec_write(zram, &vec, index, bio);
 out:
-	if (zstrm)
-		zcomp_stream_put(zram->comp);
 	if (is_partial_io(bvec))
-		kfree(uncmem);
+		__free_page(page);
 	return ret;
 }
 
@@ -831,7 +1267,6 @@
 			     int offset, struct bio *bio)
 {
 	size_t n = bio->bi_iter.bi_size;
-	struct zram_meta *meta = zram->meta;
 
 	/*
 	 * zram manages data in physical block size units. Because logical block
@@ -852,17 +1287,22 @@
 	}
 
 	while (n >= PAGE_SIZE) {
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_slot_lock(zram, index);
 		zram_free_page(zram, index);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_slot_unlock(zram, index);
 		atomic64_inc(&zram->stats.notify_free);
 		index++;
 		n -= PAGE_SIZE;
 	}
 }
 
+/*
+ * Returns a negative errno on error. Otherwise returns 0 or 1:
+ * 0 if the IO request was completed synchronously,
+ * 1 if the IO request was successfully submitted.
+ */
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-			int offset, bool is_write)
+			int offset, bool is_write, struct bio *bio)
 {
 	unsigned long start_time = jiffies;
 	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
@@ -873,15 +1313,20 @@
 
 	if (!is_write) {
 		atomic64_inc(&zram->stats.num_reads);
-		ret = zram_bvec_read(zram, bvec, index, offset);
+		ret = zram_bvec_read(zram, bvec, index, offset, bio);
+		flush_dcache_page(bvec->bv_page);
 	} else {
 		atomic64_inc(&zram->stats.num_writes);
-		ret = zram_bvec_write(zram, bvec, index, offset);
+		ret = zram_bvec_write(zram, bvec, index, offset, bio);
 	}
 
 	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
 
-	if (unlikely(ret)) {
+	zram_slot_lock(zram, index);
+	zram_accessed(zram, index);
+	zram_slot_unlock(zram, index);
+
+	if (unlikely(ret < 0)) {
 		if (!is_write)
 			atomic64_inc(&zram->stats.failed_reads);
 		else
@@ -909,34 +1354,21 @@
 	}
 
 	bio_for_each_segment(bvec, bio, iter) {
-		int max_transfer_size = PAGE_SIZE - offset;
+		struct bio_vec bv = bvec;
+		unsigned int unwritten = bvec.bv_len;
 
-		if (bvec.bv_len > max_transfer_size) {
-			/*
-			 * zram_bvec_rw() can only make operation on a single
-			 * zram page. Split the bio vector.
-			 */
-			struct bio_vec bv;
-
-			bv.bv_page = bvec.bv_page;
-			bv.bv_len = max_transfer_size;
-			bv.bv_offset = bvec.bv_offset;
-
+		do {
+			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
+							unwritten);
 			if (zram_bvec_rw(zram, &bv, index, offset,
-					 op_is_write(bio_op(bio))) < 0)
+					op_is_write(bio_op(bio)), bio) < 0)
 				goto out;
 
-			bv.bv_len = bvec.bv_len - max_transfer_size;
-			bv.bv_offset += max_transfer_size;
-			if (zram_bvec_rw(zram, &bv, index + 1, 0,
-					 op_is_write(bio_op(bio))) < 0)
-				goto out;
-		} else
-			if (zram_bvec_rw(zram, &bvec, index, offset,
-					 op_is_write(bio_op(bio))) < 0)
-				goto out;
+			bv.bv_offset += bv.bv_len;
+			unwritten -= bv.bv_len;
 
-		update_position(&index, &offset, &bvec);
+			update_position(&index, &offset, &bv);
+		} while (unwritten);
 	}
 
 	bio_endio(bio);
@@ -953,22 +1385,15 @@
 {
 	struct zram *zram = queue->queuedata;
 
-	if (unlikely(!zram_meta_get(zram)))
-		goto error;
-
-	blk_queue_split(queue, &bio, queue->bio_split);
-
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 					bio->bi_iter.bi_size)) {
 		atomic64_inc(&zram->stats.invalid_io);
-		goto put_zram;
+		goto error;
 	}
 
 	__zram_make_request(zram, bio);
-	zram_meta_put(zram);
 	return BLK_QC_T_NONE;
-put_zram:
-	zram_meta_put(zram);
+
 error:
 	bio_io_error(bio);
 	return BLK_QC_T_NONE;
@@ -978,45 +1403,39 @@
 				unsigned long index)
 {
 	struct zram *zram;
-	struct zram_meta *meta;
 
 	zram = bdev->bd_disk->private_data;
-	meta = zram->meta;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_slot_lock(zram, index);
 	zram_free_page(zram, index);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_slot_unlock(zram, index);
 	atomic64_inc(&zram->stats.notify_free);
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
 		       struct page *page, bool is_write)
 {
-	int offset, err = -EIO;
+	int offset, ret;
 	u32 index;
 	struct zram *zram;
 	struct bio_vec bv;
 
 	zram = bdev->bd_disk->private_data;
-	if (unlikely(!zram_meta_get(zram)))
-		goto out;
 
 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
 		atomic64_inc(&zram->stats.invalid_io);
-		err = -EINVAL;
-		goto put_zram;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
-	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
 	bv.bv_page = page;
 	bv.bv_len = PAGE_SIZE;
 	bv.bv_offset = 0;
 
-	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
-put_zram:
-	zram_meta_put(zram);
+	ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
 out:
 	/*
 	 * If I/O fails, just return error(ie, non-zero) without
@@ -1026,14 +1445,24 @@
 	 * bio->bi_end_io does things to handle the error
 	 * (e.g., SetPageError, set_page_dirty and extra works).
 	 */
-	if (err == 0)
+	if (unlikely(ret < 0))
+		return ret;
+
+	switch (ret) {
+	case 0:
 		page_endio(page, is_write, 0);
-	return err;
+		break;
+	case 1:
+		ret = 0;
+		break;
+	default:
+		WARN_ON(1);
+	}
+	return ret;
 }
 
 static void zram_reset_device(struct zram *zram)
 {
-	struct zram_meta *meta;
 	struct zcomp *comp;
 	u64 disksize;
 
@@ -1046,23 +1475,8 @@
 		return;
 	}
 
-	meta = zram->meta;
 	comp = zram->comp;
 	disksize = zram->disksize;
-	/*
-	 * Refcount will go down to 0 eventually and r/w handler
-	 * cannot handle further I/O so it will bail out by
-	 * check zram_meta_get.
-	 */
-	zram_meta_put(zram);
-	/*
-	 * We want to free zram_meta in process context to avoid
-	 * deadlock between reclaim path and any other locks.
-	 */
-	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
-
-	/* Reset stats */
-	memset(&zram->stats, 0, sizeof(zram->stats));
 	zram->disksize = 0;
 
 	set_capacity(zram->disk, 0);
@@ -1070,8 +1484,10 @@
 
 	up_write(&zram->init_lock);
 	/* I/O operation under all of CPU are done so let's free */
-	zram_meta_free(meta, disksize);
+	zram_meta_free(zram, disksize);
+	memset(&zram->stats, 0, sizeof(zram->stats));
 	zcomp_destroy(comp);
+	reset_bdev(zram);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -1079,7 +1495,6 @@
 {
 	u64 disksize;
 	struct zcomp *comp;
-	struct zram_meta *meta;
 	struct zram *zram = dev_to_zram(dev);
 	int err;
 
@@ -1087,10 +1502,18 @@
 	if (!disksize)
 		return -EINVAL;
 
+	down_write(&zram->init_lock);
+	if (init_done(zram)) {
+		pr_info("Cannot change disksize for initialized device\n");
+		err = -EBUSY;
+		goto out_unlock;
+	}
+
 	disksize = PAGE_ALIGN(disksize);
-	meta = zram_meta_alloc(zram->disk->disk_name, disksize);
-	if (!meta)
-		return -ENOMEM;
+	if (!zram_meta_alloc(zram, disksize)) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
 
 	comp = zcomp_create(zram->compressor);
 	if (IS_ERR(comp)) {
@@ -1100,29 +1523,19 @@
 		goto out_free_meta;
 	}
 
-	down_write(&zram->init_lock);
-	if (init_done(zram)) {
-		pr_info("Cannot change disksize for initialized device\n");
-		err = -EBUSY;
-		goto out_destroy_comp;
-	}
-
-	init_waitqueue_head(&zram->io_done);
-	atomic_set(&zram->refcount, 1);
-	zram->meta = meta;
 	zram->comp = comp;
 	zram->disksize = disksize;
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-	zram_revalidate_disk(zram);
+
+	revalidate_disk(zram->disk);
 	up_write(&zram->init_lock);
 
 	return len;
 
-out_destroy_comp:
-	up_write(&zram->init_lock);
-	zcomp_destroy(comp);
 out_free_meta:
-	zram_meta_free(meta, disksize);
+	zram_meta_free(zram, disksize);
+out_unlock:
+	up_write(&zram->init_lock);
 	return err;
 }
 
@@ -1161,7 +1574,7 @@
 	/* Make sure all the pending I/O are finished */
 	fsync_bdev(bdev);
 	zram_reset_device(zram);
-	zram_revalidate_disk(zram);
+	revalidate_disk(zram->disk);
 	bdput(bdev);
 
 	mutex_lock(&bdev->bd_mutex);
@@ -1197,39 +1610,33 @@
 static DEVICE_ATTR_RW(disksize);
 static DEVICE_ATTR_RO(initstate);
 static DEVICE_ATTR_WO(reset);
-static DEVICE_ATTR_RO(orig_data_size);
-static DEVICE_ATTR_RO(mem_used_total);
-static DEVICE_ATTR_RW(mem_limit);
-static DEVICE_ATTR_RW(mem_used_max);
+static DEVICE_ATTR_WO(mem_limit);
+static DEVICE_ATTR_WO(mem_used_max);
 static DEVICE_ATTR_RW(max_comp_streams);
 static DEVICE_ATTR_RW(comp_algorithm);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RW(backing_dev);
+#endif
 
 static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_disksize.attr,
 	&dev_attr_initstate.attr,
 	&dev_attr_reset.attr,
-	&dev_attr_num_reads.attr,
-	&dev_attr_num_writes.attr,
-	&dev_attr_failed_reads.attr,
-	&dev_attr_failed_writes.attr,
 	&dev_attr_compact.attr,
-	&dev_attr_invalid_io.attr,
-	&dev_attr_notify_free.attr,
-	&dev_attr_zero_pages.attr,
-	&dev_attr_orig_data_size.attr,
-	&dev_attr_compr_data_size.attr,
-	&dev_attr_mem_used_total.attr,
 	&dev_attr_mem_limit.attr,
 	&dev_attr_mem_used_max.attr,
 	&dev_attr_max_comp_streams.attr,
 	&dev_attr_comp_algorithm.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+	&dev_attr_backing_dev.attr,
+#endif
 	&dev_attr_io_stat.attr,
 	&dev_attr_mm_stat.attr,
 	&dev_attr_debug_stat.attr,
 	NULL,
 };
 
-static struct attribute_group zram_disk_attr_group = {
+static const struct attribute_group zram_disk_attr_group = {
 	.attrs = zram_disk_attrs,
 };
 
@@ -1287,6 +1694,7 @@
 	/* zram devices sort of resembles non-rotational disks */
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
 	 * and n*PAGE_SIZED sized I/O requests.
@@ -1297,8 +1705,6 @@
 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
-	zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
-	zram->disk->queue->limits.chunk_sectors = 0;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block
@@ -1314,6 +1720,8 @@
 		zram->disk->queue->limits.discard_zeroes_data = 0;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
+	zram->disk->queue->backing_dev_info->capabilities |=
+					BDI_CAP_STABLE_WRITES;
 	add_disk(zram->disk);
 
 	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
@@ -1324,8 +1732,8 @@
 		goto out_free_disk;
 	}
 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
-	zram->meta = NULL;
 
+	zram_debugfs_register(zram);
 	pr_info("Added device: %s\n", zram->disk->disk_name);
 	return device_id;
 
@@ -1359,6 +1767,7 @@
 	zram->claim = true;
 	mutex_unlock(&bdev->bd_mutex);
 
+	zram_debugfs_unregister(zram);
 	/*
 	 * Remove sysfs first, so no one will perform a disksize
 	 * store while we destroy the devices. This also helps during
@@ -1376,8 +1785,8 @@
 
 	pr_info("Removed device: %s\n", zram->disk->disk_name);
 
-	blk_cleanup_queue(zram->disk->queue);
 	del_gendisk(zram->disk);
+	blk_cleanup_queue(zram->disk->queue);
 	put_disk(zram->disk);
 	kfree(zram);
 	return 0;
@@ -1457,6 +1866,7 @@
 {
 	class_unregister(&zram_control_class);
 	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
+	zram_debugfs_destroy();
 	idr_destroy(&zram_index_idr);
 	unregister_blkdev(zram_major, "zram");
 }
@@ -1471,6 +1881,7 @@
 		return ret;
 	}
 
+	zram_debugfs_create();
 	zram_major = register_blkdev(0, "zram");
 	if (zram_major <= 0) {
 		pr_err("Unable to get major number\n");
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 74fcf10..3a1cac4 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -21,22 +21,6 @@
 
 #include "zcomp.h"
 
-/*-- Configurable parameters */
-
-/*
- * Pages that compress to size greater than this are stored
- * uncompressed in memory.
- */
-static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
-
-/*
- * NOTE: max_zpage_size must be less than or equal to:
- *   ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
- * always return failure.
- */
-
-/*-- End of configurable params */
-
 #define SECTOR_SHIFT		9
 #define SECTORS_PER_PAGE_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTORS_PER_PAGE	(1 << SECTORS_PER_PAGE_SHIFT)
@@ -60,9 +44,11 @@
 
 /* Flags for zram pages (table[page_no].value) */
 enum zram_pageflags {
-	/* Page consists entirely of zeros */
-	ZRAM_ZERO = ZRAM_FLAG_SHIFT,
-	ZRAM_ACCESS,	/* page is now accessed */
+	/* zram slot is locked */
+	ZRAM_LOCK = ZRAM_FLAG_SHIFT,
+	ZRAM_SAME,	/* Page consists of the same element */
+	ZRAM_WB,	/* page is stored on backing_device */
+	ZRAM_HUGE,	/* Incompressible page */
 
 	__NR_ZRAM_PAGEFLAGS,
 };
@@ -71,8 +57,14 @@
 
 /* Allocated for each disk page */
 struct zram_table_entry {
-	unsigned long handle;
+	union {
+		unsigned long handle;
+		unsigned long element;
+	};
 	unsigned long value;
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+	ktime_t ac_time;
+#endif
 };
 
 struct zram_stats {
@@ -83,19 +75,16 @@
 	atomic64_t failed_writes;	/* can happen when memory is too low */
 	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
 	atomic64_t notify_free;	/* no. of swap slot free notifications */
-	atomic64_t zero_pages;		/* no. of zero filled pages */
+	atomic64_t same_pages;		/* no. of same element filled pages */
+	atomic64_t huge_pages;		/* no. of huge pages */
 	atomic64_t pages_stored;	/* no. of pages currently stored */
 	atomic_long_t max_used_pages;	/* no. of maximum pages stored */
 	atomic64_t writestall;		/* no. of write slow paths */
 };
 
-struct zram_meta {
+struct zram {
 	struct zram_table_entry *table;
 	struct zs_pool *mem_pool;
-};
-
-struct zram {
-	struct zram_meta *meta;
 	struct zcomp *comp;
 	struct gendisk *disk;
 	/* Prevent concurrent execution of device init */
@@ -106,9 +95,6 @@
 	unsigned long limit_pages;
 
 	struct zram_stats stats;
-	atomic_t refcount; /* refcount for zram_meta */
-	/* wait all IO under all of cpu are done */
-	wait_queue_head_t io_done;
 	/*
 	 * This is the limit on amount of *uncompressed* worth of data
 	 * we can store in a disk.
@@ -119,5 +105,16 @@
 	 * zram is claimed so open request will be failed
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
+#ifdef CONFIG_ZRAM_WRITEBACK
+	struct file *backing_dev;
+	struct block_device *bdev;
+	unsigned int old_block_size;
+	unsigned long *bitmap;
+	unsigned long nr_pages;
+	spinlock_t bitmap_lock;
+#endif
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+	struct dentry *debugfs_dir;
+#endif
 };
 #endif
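
The zram_drv.h changes above replace the old zero-page tracking with a per-slot union (a zsmalloc handle or, for same-element-filled pages, the element itself) plus new state bits (ZRAM_LOCK, ZRAM_SAME, ZRAM_WB, ZRAM_HUGE) kept in `value` above ZRAM_FLAG_SHIFT. A minimal sketch of how a same-filled slot is represented; the helper names are illustrative only, the driver's real accessors live in zram_drv.c and are not part of this hunk:

static void example_mark_same_filled(struct zram *zram, u32 index,
				     unsigned long element)
{
	/* Same-filled slots reuse the handle word to store the element. */
	zram->table[index].element = element;
	zram->table[index].value |= BIT(ZRAM_SAME);
}

static bool example_slot_is_same_filled(struct zram *zram, u32 index)
{
	return zram->table[index].value & BIT(ZRAM_SAME);
}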
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 70ed8d9..328c907 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -125,6 +125,7 @@
 config BT_HCIUART_3WIRE
 	bool "Three-wire UART (H5) protocol support"
 	depends on BT_HCIUART
+	depends on BT_HCIUART_SERDEV
 	help
 	  The HCI Three-wire UART Transport Layer makes it possible to
 	  user the Bluetooth HCI over a serial port interface. The HCI
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 0a3f24a..b90bbfe 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 
 #if defined(CONFIG_CNSS)
 #include <net/cnss.h>
@@ -69,6 +70,7 @@
 		rc = PTR_ERR(vreg->reg);
 		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
 			__func__, vreg->name, rc);
+		vreg->reg = NULL;
 		goto out;
 	}
 
@@ -614,6 +616,8 @@
 static int bt_power_probe(struct platform_device *pdev)
 {
 	int ret = 0;
+	const struct of_device_id *of_id =
+		of_match_device(bt_power_match_table, &pdev->dev);
 
 	dev_dbg(&pdev->dev, "%s\n", __func__);
 
@@ -654,6 +658,13 @@
 
 	btpdev = pdev;
 
+	if (of_id) {
+		if (strcmp(of_id->compatible, "qca,qca6174") == 0) {
+			bluetooth_toggle_radio(pdev->dev.platform_data, 0);
+			bluetooth_toggle_radio(pdev->dev.platform_data, 1);
+		}
+	}
+
 	return 0;
 
 free_pdata:
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index bff67c5..8dce1a8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -348,6 +348,10 @@
 	/* Additional Realtek 8723BU Bluetooth devices */
 	{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8723DE Bluetooth devices */
+	{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
+	{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+
 	/* Additional Realtek 8821AE Bluetooth devices */
 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 3a8b9ae..4c40fa2 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -231,11 +231,11 @@
 
 	BT_DBG("hu %p wq awake device", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	/* Vote for serial clock */
 	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
 
-	spin_lock(&qca->hci_ibs_lock);
-
 	/* Send wake indication to device */
 	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
 		BT_ERR("Failed to send WAKE to device");
@@ -260,9 +260,10 @@
 
 	BT_DBG("hu %p wq awake rx", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
 
-	spin_lock(&qca->hci_ibs_lock);
 	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
 
 	/* Always acknowledge device wake up,
@@ -287,7 +288,11 @@
 
 	BT_DBG("hu %p rx clock vote off", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+
+	spin_unlock(&qca->hci_ibs_lock);
 }
 
 static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
@@ -298,6 +303,8 @@
 
 	BT_DBG("hu %p tx clock vote off", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	/* Run HCI tx handling unlocked */
 	hci_uart_tx_wakeup(hu);
 
@@ -305,6 +312,8 @@
 	 * It is up to the tty driver to pend the clocks off until tx done.
 	 */
 	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+
+	spin_unlock(&qca->hci_ibs_lock);
 }
 
 static void hci_ibs_tx_idle_timeout(unsigned long arg)
@@ -520,8 +529,12 @@
 
 	BT_DBG("hu %p qca close", hu);
 
+	spin_lock(&qca->hci_ibs_lock);
+
 	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
 
+	spin_unlock(&qca->hci_ibs_lock);
+
 	skb_queue_purge(&qca->tx_wait_q);
 	skb_queue_purge(&qca->txq);
 	del_timer(&qca->tx_idle_timer);
@@ -884,7 +897,7 @@
 	 */
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
-	set_current_state(TASK_INTERRUPTIBLE);
+	set_current_state(TASK_RUNNING);
 
 	return 0;
 }
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 45d7ecc..4e9e9e6 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -736,7 +736,7 @@
 	ccn = pmu_to_arm_ccn(event->pmu);
 
 	if (hw->sample_period) {
-		dev_warn(ccn->dev, "Sampling not supported!\n");
+		dev_dbg(ccn->dev, "Sampling not supported!\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -744,12 +744,12 @@
 			event->attr.exclude_kernel || event->attr.exclude_hv ||
 			event->attr.exclude_idle || event->attr.exclude_host ||
 			event->attr.exclude_guest) {
-		dev_warn(ccn->dev, "Can't exclude execution levels!\n");
+		dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
 		return -EINVAL;
 	}
 
 	if (event->cpu < 0) {
-		dev_warn(ccn->dev, "Can't provide per-task data!\n");
+		dev_dbg(ccn->dev, "Can't provide per-task data!\n");
 		return -EOPNOTSUPP;
 	}
 	/*
@@ -771,13 +771,13 @@
 	switch (type) {
 	case CCN_TYPE_MN:
 		if (node_xp != ccn->mn_id) {
-			dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+			dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
 			return -EINVAL;
 		}
 		break;
 	case CCN_TYPE_XP:
 		if (node_xp >= ccn->num_xps) {
-			dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+			dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
 			return -EINVAL;
 		}
 		break;
@@ -785,11 +785,11 @@
 		break;
 	default:
 		if (node_xp >= ccn->num_nodes) {
-			dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
+			dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
 			return -EINVAL;
 		}
 		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
-			dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
+			dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
 					type, node_xp);
 			return -EINVAL;
 		}
@@ -808,19 +808,19 @@
 		if (event_id != e->event)
 			continue;
 		if (e->num_ports && port >= e->num_ports) {
-			dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
+			dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
 					port, node_xp);
 			return -EINVAL;
 		}
 		if (e->num_vcs && vc >= e->num_vcs) {
-			dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
+			dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
 					vc, node_xp);
 			return -EINVAL;
 		}
 		valid = 1;
 	}
 	if (!valid) {
-		dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
+		dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
 				event_id, node_xp);
 		return -EINVAL;
 	}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 07b77fb..987e8f5 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2536,7 +2536,7 @@
 	if (!CDROM_CAN(CDC_SELECT_DISC) ||
 	    (arg == CDSL_CURRENT || arg == CDSL_NONE))
 		return cdi->ops->drive_status(cdi, CDSL_CURRENT);
-	if (((int)arg >= cdi->capacity))
+	if (arg >= cdi->capacity)
 		return -EINVAL;
 	return cdrom_slot_status(cdi, arg);
 }
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b0d0181..87d8c3c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -623,3 +623,49 @@
 
 endmenu
 
+config OKL4_PIPE
+	bool "OKL4 Pipe Driver"
+	depends on OKL4_GUEST
+	default n
+	help
+	  Virtual pipe driver for the OKL4 Microvisor. This driver allows
+	  OKL4 Microvisor pipes to be exposed directly to user level as
+	  character devices.
+
+config VSERVICES_SERIAL
+	tristate
+
+config VSERVICES_SERIAL_SERVER
+	tristate "Virtual Services serial server"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_SERIAL
+	select VSERVICES_PROTOCOL_SERIAL_SERVER
+	default y
+	help
+	  Select this option if you want support for server side Virtual
+	  Services serial. A virtual serial service behaves similarly to
+	  a UNIX pseudo terminal (pty), and does not require any physical
+	  serial hardware. Virtual serial devices are typically called
+	  /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_SERIAL_CLIENT
+	tristate "Virtual Services serial client"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_SERIAL
+	select VSERVICES_PROTOCOL_SERIAL_CLIENT
+	default y
+	help
+	  Select this option if you want support for client side Virtual
+	  Services serial. A virtual serial service behaves similarly to
+	  a UNIX pseudo terminal (pty), and does not require any physical
+	  serial hardware. Virtual serial devices are typically called
+	  /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_VTTY_COUNT
+	int "Maximum number of Virtual Services serial devices"
+	depends on VSERVICES_SERIAL
+	range 0 256
+	default "8"
+	help
+	  The maximum number of Virtual Services serial devices to support.
+	  This limit applies to both the client and server.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 81283c4..a00142a 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -67,3 +67,11 @@
 obj-$(CONFIG_MSM_ADSPRPC)       += adsprpc_compat.o
 endif
 obj-$(CONFIG_MSM_RDBG)		+= rdbg.o
+obj-$(CONFIG_OKL4_PIPE)		+= okl4_pipe.o
+CFLAGS_okl4_pipe.o			+= -Werror
+obj-$(CONFIG_VSERVICES_SERIAL)		+= vservices_serial.o
+CFLAGS_vservices_serial.o	+= -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_CLIENT)	+= vs_serial_client.o
+CFLAGS_vs_serial_client.o	 += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_SERVER)	+= vs_serial_server.o
+CFLAGS_vs_serial_server.o	+= -Werror
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index ef89246..b5af2e2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -2743,6 +2743,7 @@
 			goto bail;
 		map->raddr = raddr;
 	}
+	ud->vaddrout = raddr;
  bail:
 	if (err && map) {
 		mutex_lock(&fl->fl_map_mutex);
@@ -3442,7 +3443,7 @@
 	if (err)
 		return err;
 	snprintf(strpid, PID_SIZE, "%d", current->pid);
-	buf_size = strlen(current->comm) + strlen(strpid) + 1;
+	buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
 	fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
 	snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d",
 	current->comm, "_", current->pid);
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
index 93c164b..a7b6e75 100644
--- a/drivers/char/diag/Kconfig
+++ b/drivers/char/diag/Kconfig
@@ -34,6 +34,17 @@
 	  become available, this bridge driver enables DIAG traffic over MHI
 	  and SMUX.
 
+config DIAG_OVER_PCIE
+	bool "Enable Diag traffic to go over PCIE"
+	depends on DIAG_CHAR
+	depends on MSM_MHI
+	help
+	  Diag over PCIe enables sending diag traffic over a PCIe endpoint
+	  when PCIe is available. Diag PCIe channels must be configured and
+	  connected before the transport can be used. If PCIe is not
+	  configured, diag switches to USB mode and diag traffic is routed
+	  over USB.
+
 config DIAG_USES_SMD
 	bool "Enable diag internal interface over SMD"
 	depends on DIAG_CHAR && MSM_SMD
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
index 897375e..86cc533 100644
--- a/drivers/char/diag/Makefile
+++ b/drivers/char/diag/Makefile
@@ -3,5 +3,6 @@
 obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
 obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o
 obj-$(CONFIG_MSM_MHI) += diagfwd_mhi.o
+obj-$(CONFIG_DIAG_OVER_PCIE) += diag_pcie.o
 obj-$(CONFIG_DIAG_USES_SMD) += diagfwd_smd.o
 diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_glink.o diagfwd_peripheral.o diagfwd_socket.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index acee74a..6e0dba5 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -35,6 +35,7 @@
 #include "diagfwd_smd.h"
 #include "diagfwd_socket.h"
 #include "diagfwd_glink.h"
+#include "diag_pcie.h"
 #include "diag_debugfs.h"
 #include "diag_ipc_logging.h"
 
@@ -43,6 +44,9 @@
 static int diag_dbgfs_table_index;
 static int diag_dbgfs_mempool_index;
 static int diag_dbgfs_usbinfo_index;
+#ifdef CONFIG_DIAG_OVER_PCIE
+static int diag_dbgfs_pcieinfo_index;
+#endif
 static int diag_dbgfs_smdinfo_index;
 static int diag_dbgfs_socketinfo_index;
 static int diag_dbgfs_glinkinfo_index;
@@ -481,6 +485,68 @@
 	return ret;
 }
 
+#ifdef CONFIG_DIAG_OVER_PCIE
+static ssize_t diag_dbgfs_read_pcieinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_pcie_info *pcie_info = NULL;
+	unsigned int temp_size = sizeof(char) * DEBUG_BUF_SIZE;
+
+	if (diag_dbgfs_pcieinfo_index >= NUM_DIAG_PCIE_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_pcieinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(temp_size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf))
+		return -ENOMEM;
+
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_pcieinfo_index; i < NUM_DIAG_PCIE_DEV; i++) {
+		pcie_info = &diag_pcie[i];
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"in channel hdl: %pK\n"
+			"out channel hdl: %pK\n"
+			"mempool: %s\n"
+			"read count: %lu\n"
+			"write count: %lu\n"
+			"read work pending: %d\n",
+			pcie_info->id,
+			pcie_info->name,
+			pcie_info->in_handle,
+			pcie_info->out_handle,
+			DIAG_MEMPOOL_GET_NAME(pcie_info->mempool),
+			pcie_info->read_cnt,
+			pcie_info->write_cnt,
+			work_pending(&pcie_info->read_work));
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_pcieinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+#endif
+
 #ifdef CONFIG_DIAG_USES_SMD
 static ssize_t diag_dbgfs_read_smdinfo(struct file *file, char __user *ubuf,
 				       size_t count, loff_t *ppos)
@@ -1080,6 +1146,12 @@
 	.read = diag_dbgfs_read_usbinfo,
 };
 
+#ifdef CONFIG_DIAG_OVER_PCIE
+const struct file_operations diag_dbgfs_pcieinfo_ops = {
+	.read = diag_dbgfs_read_pcieinfo,
+};
+#endif
+
 const struct file_operations diag_dbgfs_dcistats_ops = {
 	.read = diag_dbgfs_read_dcistats,
 };
@@ -1139,6 +1211,13 @@
 	if (!entry)
 		goto err;
 
+#ifdef CONFIG_DIAG_OVER_PCIE
+	entry = debugfs_create_file("pcieinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_pcieinfo_ops);
+	if (!entry)
+		goto err;
+#endif
+
 	entry = debugfs_create_file("dci_stats", 0444, diag_dbgfs_dent, 0,
 				    &diag_dbgfs_dcistats_ops);
 	if (!entry)
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index c31998c..52a57bb 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,12 +27,20 @@
 #include "diag_mux.h"
 #include "diag_usb.h"
 #include "diag_memorydevice.h"
+#include "diag_pcie.h"
 #include "diagfwd_peripheral.h"
 #include "diag_ipc_logging.h"
 
+#ifdef CONFIG_DIAG_OVER_PCIE
+#define diag_mux_register_ops diag_pcie_register_ops
+#else
+#define diag_mux_register_ops diag_usb_register_ops
+#endif
+
 struct diag_mux_state_t *diag_mux;
 static struct diag_logger_t usb_logger;
 static struct diag_logger_t md_logger;
+static struct diag_logger_t pcie_logger;
 
 static struct diag_logger_ops usb_log_ops = {
 	.open = diag_usb_connect_all,
@@ -50,6 +58,16 @@
 	.close_peripheral = diag_md_close_peripheral,
 };
 
+#ifdef CONFIG_DIAG_OVER_PCIE
+static struct diag_logger_ops pcie_log_ops = {
+	.open = diag_pcie_connect_all,
+	.close = diag_pcie_disconnect_all,
+	.queue_read = NULL,
+	.write = diag_pcie_write,
+	.close_peripheral = NULL
+};
+#endif
+
 int diag_mux_init(void)
 {
 	diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
@@ -64,16 +82,29 @@
 	md_logger.mode = DIAG_MEMORY_DEVICE_MODE;
 	md_logger.log_ops = &md_log_ops;
 	diag_md_init();
-
+#ifdef CONFIG_DIAG_OVER_PCIE
+	pcie_logger.mode = DIAG_PCIE_MODE;
+	pcie_logger.log_ops = &pcie_log_ops;
+	diag_mux->pcie_ptr = &pcie_logger;
+#endif
 	/*
 	 * Set USB logging as the default logger. This is the mode
 	 * Diag should be in when it initializes.
 	 */
 	diag_mux->usb_ptr = &usb_logger;
 	diag_mux->md_ptr = &md_logger;
-	diag_mux->logger = &usb_logger;
+	switch (driver->transport_set) {
+	case DIAG_ROUTE_TO_PCIE:
+		diag_mux->logger = &pcie_logger;
+		diag_mux->mode = DIAG_PCIE_MODE;
+		break;
+	case DIAG_ROUTE_TO_USB:
+	default:
+		diag_mux->logger = &usb_logger;
+		diag_mux->mode = DIAG_USB_MODE;
+		break;
+	}
 	diag_mux->mux_mask = 0;
-	diag_mux->mode = DIAG_USB_MODE;
 	return 0;
 }
 
@@ -82,6 +113,56 @@
 	kfree(diag_mux);
 }
 
+#ifdef CONFIG_DIAG_OVER_PCIE
+int diag_pcie_register_ops(int proc, int ctx, struct diag_mux_ops *ops)
+{
+	int err = 0;
+
+	if (!ops)
+		return -EINVAL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return 0;
+
+	pcie_logger.ops[proc] = ops;
+	err = diag_pcie_register(proc, ctx, ops);
+	if (err) {
+		driver->transport_set = DIAG_ROUTE_TO_USB;
+		diag_mux->logger = &usb_logger;
+		diag_mux->mode = DIAG_USB_MODE;
+		usb_logger.ops[proc] = ops;
+		err = diag_usb_register(proc, ctx, ops);
+		if (err) {
+			pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
+					   proc, err);
+			return err;
+		}
+		pr_err("diag: MUX: unable to register pcie operations for proc: %d, err: %d\n",
+			proc, err);
+	}
+	return 0;
+}
+#else
+int diag_usb_register_ops(int proc, int ctx, struct diag_mux_ops *ops)
+{
+	int err = 0;
+
+	if (!ops)
+		return -EINVAL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return 0;
+	usb_logger.ops[proc] = ops;
+	err = diag_usb_register(proc, ctx, ops);
+	if (err) {
+		pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
+		       proc, err);
+		return err;
+	}
+	return 0;
+}
+#endif
+
 int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops)
 {
 	int err = 0;
@@ -91,16 +172,9 @@
 
 	if (proc < 0 || proc >= NUM_MUX_PROC)
 		return 0;
-
-	/* Register with USB logger */
-	usb_logger.ops[proc] = ops;
-	err = diag_usb_register(proc, ctx, ops);
-	if (err) {
-		pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
-		       proc, err);
+	err = diag_mux_register_ops(proc, ctx, ops);
+	if (err)
 		return err;
-	}
-
 	md_logger.ops[proc] = ops;
 	err = diag_md_register(proc, ctx, ops);
 	if (err) {
@@ -150,10 +224,19 @@
 		return -EINVAL;
 	}
 
-	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask) {
 		logger = diag_mux->md_ptr;
-	else
-		logger = diag_mux->usb_ptr;
+	} else {
+		switch (driver->transport_set) {
+		case DIAG_ROUTE_TO_PCIE:
+			logger = diag_mux->pcie_ptr;
+			break;
+		case DIAG_ROUTE_TO_USB:
+		default:
+			logger = diag_mux->usb_ptr;
+			break;
+		}
+	}
 
 	if (logger && logger->log_ops && logger->log_ops->write)
 		return logger->log_ops->write(proc, buf, len, ctx);
@@ -201,6 +284,7 @@
 	}
 
 	switch (*req_mode) {
+	case DIAG_PCIE_MODE:
 	case DIAG_USB_MODE:
 		new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
 		if (new_mask != DIAG_CON_NONE)
@@ -219,6 +303,16 @@
 	}
 
 	switch (diag_mux->mode) {
+	case DIAG_PCIE_MODE:
+		if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+			diag_mux->pcie_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->md_ptr;
+			diag_mux->md_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_MULTI_MODE) {
+			diag_mux->md_ptr->log_ops->open();
+			diag_mux->logger = NULL;
+		}
+		break;
 	case DIAG_USB_MODE:
 		if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
 			diag_mux->usb_ptr->log_ops->close();
@@ -234,8 +328,15 @@
 			diag_mux->md_ptr->log_ops->close();
 			diag_mux->logger = diag_mux->usb_ptr;
 			diag_mux->usb_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_PCIE_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->pcie_ptr;
+			diag_mux->pcie_ptr->log_ops->open();
 		} else if (*req_mode == DIAG_MULTI_MODE) {
-			diag_mux->usb_ptr->log_ops->open();
+			if (driver->transport_set == DIAG_ROUTE_TO_PCIE)
+				diag_mux->pcie_ptr->log_ops->open();
+			else
+				diag_mux->usb_ptr->log_ops->open();
 			diag_mux->logger = NULL;
 		}
 		break;
@@ -243,6 +344,9 @@
 		if (*req_mode == DIAG_USB_MODE) {
 			diag_mux->md_ptr->log_ops->close();
 			diag_mux->logger = diag_mux->usb_ptr;
+		} else if (*req_mode == DIAG_PCIE_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->pcie_ptr;
 		} else if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
 			diag_mux->usb_ptr->log_ops->close();
 			diag_mux->logger = diag_mux->md_ptr;
diff --git a/drivers/char/diag/diag_mux.h b/drivers/char/diag/diag_mux.h
index e1fcebb..dac08b3 100644
--- a/drivers/char/diag/diag_mux.h
+++ b/drivers/char/diag/diag_mux.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
 	struct diag_logger_t *logger;
 	struct diag_logger_t *usb_ptr;
 	struct diag_logger_t *md_ptr;
+	struct diag_logger_t *pcie_ptr;
 	unsigned int mux_mask;
 	unsigned int mode;
 };
@@ -33,6 +34,7 @@
 #define DIAG_MEMORY_DEVICE_MODE		1
 #define DIAG_NO_LOGGING_MODE		2
 #define DIAG_MULTI_MODE			3
+#define DIAG_PCIE_MODE		4
 
 #define DIAG_MUX_LOCAL		0
 #define DIAG_MUX_LOCAL_LAST	1
@@ -73,4 +75,7 @@
 int diag_mux_open_all(struct diag_logger_t *logger);
 int diag_mux_close_all(void);
 int diag_mux_switch_logging(int *new_mode, int *peripheral_mask);
+int diag_pcie_register_ops(int proc, int ctx, struct diag_mux_ops *ops);
+int diag_usb_register_ops(int proc, int ctx, struct diag_mux_ops *ops);
+int diag_mux_register_ops(int proc, int ctx, struct diag_mux_ops *ops);
 #endif
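
With the diag_mux.h additions above, the MUX layer gains a PCIe logger alongside USB and memory-device logging, selected by the new DIAG_PCIE_MODE. A minimal sketch of a transport client registering through the reworked layer; the callback signatures are inferred from the call sites in this patch, and the stub names and return types are assumptions rather than actual diagfwd code:

static int example_open(int ctxt, int mode) { return 0; }
static int example_close(int ctxt, int mode) { return 0; }
static int example_read_done(unsigned char *buf, int len, int ctxt) { return 0; }
static int example_write_done(unsigned char *buf, int len, int ctxt, int mode)
{
	return 0;
}

static struct diag_mux_ops example_ops = {
	.open       = example_open,       /* transport became usable */
	.close      = example_close,      /* transport went away */
	.read_done  = example_read_done,  /* inbound buffer consumed */
	.write_done = example_write_done, /* outbound buffer completed */
};

static int example_register(void)
{
	/*
	 * diag_mux_register() registers the build-time transport first
	 * (PCIe when CONFIG_DIAG_OVER_PCIE is set, with a runtime fallback
	 * to USB; otherwise USB) and then the memory-device logger.
	 */
	return diag_mux_register(DIAG_MUX_LOCAL, 0, &example_ops);
}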
diff --git a/drivers/char/diag/diag_pcie.c b/drivers/char/diag/diag_pcie.c
new file mode 100644
index 0000000..8353abc
--- /dev/null
+++ b/drivers/char/diag/diag_pcie.c
@@ -0,0 +1,659 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/list.h>
+#include "diag_pcie.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diag_ipc_logging.h"
+#define DIAG_LEGACY "DIAG_PCIE"
+
+struct diag_pcie_info diag_pcie[NUM_DIAG_PCIE_DEV] = {
+	{
+		.id = DIAG_PCIE_LOCAL,
+		.name = DIAG_LEGACY,
+		.enabled = ATOMIC_INIT(0),
+		.mempool = POOL_TYPE_MUX_APPS,
+		.ops = NULL,
+		.wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.in_chan_attr = {
+			.max_pkt_size = DIAG_MAX_PKT_SZ,
+			.nr_trbs = 1,
+			.read_buffer = NULL,
+		},
+		.out_chan_attr = {
+			.max_pkt_size = DIAG_MAX_PCIE_PKT_SZ,
+		},
+		.in_chan = MHI_CLIENT_DIAG_OUT,
+		.out_chan = MHI_CLIENT_DIAG_IN,
+	}
+};
+
+static void diag_pcie_event_notifier(struct mhi_dev_client_cb_reason *reason)
+{
+	int i;
+	struct diag_pcie_info *pcie_info = NULL;
+
+	for (i = 0; i < NUM_DIAG_PCIE_DEV; i++) {
+		pcie_info = &diag_pcie[i];
+		if (reason->reason == MHI_DEV_TRE_AVAILABLE)
+			if (reason->ch_id == pcie_info->in_chan) {
+				queue_work(pcie_info->wq,
+					&pcie_info->read_work);
+				break;
+			}
+	}
+}
+
+void diag_pcie_read_work_fn(struct work_struct *work)
+{
+	struct mhi_req ureq;
+	struct diag_pcie_info *pcie_info = container_of(work,
+						      struct diag_pcie_info,
+						      read_work);
+	int bytes_avail = 0;
+
+	if (!pcie_info || !atomic_read(&pcie_info->enabled) ||
+		!atomic_read(&pcie_info->diag_state))
+		return;
+
+	ureq.chan = pcie_info->in_chan;
+	ureq.client = pcie_info->in_handle;
+	ureq.mode = IPA_DMA_SYNC;
+	ureq.buf = pcie_info->in_chan_attr.read_buffer;
+	ureq.len = pcie_info->in_chan_attr.read_buffer_size;
+	bytes_avail = mhi_dev_read_channel(&ureq);
+	if (bytes_avail < 0)
+		return;
+	DIAG_LOG(DIAG_DEBUG_MUX, "read total bytes %d from chan:%d",
+		bytes_avail, pcie_info->in_chan);
+	pcie_info->read_cnt++;
+
+	if (pcie_info->ops && pcie_info->ops->read_done)
+		pcie_info->ops->read_done(pcie_info->in_chan_attr.read_buffer,
+					ureq.transfer_len, pcie_info->ctxt);
+
+}
+
+static void diag_pcie_buf_tbl_remove(struct diag_pcie_info *pcie_info,
+				    unsigned char *buf)
+{
+	struct diag_pcie_buf_tbl_t *temp = NULL;
+	struct diag_pcie_buf_tbl_t *entry = NULL;
+
+	list_for_each_entry_safe(entry, temp, &pcie_info->buf_tbl, track) {
+		if (entry->buf == buf) {
+			DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+			atomic_dec(&entry->ref_count);
+			/*
+			 * Remove reference from the table if it is the
+			 * only instance of the buffer
+			 */
+			if (atomic_read(&entry->ref_count) == 0) {
+				list_del(&entry->track);
+				kfree(entry);
+				entry = NULL;
+			}
+			break;
+		}
+	}
+}
+
+static struct diag_pcie_buf_tbl_t *diag_pcie_buf_tbl_get(
+				struct diag_pcie_info *pcie_info,
+				unsigned char *buf)
+{
+	struct diag_pcie_buf_tbl_t *temp = NULL;
+	struct diag_pcie_buf_tbl_t *entry = NULL;
+
+	list_for_each_entry_safe(entry, temp, &pcie_info->buf_tbl, track) {
+		if (entry->buf == buf) {
+			DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+			atomic_dec(&entry->ref_count);
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+void diag_pcie_write_complete_cb(void *req)
+{
+	struct diag_pcie_context *ctxt = NULL;
+	struct diag_pcie_info *ch;
+	struct diag_pcie_buf_tbl_t *entry = NULL;
+	struct mhi_req *ureq = req;
+	unsigned long flags;
+
+	if (!ureq)
+		return;
+	ctxt = (struct diag_pcie_context *)ureq->context;
+	if (!ctxt)
+		return;
+	ch = ctxt->ch;
+	if (!ch)
+		return;
+	spin_lock_irqsave(&ch->write_lock, flags);
+	ch->write_cnt++;
+	entry = diag_pcie_buf_tbl_get(ch, ctxt->buf);
+	if (!entry) {
+		pr_err_ratelimited("diag: In %s, unable to find entry %pK in the table\n",
+				   __func__, ctxt->buf);
+		spin_unlock_irqrestore(&ch->write_lock, flags);
+		return;
+	}
+	if (atomic_read(&entry->ref_count) != 0) {
+		DIAG_LOG(DIAG_DEBUG_MUX, "partial write_done ref %d\n",
+			 atomic_read(&entry->ref_count));
+		diag_ws_on_copy_complete(DIAG_WS_MUX);
+		spin_unlock_irqrestore(&ch->write_lock, flags);
+		diagmem_free(driver, req, ch->mempool);
+		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %pK\n",
+		 ctxt->buf);
+	list_del(&entry->track);
+	kfree(entry);
+	entry = NULL;
+	if (ch->ops && ch->ops->write_done)
+		ch->ops->write_done(ureq->buf, ureq->len,
+				ctxt->buf_ctxt, DIAG_PCIE_MODE);
+	spin_unlock_irqrestore(&ch->write_lock, flags);
+	diagmem_free(driver, req, ch->mempool);
+	kfree(ctxt);
+	ctxt = NULL;
+}
+
+static int diag_pcie_buf_tbl_add(struct diag_pcie_info *pcie_info,
+				unsigned char *buf, uint32_t len, int ctxt)
+{
+	struct diag_pcie_buf_tbl_t *temp = NULL;
+	struct diag_pcie_buf_tbl_t *entry = NULL;
+
+	list_for_each_entry_safe(entry, temp, &pcie_info->buf_tbl, track) {
+		if (entry->buf == buf) {
+			atomic_inc(&entry->ref_count);
+			return 0;
+		}
+	}
+
+	/* New buffer, not found in the list */
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->buf = buf;
+	entry->ctxt = ctxt;
+	entry->len = len;
+	atomic_set(&entry->ref_count, 1);
+	INIT_LIST_HEAD(&entry->track);
+	list_add_tail(&entry->track, &pcie_info->buf_tbl);
+
+	return 0;
+}
+
+static int diag_pcie_write_ext(struct diag_pcie_info *pcie_info,
+			      unsigned char *buf, int len, int ctxt)
+{
+	int write_len = 0;
+	int bytes_remaining = len;
+	int offset = 0;
+	struct mhi_req *req;
+	struct diag_pcie_context *context;
+	int bytes_to_write;
+	unsigned long flags;
+
+	if (!pcie_info || !buf || len <= 0) {
+		pr_err_ratelimited("diag: In %s, pcie_info: %pK buf: %pK, len: %d\n",
+				   __func__, pcie_info, buf, len);
+		return -EINVAL;
+	}
+
+	while (bytes_remaining > 0) {
+		req = diagmem_alloc(driver, sizeof(struct mhi_req),
+				    pcie_info->mempool);
+		if (!req) {
+			pr_err_ratelimited("diag: In %s, cannot retrieve pcie write ptrs for pcie channel %s\n",
+					   __func__, pcie_info->name);
+			return -ENOMEM;
+		}
+
+		write_len = (bytes_remaining >
+				pcie_info->out_chan_attr.max_pkt_size) ?
+				pcie_info->out_chan_attr.max_pkt_size :
+				bytes_remaining;
+		req->client = pcie_info->out_handle;
+		context = kzalloc(sizeof(*context), GFP_KERNEL);
+		if (!context) {
+			diagmem_free(driver, req, pcie_info->mempool);
+			return -ENOMEM;
+		}
+
+		context->ch = pcie_info;
+		context->buf_ctxt = ctxt;
+		context->buf = buf;
+		req->context = context;
+		req->buf = buf + offset;
+		req->len = write_len;
+		req->chan = pcie_info->out_chan;
+		req->mode = IPA_DMA_ASYNC;
+		req->client_cb = diag_pcie_write_complete_cb;
+		req->snd_cmpl = 1;
+		if (!pcie_info->out_handle ||
+			!atomic_read(&pcie_info->enabled) ||
+			!atomic_read(&pcie_info->diag_state)) {
+			pr_debug_ratelimited("diag: pcie ch %s is not opened\n",
+					     pcie_info->name);
+			kfree(req->context);
+			diagmem_free(driver, req, pcie_info->mempool);
+			return -ENODEV;
+		}
+		spin_lock_irqsave(&pcie_info->write_lock, flags);
+		if (diag_pcie_buf_tbl_add(pcie_info, buf, len, ctxt)) {
+			kfree(req->context);
+			diagmem_free(driver, req, pcie_info->mempool);
+			spin_unlock_irqrestore(&pcie_info->write_lock, flags);
+			return -ENOMEM;
+		}
+		spin_unlock_irqrestore(&pcie_info->write_lock, flags);
+		diag_ws_on_read(DIAG_WS_MUX, len);
+		bytes_to_write = mhi_dev_write_channel(req);
+		diag_ws_on_copy(DIAG_WS_MUX);
+		if (bytes_to_write != write_len) {
+			pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d\n",
+					   __func__, pcie_info->name,
+					bytes_to_write);
+			DIAG_LOG(DIAG_DEBUG_MUX,
+				 "ERR! unable to write to pcie, err: %d\n",
+				bytes_to_write);
+			diag_ws_on_copy_fail(DIAG_WS_MUX);
+			spin_lock_irqsave(&pcie_info->write_lock, flags);
+			diag_pcie_buf_tbl_remove(pcie_info, buf);
+			kfree(req->context);
+			diagmem_free(driver, req, pcie_info->mempool);
+			spin_unlock_irqrestore(&pcie_info->write_lock, flags);
+			return bytes_to_write;
+		}
+		offset += write_len;
+		bytes_remaining -= write_len;
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			 "bytes_remaining: %d write_len: %d, len: %d\n",
+			 bytes_remaining, write_len, len);
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "done writing!");
+
+	return 0;
+}
+
+int diag_pcie_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	struct mhi_req *req;
+	struct diag_pcie_context *context;
+	int bytes_to_write;
+	struct diag_pcie_info *pcie_info;
+	unsigned long flags;
+
+	pcie_info = &diag_pcie[id];
+
+	if (len > pcie_info->out_chan_attr.max_pkt_size) {
+		DIAG_LOG(DIAG_DEBUG_MUX, "len: %d, max_size: %d\n",
+			 len, pcie_info->out_chan_attr.max_pkt_size);
+		return diag_pcie_write_ext(pcie_info, buf, len, ctxt);
+	}
+	req = (struct mhi_req *)diagmem_alloc(driver, sizeof(struct mhi_req),
+		    pcie_info->mempool);
+	if (!req) {
+		pr_err_ratelimited("diag: In %s, cannot retrieve pcie write ptrs for pcie channel %s\n",
+				 __func__, pcie_info->name);
+		return -ENOMEM;
+	}
+	req->client = pcie_info->out_handle;
+	context = kzalloc(sizeof(struct diag_pcie_context), GFP_KERNEL);
+	if (!context) {
+		diagmem_free(driver, req, pcie_info->mempool);
+		return -ENOMEM;
+	}
+
+	context->ch = &diag_pcie[id];
+	context->buf_ctxt = ctxt;
+	context->buf = buf;
+	req->context = context;
+	req->buf = buf;
+	req->len = len;
+	req->chan = pcie_info->out_chan;
+	req->mode = IPA_DMA_ASYNC;
+	req->client_cb = diag_pcie_write_complete_cb;
+	req->snd_cmpl = 1;
+	if (!pcie_info->out_handle || !atomic_read(&pcie_info->enabled) ||
+		!atomic_read(&pcie_info->diag_state)) {
+		pr_debug_ratelimited("diag: pcie ch %s is not opened\n",
+					pcie_info->name);
+		kfree(req->context);
+		diagmem_free(driver, req, pcie_info->mempool);
+		return -ENODEV;
+	}
+	spin_lock_irqsave(&pcie_info->write_lock, flags);
+	if (diag_pcie_buf_tbl_add(pcie_info, buf, len, ctxt)) {
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			"ERR! unable to add buf %pK to table\n", buf);
+		kfree(req->context);
+		diagmem_free(driver, req, pcie_info->mempool);
+		spin_unlock_irqrestore(&pcie_info->write_lock, flags);
+		return -ENOMEM;
+	}
+	spin_unlock_irqrestore(&pcie_info->write_lock, flags);
+	diag_ws_on_read(DIAG_WS_MUX, len);
+	bytes_to_write = mhi_dev_write_channel(req);
+	diag_ws_on_copy(DIAG_WS_MUX);
+	if (bytes_to_write != len) {
+		pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d\n",
+				   __func__, pcie_info->name, bytes_to_write);
+		diag_ws_on_copy_fail(DIAG_WS_MUX);
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			 "ERR! unable to write to pcie, err: %d\n",
+			bytes_to_write);
+		spin_lock_irqsave(&pcie_info->write_lock, flags);
+		diag_pcie_buf_tbl_remove(pcie_info, buf);
+		spin_unlock_irqrestore(&pcie_info->write_lock, flags);
+		kfree(req->context);
+		diagmem_free(driver, req, pcie_info->mempool);
+		return bytes_to_write;
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "wrote packet to pcie chan:%d, len:%d",
+		pcie_info->out_chan, len);
+
+	return 0;
+}
+
+static int pcie_init_read_chan(struct diag_pcie_info *ptr,
+		enum mhi_client_channel chan)
+{
+	int rc = 0;
+	size_t buf_size;
+	void *data_loc;
+
+	if (ptr == NULL) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Bad Input data, quitting\n");
+		return -EINVAL;
+	}
+
+	buf_size = ptr->in_chan_attr.max_pkt_size;
+	data_loc = kzalloc(buf_size, GFP_KERNEL);
+	if (!data_loc)
+		return -ENOMEM;
+
+	kmemleak_not_leak(data_loc);
+	ptr->in_chan_attr.read_buffer = data_loc;
+	ptr->in_chan_attr.read_buffer_size = buf_size;
+
+	return rc;
+
+}
+
+void diag_pcie_client_cb(struct mhi_dev_client_cb_data *cb_data)
+{
+	struct diag_pcie_info *pcie_info = NULL;
+
+	if (!cb_data)
+		return;
+
+	pcie_info = cb_data->user_data;
+	if (!pcie_info)
+		return;
+
+	switch (cb_data->ctrl_info) {
+	case  MHI_STATE_CONNECTED:
+		if (cb_data->channel == pcie_info->out_chan) {
+			DIAG_LOG(DIAG_DEBUG_MUX,
+				" Received connect event from MHI for %d",
+				pcie_info->out_chan);
+			if (atomic_read(&pcie_info->enabled))
+				return;
+			queue_work(pcie_info->wq, &pcie_info->open_work);
+		}
+		break;
+	case MHI_STATE_DISCONNECTED:
+		if (cb_data->channel == pcie_info->out_chan) {
+			DIAG_LOG(DIAG_DEBUG_MUX,
+				" Received disconnect event from MHI for %d",
+				pcie_info->out_chan);
+			if (!atomic_read(&pcie_info->enabled))
+				return;
+			queue_work(pcie_info->wq, &pcie_info->close_work);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int diag_register_pcie_channels(struct diag_pcie_info *pcie_info)
+{
+	int rc = 0;
+
+	if (!pcie_info)
+		return -EIO;
+
+	pcie_info->event_notifier = diag_pcie_event_notifier;
+
+	DIAG_LOG(DIAG_DEBUG_MUX,
+		"Initializing inbound chan %d.\n",
+		pcie_info->in_chan);
+	rc = pcie_init_read_chan(pcie_info, pcie_info->in_chan);
+	if (rc < 0) {
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			"Failed to init inbound 0x%x, ret 0x%x\n",
+			pcie_info->in_chan, rc);
+		return rc;
+	}
+	/* Register for state change notifications from mhi*/
+	rc = mhi_register_state_cb(diag_pcie_client_cb, pcie_info,
+						pcie_info->out_chan);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static void diag_pcie_connect(struct diag_pcie_info *ch)
+{
+	if (!ch || !atomic_read(&ch->enabled))
+		return;
+
+	if (ch->ops && ch->ops->open)
+		if (atomic_read(&ch->diag_state))
+			ch->ops->open(ch->ctxt, DIAG_PCIE_MODE);
+
+	/* As soon as we open the channel, queue a read */
+	queue_work(ch->wq, &(ch->read_work));
+}
+
+void diag_pcie_open_work_fn(struct work_struct *work)
+{
+	int rc = 0;
+	struct diag_pcie_info *pcie_info = container_of(work,
+						      struct diag_pcie_info,
+						      open_work);
+
+	if (!pcie_info || atomic_read(&pcie_info->enabled))
+		return;
+
+	mutex_lock(&pcie_info->out_chan_lock);
+	mutex_lock(&pcie_info->in_chan_lock);
+	/* Open write channel*/
+	rc = mhi_dev_open_channel(pcie_info->out_chan,
+			&pcie_info->out_handle,
+			pcie_info->event_notifier);
+	if (rc < 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Failed to open chan %d, ret %d\n",
+			pcie_info->in_chan, rc);
+		goto handle_not_rdy_err;
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "opened write channel %d",
+		pcie_info->out_chan);
+
+	/* Open read channel*/
+	rc = mhi_dev_open_channel(pcie_info->in_chan,
+			&pcie_info->in_handle,
+			pcie_info->event_notifier);
+	if (rc < 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Failed to open chan %d, ret 0x%x\n",
+			pcie_info->in_chan, rc);
+		goto handle_in_err;
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "opened read channel %d", pcie_info->in_chan);
+	mutex_unlock(&pcie_info->in_chan_lock);
+	mutex_unlock(&pcie_info->out_chan_lock);
+	atomic_set(&pcie_info->enabled, 1);
+	atomic_set(&pcie_info->diag_state, 1);
+	diag_pcie_connect(pcie_info);
+	return;
+handle_in_err:
+	mhi_dev_close_channel(pcie_info->out_handle);
+	atomic_set(&pcie_info->enabled, 0);
+handle_not_rdy_err:
+	mutex_unlock(&pcie_info->in_chan_lock);
+	mutex_unlock(&pcie_info->out_chan_lock);
+}
+
+/*
+ * This function performs pcie connect operations wrt Diag synchronously. It
+ * doesn't translate to actual pcie connect. This is used when Diag switches
+ * logging to pcie mode and wants to mimic pcie connection.
+ */
+void diag_pcie_connect_all(void)
+{
+	int i = 0;
+	struct diag_pcie_info *pcie_info = NULL;
+
+	for (i = 0; i < NUM_DIAG_PCIE_DEV; i++) {
+		pcie_info = &diag_pcie[i];
+		if (!atomic_read(&pcie_info->enabled))
+			continue;
+		atomic_set(&pcie_info->diag_state, 1);
+		diag_pcie_connect(pcie_info);
+	}
+}
+
+static void diag_pcie_disconnect(struct diag_pcie_info *ch)
+{
+	if (!ch)
+		return;
+
+	if (!atomic_read(&ch->enabled) &&
+		driver->pcie_connected && diag_mask_param())
+		diag_clear_masks(0);
+
+	if (ch && ch->ops && ch->ops->close)
+		ch->ops->close(ch->ctxt, DIAG_PCIE_MODE);
+}
+
+/*
+ * This function performs pcie disconnect operations wrt Diag synchronously.
+ * It doesn't translate to actual pcie disconnect. This is used when Diag
+ * switches logging away from pcie mode and wants to mimic pcie disconnect.
+ */
+void diag_pcie_disconnect_all(void)
+{
+	int i = 0;
+	struct diag_pcie_info *pcie_info = NULL;
+
+	for (i = 0; i < NUM_DIAG_PCIE_DEV; i++) {
+		pcie_info = &diag_pcie[i];
+		if (!atomic_read(&pcie_info->enabled))
+			continue;
+		atomic_set(&pcie_info->diag_state, 0);
+		diag_pcie_disconnect(pcie_info);
+	}
+}
+
+void diag_pcie_close_work_fn(struct work_struct *work)
+{
+	int rc = 0;
+	struct diag_pcie_info *pcie_info = container_of(work,
+						      struct diag_pcie_info,
+						      close_work);
+
+	if (!pcie_info || !atomic_read(&pcie_info->enabled))
+		return;
+	mutex_lock(&pcie_info->out_chan_lock);
+	mutex_lock(&pcie_info->in_chan_lock);
+	rc = mhi_dev_close_channel(pcie_info->in_handle);
+	DIAG_LOG(DIAG_DEBUG_MUX, " closed in bound channel %d",
+		pcie_info->in_chan);
+	rc = mhi_dev_close_channel(pcie_info->out_handle);
+	DIAG_LOG(DIAG_DEBUG_MUX, " closed out bound channel %d",
+		pcie_info->out_chan);
+	mutex_unlock(&pcie_info->in_chan_lock);
+	mutex_unlock(&pcie_info->out_chan_lock);
+	atomic_set(&pcie_info->enabled, 0);
+	diag_pcie_disconnect(pcie_info);
+}
+
+int diag_pcie_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+	struct diag_pcie_info *ch = NULL;
+	int rc = 0;
+	unsigned char wq_name[DIAG_PCIE_NAME_SZ + DIAG_PCIE_STRING_SZ];
+
+	if (id < 0 || id >= NUM_DIAG_PCIE_DEV) {
+		pr_err("diag: Unable to register with PCIE, id: %d\n", id);
+		return -EIO;
+	}
+
+	if (!ops) {
+		pr_err("diag: Invalid operations for PCIE\n");
+		return -EIO;
+	}
+
+	ch = &diag_pcie[id];
+	ch->ops = ops;
+	ch->ctxt = ctxt;
+	atomic_set(&ch->diag_state, 0);
+	atomic_set(&ch->enabled, 0);
+	INIT_LIST_HEAD(&ch->buf_tbl);
+	spin_lock_init(&ch->write_lock);
+	INIT_WORK(&(ch->read_work), diag_pcie_read_work_fn);
+	INIT_WORK(&(ch->open_work), diag_pcie_open_work_fn);
+	INIT_WORK(&(ch->close_work), diag_pcie_close_work_fn);
+	strlcpy(wq_name, "DIAG_PCIE_", sizeof(wq_name));
+	strlcat(wq_name, ch->name, sizeof(wq_name));
+	ch->wq = create_singlethread_workqueue(wq_name);
+	if (!ch->wq)
+		return -ENOMEM;
+	diagmem_init(driver, ch->mempool);
+	mutex_init(&ch->in_chan_lock);
+	mutex_init(&ch->out_chan_lock);
+	rc = diag_register_pcie_channels(ch);
+	if (rc < 0) {
+		if (ch->wq)
+			destroy_workqueue(ch->wq);
+		kfree(ch->in_chan_attr.read_buffer);
+		return rc;
+	}
+	return 0;
+}
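
On the write path implemented above, diag_pcie_write() submits a buffer as a single MHI request when it fits within out_chan_attr.max_pkt_size; otherwise diag_pcie_write_ext() splits it into multiple requests that share one reference-counted buf_tbl entry, so the MUX write_done callback fires only once the last chunk completes. A minimal sketch of the chunking arithmetic, using the defaults from diag_pcie.h; the helper itself is illustrative and not part of the driver:

/*
 * With DIAG_MAX_PCIE_PKT_SZ = 2048, a 5000-byte buffer is sent as three
 * requests of 2048, 2048 and 904 bytes, all pointing into the same buffer.
 */
static int example_num_pcie_chunks(int len, int max_pkt_size)
{
	return (len + max_pkt_size - 1) / max_pkt_size;	/* DIV_ROUND_UP */
}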
diff --git a/drivers/char/diag/diag_pcie.h b/drivers/char/diag/diag_pcie.h
new file mode 100644
index 0000000..63d2b4f
--- /dev/null
+++ b/drivers/char/diag/diag_pcie.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGPCIE_H
+#define DIAGPCIE_H
+
+#include "diagchar.h"
+#include "diag_mux.h"
+#include "../../platform/msm/mhi_dev/mhi.h"
+
+#define NUM_DIAG_PCIE_DEV	2
+#define DIAG_PCIE_LOCAL	0
+#define DIAG_PCIE_NAME_SZ	24
+#define DIAG_PCIE_STRING_SZ	30
+#define DIAG_MAX_PKT_SZ	16386
+#define DIAG_MAX_PCIE_PKT_SZ	2048
+enum mhi_chan_dir {
+	MHI_DIR_INVALID = 0x0,
+	MHI_DIR_OUT = 0x1,
+	MHI_DIR_IN = 0x2,
+	MHI_DIR__reserved = 0x80000000
+};
+struct diag_pcie_buf_tbl_t {
+	struct list_head track;
+	unsigned char *buf;
+	uint32_t len;
+	atomic_t ref_count;
+	int ctxt;
+};
+struct chan_attr {
+	/* SW maintained channel id */
+	enum mhi_client_channel chan_id;
+	/* maximum buffer size for this channel */
+	size_t max_pkt_size;
+	/* number of buffers supported in this channel */
+	u32 nr_trbs;
+	/* direction of the channel, see enum mhi_chan_dir */
+	enum mhi_chan_dir dir;
+	/* need to register mhi channel state change callback */
+	bool register_cb;
+	void *read_buffer;
+	size_t read_buffer_size;
+	/* Name of char device */
+	char *device_name;
+};
+struct diag_pcie_context {
+	struct diag_pcie_info *ch;
+	int buf_ctxt;
+	void *buf;
+};
+struct diag_pcie_info {
+	int id;
+	int dev_id;
+	int mempool;
+	int ctxt;
+	int mempool_init;
+	struct mutex in_chan_lock;
+	struct mutex out_chan_lock;
+	u32 out_chan;
+	/* read channel - always even */
+	u32 in_chan;
+	struct mhi_dev_client *out_handle;
+	struct mhi_dev_client *in_handle;
+	struct chan_attr in_chan_attr;
+	struct chan_attr out_chan_attr;
+	atomic_t diag_state;
+	atomic_t enabled;
+	unsigned long read_cnt;
+	unsigned long write_cnt;
+	struct diag_mux_ops *ops;
+	unsigned char *read_buf;
+	struct list_head buf_tbl;
+	spinlock_t write_lock;
+	char name[DIAG_PCIE_NAME_SZ];
+	struct work_struct read_work;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct workqueue_struct *wq;
+	spinlock_t lock;
+	void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
+};
+extern struct diag_pcie_info diag_pcie[NUM_DIAG_PCIE_DEV];
+int diag_pcie_register(int id, int ctxt, struct diag_mux_ops *ops);
+int diag_pcie_queue_read(int id);
+int diag_pcie_write(int id, unsigned char *buf, int len, int ctxt);
+void diag_pcie_connect_all(void);
+void diag_pcie_disconnect_all(void);
+void diag_pcie_exit(int id);
+void diag_pcie_write_complete_cb(void *req);
+void diag_pcie_read_work_fn(struct work_struct *work);
+void diag_pcie_open_work_fn(struct work_struct *work);
+void diag_pcie_close_work_fn(struct work_struct *work);
+void diag_pcie_ready_cb(struct mhi_dev_client_cb_data *cb_data);
+#endif
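
The header above exposes two independent gates that must both be set before data moves: enabled mirrors the physical MHI channel state (flipped by the open/close work functions), while diag_state mirrors the logical Diag logging mode (flipped by diag_pcie_connect_all()/diag_pcie_disconnect_all()). A minimal sketch of the readiness check that diag_pcie_write() performs inline, shown as a standalone helper purely for clarity:

static bool example_pcie_ready(int id)
{
	struct diag_pcie_info *ch = &diag_pcie[id];

	/* Both the MHI channel and the Diag logging mode must be up. */
	return ch->out_handle &&
	       atomic_read(&ch->enabled) &&
	       atomic_read(&ch->diag_state);
}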
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 24a9f8a..f960e8b 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -219,13 +219,6 @@
  */
 static void usb_disconnect(struct diag_usb_info *ch)
 {
-	if (!ch)
-		return;
-
-	if (!atomic_read(&ch->connected) &&
-		driver->usb_connected && diag_mask_param())
-		diag_clear_masks(0);
-
 	if (ch && ch->ops && ch->ops->close)
 		ch->ops->close(ch->ctxt, DIAG_USB_MODE);
 }
@@ -234,6 +227,14 @@
 {
 	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
 						disconnect_work);
+
+	if (!ch)
+		return;
+
+	if (!atomic_read(&ch->connected) &&
+		driver->usb_connected && diag_mask_param())
+		diag_clear_masks(0);
+
 	usb_disconnect(ch);
 }
 
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index f63d78c..2e87d51 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -296,6 +296,8 @@
 #define DIAG_ID_UNKNOWN		0
 #define DIAG_ID_APPS		1
 
+#define DIAG_ROUTE_TO_USB 0
+#define DIAG_ROUTE_TO_PCIE 1
 /* List of remote processor supported */
 enum remote_procs {
 	MDM = 1,
@@ -592,6 +594,7 @@
 	struct mutex cmd_reg_mutex;
 	uint32_t cmd_reg_count;
 	struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
+	int transport_set;
 	/* Sizes that reflect memory pool sizes */
 	unsigned int poolsize;
 	unsigned int poolsize_hdlc;
@@ -643,6 +646,7 @@
 #ifdef CONFIG_DIAG_OVER_USB
 	int usb_connected;
 #endif
+	int pcie_connected;
 	struct workqueue_struct *diag_wq;
 	struct work_struct diag_drain_work;
 	struct work_struct update_user_clients;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 131a050..1cfd4b7 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -40,6 +40,7 @@
 #include "diag_mux.h"
 #include "diag_ipc_logging.h"
 #include "diagfwd_peripheral.h"
+#include "diag_pcie.h"
 
 #include <linux/coresight-stm.h>
 #include <linux/kernel.h>
@@ -86,6 +87,8 @@
  */
 static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
 static unsigned int poolsize_usb_apps = 10;
+static unsigned int itemsize_pcie_apps = sizeof(struct mhi_req);
+static unsigned int poolsize_pcie_apps = 10;
 module_param(poolsize_usb_apps, uint, 0000);
 
 /* Used for DCI client buffers. Don't expose itemsize as it is constant. */
@@ -466,8 +469,10 @@
 	for (i = 0; i < NUM_MD_SESSIONS; i++)
 		if (MD_PERIPHERAL_MASK(i) & session_mask)
 			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
-
-	params.req_mode = USB_MODE;
+	if (driver->transport_set == DIAG_ROUTE_TO_PCIE)
+		params.req_mode = PCIE_MODE;
+	else
+		params.req_mode = USB_MODE;
 	params.mode_param = 0;
 	params.pd_mask = 0;
 	params.peripheral_mask = p_mask;
@@ -1488,7 +1493,8 @@
 	session_info = diag_md_session_get_pid(pid);
 	if (!session_info)
 		return -EINVAL;
-	if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE)
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE &&
+		req_mode != DIAG_PCIE_MODE)
 		return -EINVAL;
 
 	/*
@@ -1499,7 +1505,7 @@
 		bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
 		if (!bit)
 			continue;
-		if (req_mode == DIAG_USB_MODE) {
+		if (req_mode == DIAG_USB_MODE || req_mode == DIAG_PCIE_MODE) {
 			if (driver->md_session_map[i] != session_info)
 				return -EINVAL;
 			driver->md_session_map[i] = NULL;
@@ -1537,18 +1543,21 @@
 	switch (curr_mode) {
 	case DIAG_USB_MODE:
 	case DIAG_MEMORY_DEVICE_MODE:
+	case DIAG_PCIE_MODE:
 	case DIAG_MULTI_MODE:
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE &&
+		req_mode != DIAG_PCIE_MODE)
 		return -EINVAL;
 
-	if (req_mode == DIAG_USB_MODE) {
-		if (curr_mode == DIAG_USB_MODE)
+	if (req_mode == DIAG_USB_MODE || req_mode == DIAG_PCIE_MODE) {
+		if (curr_mode == req_mode)
 			return 0;
+
 		mutex_lock(&driver->md_session_lock);
 		if (driver->md_session_mode == DIAG_MD_NONE
 		    && driver->md_session_mask == 0 && driver->logging_mask) {
@@ -1622,7 +1631,11 @@
 				mutex_unlock(&driver->md_session_lock);
 				return -EINVAL;
 			}
-			err = diag_md_peripheral_switch(current->tgid,
+			if (driver->transport_set == DIAG_ROUTE_TO_PCIE)
+				err = diag_md_peripheral_switch(current->tgid,
+					change_mask, DIAG_PCIE_MODE);
+			else
+				err = diag_md_peripheral_switch(current->tgid,
 					change_mask, DIAG_USB_MODE);
 			mutex_unlock(&driver->md_session_lock);
 		} else {
@@ -1709,13 +1722,17 @@
 	case USB_MODE:
 		new_mode = DIAG_USB_MODE;
 		break;
+	case PCIE_MODE:
+		new_mode = DIAG_PCIE_MODE;
+		break;
 	default:
 		DIAG_LOG(DIAG_DEBUG_USERSPACE,
 			"Request to switch to invalid mode: %d\n",
 			param->req_mode);
 		return;
 	}
-	if ((new_mode == DIAG_USB_MODE) && diag_mask_clear_param)
+	if ((new_mode == DIAG_USB_MODE || new_mode == DIAG_PCIE_MODE) &&
+			diag_mask_clear_param)
 		diag_clear_masks(pid);
 
 }
@@ -1810,6 +1827,9 @@
 	case USB_MODE:
 		new_mode = DIAG_USB_MODE;
 		break;
+	case PCIE_MODE:
+		new_mode = DIAG_PCIE_MODE;
+		break;
 	default:
 		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
 		       __func__, param->req_mode);
@@ -1858,7 +1878,7 @@
 	}
 
 	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
-	      curr_mode == DIAG_USB_MODE)) {
+	      (curr_mode == DIAG_USB_MODE || curr_mode == DIAG_PCIE_MODE))) {
 		queue_work(driver->diag_real_time_wq,
 			   &driver->diag_real_time_work);
 	}
@@ -3227,13 +3247,13 @@
 	mutex_lock(&apps_data_mutex);
 	mutex_lock(&driver->hdlc_disable_mutex);
 	hdlc_disabled = driver->p_hdlc_disabled[APPS_DATA];
+	mutex_unlock(&driver->hdlc_disable_mutex);
 	if (hdlc_disabled)
 		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
 						      pkt_type);
 	else
 		ret = diag_process_apps_data_hdlc(user_space_data, len,
 						  pkt_type);
-	mutex_unlock(&driver->hdlc_disable_mutex);
 	mutex_unlock(&apps_data_mutex);
 
 	diagmem_free(driver, user_space_data, mempool);
@@ -3574,7 +3594,9 @@
 		return -EIO;
 	}
 
-	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
+	if ((driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) ||
+		(driver->logging_mode == DIAG_PCIE_MODE &&
+		!driver->pcie_connected)) {
 		if (!((pkt_type == DCI_DATA_TYPE) ||
 		    (pkt_type == DCI_PKT_TYPE) ||
 		    (pkt_type & DATA_TYPE_DCI_LOG) ||
@@ -3612,8 +3634,10 @@
 		 * stream. If USB is not connected and we are not in memory
 		 * device mode, we should not process these logs/events.
 		 */
-		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
-		    !driver->usb_connected)
+		if (pkt_type && ((driver->logging_mode == DIAG_USB_MODE &&
+		    !driver->usb_connected) ||
+			(driver->logging_mode == DIAG_PCIE_MODE &&
+			 !driver->pcie_connected)))
 			return err;
 	}
 
@@ -3928,6 +3952,13 @@
 	kmemleak_not_leak(driver);
 
 	timer_in_progress = 0;
+#ifdef CONFIG_DIAG_OVER_PCIE
+	driver->transport_set = DIAG_ROUTE_TO_PCIE;
+#else
+	driver->transport_set = DIAG_ROUTE_TO_USB;
+#endif
+	DIAG_LOG(DIAG_DEBUG_MUX, "Transport type set to %d\n",
+		driver->transport_set);
 	driver->delayed_rsp_id = 0;
 	driver->hdlc_disabled = 0;
 	driver->dci_state = DIAG_DCI_NO_ERROR;
@@ -3939,17 +3970,24 @@
 	driver->poolsize_hdlc = poolsize_hdlc;
 	driver->poolsize_dci = poolsize_dci;
 	driver->poolsize_user = poolsize_user;
-	/*
-	 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
-	 * The number of buffers encompasses Diag data generated on
-	 * the Apss processor + 1 for the responses generated exclusively on
-	 * the Apps processor + data from data channels (4 channels per
-	 * peripheral) + data from command channels (2)
-	 */
-	diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
-			poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
 	driver->num_clients = max_clients;
-	driver->logging_mode = DIAG_USB_MODE;
+	if (driver->transport_set == DIAG_ROUTE_TO_PCIE) {
+		driver->logging_mode = DIAG_PCIE_MODE;
+		/*
+		 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
+		 * The number of buffers encompasses Diag data generated on
+		 * the Apss processor + 1 for the responses generated
+		 * exclusively on the Apps processor + data from data channels
+		 *(4 channels periperipheral) + data from command channels (2)
+		 */
+		diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_pcie_apps,
+			poolsize_pcie_apps + 1 + (NUM_PERIPHERALS * 6));
+	} else {
+		driver->logging_mode = DIAG_USB_MODE;
+		diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
+			poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
+	}
+
 	for (i = 0; i < NUM_UPD; i++) {
 		driver->pd_logging_mode[i] = 0;
 		driver->pd_session_clear[i] = 0;
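
Worked example for the POOL_TYPE_MUX_APPS sizing above: in PCIe mode each pool item is sizeof(struct mhi_req) rather than sizeof(struct diag_request), but the item-count formula is unchanged. NUM_PERIPHERALS is platform-specific; the value 8 below is assumed purely for illustration.

/* Sketch only: 10 apps buffers + 1 response buffer + 6 per peripheral. */
#define EXAMPLE_NUM_PERIPHERALS	8
#define EXAMPLE_MUX_POOL_ITEMS	(10 + 1 + (EXAMPLE_NUM_PERIPHERALS * 6))	/* = 59 */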
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f1bc1c5..60a98b6 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1383,6 +1383,9 @@
 		break;
 	case DIAG_MEMORY_DEVICE_MODE:
 		break;
+	case DIAG_PCIE_MODE:
+		driver->pcie_connected = 1;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1416,6 +1419,9 @@
 		break;
 	case DIAG_MEMORY_DEVICE_MODE:
 		break;
+	case DIAG_PCIE_MODE:
+		driver->pcie_connected = 0;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index a6d5ca8..7f27aff 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -619,7 +619,12 @@
 	}
 	if (range->ssid_last >= mask->ssid_last) {
 		temp_range = range->ssid_last - mask->ssid_first + 1;
-		mask->ssid_last = range->ssid_last;
+		if (temp_range > MAX_SSID_PER_RANGE) {
+			temp_range = MAX_SSID_PER_RANGE;
+			mask->ssid_last = mask->ssid_first + temp_range - 1;
+		} else
+			mask->ssid_last = range->ssid_last;
+		mask->ssid_last_tools = mask->ssid_last;
 		mask->range = temp_range;
 	}
 
@@ -1085,7 +1090,7 @@
 		 * connection.
 		 */
 		real_time = MODE_REALTIME;
-	} else if (driver->usb_connected) {
+	} else if (driver->usb_connected || driver->pcie_connected) {
 		/*
 		 * If USB is connected, check individual process. If Memory
 		 * Device Mode is active, set the mode requested by Memory
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 848ce06..560f0df 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -1305,7 +1305,8 @@
 	 * Keeping the buffers busy for Memory Device and Multi Mode.
 	 */
 
-	if (driver->logging_mode != DIAG_USB_MODE) {
+	if (driver->logging_mode != DIAG_USB_MODE &&
+		driver->logging_mode != DIAG_PCIE_MODE) {
 		if (fwd_info->buf_1) {
 			atomic_set(&fwd_info->buf_1->in_busy, 0);
 			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
diff --git a/drivers/char/okl4_pipe.c b/drivers/char/okl4_pipe.c
new file mode 100644
index 0000000..e7a0d8a
--- /dev/null
+++ b/drivers/char/okl4_pipe.c
@@ -0,0 +1,677 @@
+/*
+ * drivers/char/okl4_pipe.c
+ *
+ * Copyright (c) 2015 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * OKL4 Microvisor Pipes driver.
+ *
+ * Clients using this driver must have vclient names of the form
+ * "pipe%d", where %d is the pipe number, which must be
+ * unique and less than MAX_PIPES.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <asm/uaccess.h>
+#include <asm-generic/okl4_virq.h>
+
+#include <microvisor/microvisor.h>
+#if defined(CONFIG_OKL4_VIRTUALISATION)
+#include <asm/okl4-microvisor/okl4tags.h>
+#include <asm/okl4-microvisor/microvisor_bus.h>
+#include <asm/okl4-microvisor/virq.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
+#define __devinit
+#define __devexit
+#define __devexit_p(x) x
+#endif
+
+#define DRIVER_NAME "okl4-pipe"
+#define DEVICE_NAME "okl4-pipe"
+
+#ifndef CONFIG_OF
+#error "okl4-pipe driver only supported on device tree kernels"
+#endif
+
+#define MAX_PIPES 8
+
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+extern int vcpu_prio_normal;
+#endif
+
+static int okl4_pipe_major;
+static struct class *okl4_pipe_class;
+
+/* This can be extended if required */
+struct okl4_pipe_mv {
+	int pipe_id;
+};
+
+struct okl4_pipe {
+	struct okl4_pipe_data_buffer *write_buf;
+	okl4_kcap_t pipe_tx_kcap;
+	okl4_kcap_t pipe_rx_kcap;
+	int tx_irq;
+	int rx_irq;
+	size_t max_msg_size;
+	int ref_count;
+	struct mutex pipe_mutex;
+	spinlock_t pipe_lock;
+
+	struct platform_device *pdev;
+	struct cdev cdev;
+
+	bool reset;
+	bool tx_maybe_avail;
+	bool rx_maybe_avail;
+
+	wait_queue_head_t rx_wait_q;
+	wait_queue_head_t tx_wait_q;
+	wait_queue_head_t poll_wait_q;
+
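+	/*
+	 * Holds the unread tail of the most recent message when a reader
+	 * asked for fewer bytes than the pipe delivered; drained at the
+	 * start of the next read() before new messages are fetched.
+	 */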
+	char *rx_buf;
+	size_t rx_buf_count;
+};
+static struct okl4_pipe pipes[MAX_PIPES];
+
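+/*
+ * Issue a single control operation (e.g. SET_TX_READY, SET_RX_READY or
+ * RESET) to a pipe endpoint via the microvisor pipe-control syscall.
+ */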
+static okl4_error_t
+okl4_pipe_control(okl4_kcap_t kcap, uint8_t control)
+{
+	okl4_pipe_control_t x = 0;
+
+	okl4_pipe_control_setdoop(&x, true);
+	okl4_pipe_control_setoperation(&x, control);
+	return _okl4_sys_pipe_control(kcap, x);
+}
+
+static irqreturn_t
+okl4_pipe_tx_irq(int irq, void *dev)
+{
+	struct okl4_pipe *pipe = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&pipe->pipe_lock);
+	if (okl4_pipe_state_gettxavailable(&payload))
+		pipe->tx_maybe_avail = true;
+	if (okl4_pipe_state_getreset(&payload)) {
+		pipe->reset = true;
+		pipe->tx_maybe_avail = true;
+	}
+	spin_unlock(&pipe->pipe_lock);
+
+	wake_up_interruptible(&pipe->tx_wait_q);
+	wake_up_interruptible(&pipe->poll_wait_q);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+okl4_pipe_rx_irq(int irq, void *dev)
+{
+	struct okl4_pipe *pipe = dev;
+	okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&pipe->pipe_lock);
+	if (okl4_pipe_state_getrxavailable(&payload))
+		pipe->rx_maybe_avail = true;
+	if (okl4_pipe_state_getreset(&payload)) {
+		pipe->reset = true;
+		pipe->rx_maybe_avail = true;
+	}
+	spin_unlock(&pipe->pipe_lock);
+
+	wake_up_interruptible(&pipe->rx_wait_q);
+	wake_up_interruptible(&pipe->poll_wait_q);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t
+okl4_pipe_read(struct file *filp, char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	struct _okl4_sys_pipe_recv_return recv_return;
+	uint32_t *buffer = NULL;
+	size_t recv = 0;
+
+	if (!count)
+		return 0;
+
+again:
+	if (pipe->reset)
+		return -EPIPE;
+
+	if (!pipe->rx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(pipe->rx_wait_q, pipe->rx_maybe_avail))
+		return -ERESTARTSYS;
+
+	if (mutex_lock_interruptible(&pipe->pipe_mutex))
+		return -ERESTARTSYS;
+
+	/* Receive buffered data first */
+	if (pipe->rx_buf_count) {
+		recv = min(pipe->rx_buf_count, count);
+
+		if (copy_to_user(buf, pipe->rx_buf, recv)) {
+			mutex_unlock(&pipe->pipe_mutex);
+			return -EFAULT;
+		}
+
+		pipe->rx_buf_count -= recv;
+
+		if (pipe->rx_buf_count) {
+			memmove(pipe->rx_buf, pipe->rx_buf + recv,
+				pipe->max_msg_size - recv);
+		}
+
+		buf += recv;
+		count -= recv;
+		if (!count) {
+			mutex_unlock(&pipe->pipe_mutex);
+			return recv;
+		}
+	}
+
+	buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+	if (!buffer) {
+		mutex_unlock(&pipe->pipe_mutex);
+		return -ENOMEM;
+	}
+
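+	/*
+	 * Each message on the pipe is framed as a 32-bit length word
+	 * followed by the payload, hence the extra sizeof(uint32_t) in
+	 * the receive buffer allocated above.
+	 */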
+	while (count) {
+		okl4_error_t ret;
+		size_t size;
+
+		spin_lock_irq(&pipe->pipe_lock);
+		recv_return = _okl4_sys_pipe_recv(pipe->pipe_rx_kcap,
+				pipe->max_msg_size + sizeof(uint32_t),
+				(void *)buffer);
+		ret = recv_return.error;
+
+		if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+				ret == OKL4_ERROR_PIPE_EMPTY) {
+			pipe->rx_maybe_avail = false;
+			if (!recv) {
+				if (!(filp->f_flags & O_NONBLOCK)) {
+					spin_unlock_irq(&pipe->pipe_lock);
+					mutex_unlock(&pipe->pipe_mutex);
+					kfree(buffer);
+					goto again;
+				}
+				recv = -EAGAIN;
+			}
+			goto error;
+		} else if (ret != OKL4_OK) {
+			dev_err(&pipe->pdev->dev,
+					"pipe recv returned error %d in okl4_pipe driver!\n",
+					(int)ret);
+			if (!recv)
+				recv = -ENXIO;
+			goto error;
+		}
+
+		spin_unlock_irq(&pipe->pipe_lock);
+
+		size = buffer[0];
+		if (size > pipe->max_msg_size) {
+			/* pipe error */
+			if (!recv)
+				recv = -EPROTO;
+			goto out;
+		}
+
+		/* Save extra received data */
+		if (size > count) {
+			pipe->rx_buf_count = size - count;
+			memcpy(pipe->rx_buf, (char*)&buffer[1] + count,
+					size - count);
+			size = count;
+		}
+
+		if (copy_to_user(buf, &buffer[1], size)) {
+			if (!recv)
+				recv = -EFAULT;
+			goto out;
+		}
+
+
+		count -= size;
+		buf += size;
+		recv += size;
+	}
+out:
+	mutex_unlock(&pipe->pipe_mutex);
+
+	kfree(buffer);
+	return recv;
+error:
+	spin_unlock_irq(&pipe->pipe_lock);
+	goto out;
+}
+
+static ssize_t
+okl4_pipe_write(struct file *filp, const char __user *buf, size_t count,
+		loff_t *f_pos)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	uint32_t *buffer = NULL;
+	size_t sent = 0;
+
+	if (!count)
+		return 0;
+
+again:
+	if (pipe->reset)
+		return -EPIPE;
+
+	if (!pipe->tx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(pipe->tx_wait_q, pipe->tx_maybe_avail))
+		return -ERESTARTSYS;
+
+	if (mutex_lock_interruptible(&pipe->pipe_mutex))
+		return -ERESTARTSYS;
+
+	buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+	if (!buffer) {
+		mutex_unlock(&pipe->pipe_mutex);
+		return -ENOMEM;
+	}
+
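+	/*
+	 * Send the data in chunks of at most max_msg_size bytes, each
+	 * prefixed with a 32-bit length word; the length passed to the
+	 * pipe is rounded up to a whole number of words.
+	 */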
+	while (count) {
+		okl4_error_t ret;
+		size_t size = min(count, pipe->max_msg_size);
+		size_t pipe_size = roundup(size + sizeof(uint32_t),
+				sizeof(uint32_t));
+
+		if (copy_from_user(&buffer[1], buf, size)) {
+			if (!sent)
+				sent = -EFAULT;
+			break;
+		}
+
+		buffer[0] = size;
+
+		spin_lock_irq(&pipe->pipe_lock);
+		ret = _okl4_sys_pipe_send(pipe->pipe_tx_kcap, pipe_size,
+				(void *)buffer);
+		if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+				ret == OKL4_ERROR_PIPE_FULL) {
+			pipe->tx_maybe_avail = false;
+			spin_unlock_irq(&pipe->pipe_lock);
+			if (!sent) {
+				if (filp->f_flags & O_NONBLOCK) {
+					sent = -EAGAIN;
+					break;
+				}
+				mutex_unlock(&pipe->pipe_mutex);
+				kfree(buffer);
+				goto again;
+			}
+			break;
+		} else if (ret != OKL4_OK) {
+			dev_err(&pipe->pdev->dev,
+					"pipe send returned error %d in okl4_pipe driver!\n",
+					(int)ret);
+			if (!sent)
+				sent = -ENXIO;
+			spin_unlock_irq(&pipe->pipe_lock);
+			break;
+		}
+		spin_unlock_irq(&pipe->pipe_lock);
+
+		count -= size;
+		buf += size;
+		sent += size;
+	}
+	mutex_unlock(&pipe->pipe_mutex);
+
+	kfree(buffer);
+	return sent;
+}
+
+
+static unsigned int
+okl4_pipe_poll(struct file *filp, struct poll_table_struct *poll_table)
+{
+	struct okl4_pipe_mv *priv = filp->private_data;
+	int pipe_id = priv->pipe_id;
+	struct okl4_pipe *pipe = &pipes[pipe_id];
+	unsigned int ret = 0;
+
+	poll_wait(filp, &pipe->poll_wait_q, poll_table);
+
+	spin_lock_irq(&pipe->pipe_lock);
+
+	if (pipe->rx_maybe_avail)
+		ret |= POLLIN | POLLRDNORM;
+	if (pipe->tx_maybe_avail)
+		ret |= POLLOUT | POLLWRNORM;
+	if (pipe->reset)
+		ret = POLLHUP;
+
+	spin_unlock_irq(&pipe->pipe_lock);
+
+	return ret;
+}
+
+static int
+okl4_pipe_open(struct inode *inode, struct file *filp)
+{
+	struct okl4_pipe *pipe = container_of(inode->i_cdev,
+			struct okl4_pipe, cdev);
+	struct okl4_pipe_mv *priv = dev_get_drvdata(&pipe->pdev->dev);
+
+	filp->private_data = priv;
+	if (!pipe->ref_count) {
+		pipe->rx_buf = kmalloc(pipe->max_msg_size, GFP_KERNEL);
+		if (!pipe->rx_buf)
+			return -ENOMEM;
+
+		mutex_init(&pipe->pipe_mutex);
+		spin_lock_init(&pipe->pipe_lock);
+
+		pipe->rx_buf_count = 0;
+		pipe->reset = false;
+		pipe->tx_maybe_avail = true;
+		pipe->rx_maybe_avail = true;
+
+		okl4_pipe_control(pipe->pipe_tx_kcap,
+				OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+		okl4_pipe_control(pipe->pipe_rx_kcap,
+				OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+	}
+	pipe->ref_count++;
+	return 0;
+}
+
+static int
+okl4_pipe_close(struct inode *inode, struct file *filp)
+{
+	struct okl4_pipe *pipe = container_of(inode->i_cdev,
+			struct okl4_pipe, cdev);
+
+	pipe->ref_count--;
+	if (!pipe->ref_count) {
+		okl4_pipe_control(pipe->pipe_rx_kcap,
+				OKL4_PIPE_CONTROL_OP_RESET);
+		okl4_pipe_control(pipe->pipe_tx_kcap,
+				OKL4_PIPE_CONTROL_OP_RESET);
+
+		if (pipe->rx_buf)
+			kfree(pipe->rx_buf);
+		pipe->rx_buf = NULL;
+		pipe->rx_buf_count = 0;
+	}
+
+	return 0;
+}
+
+struct file_operations okl4_pipe_fops = {
+	.owner =	THIS_MODULE,
+	.read =		okl4_pipe_read,
+	.write =	okl4_pipe_write,
+	.open =		okl4_pipe_open,
+	.release =	okl4_pipe_close,
+	.poll =		okl4_pipe_poll,
+};
+
+static int __devinit
+okl4_pipe_probe(struct platform_device *pdev)
+{
+	struct okl4_pipe *pipe;
+	int err, pipe_id;
+	struct okl4_pipe_mv *priv;
+	dev_t dev_num;
+	struct device *device = NULL;
+	u32 reg[2];
+	struct resource *irq;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct okl4_pipe_mv),
+			GFP_KERNEL);
+	if (priv == NULL) {
+		err = -ENOMEM;
+		goto fail_alloc_priv;
+	}
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	pipe_id = of_alias_get_id(pdev->dev.of_node, "pipe");
+	if (pipe_id < 0 || pipe_id >= MAX_PIPES) {
+		err = -ENXIO;
+		goto fail_pipe_id;
+	}
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg, 2)) {
+		dev_err(&pdev->dev, "need 2 reg resources\n");
+		err = -ENODEV;
+		goto fail_pipe_id;
+	}
+
+	/* Populate the private structure */
+	priv->pipe_id = pipe_id;
+
+	pipe = &pipes[pipe_id];
+
+	/* Set up and register the pipe device */
+	pipe->pdev = pdev;
+	dev_set_name(&pdev->dev, "%s%d", DEVICE_NAME, (int)pipe_id);
+
+	pipe->ref_count = 0;
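+	/* The two "reg" cells carry the tx and rx pipe capabilities. */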
+	pipe->pipe_tx_kcap = reg[0];
+	pipe->pipe_rx_kcap = reg[1];
+	pipe->max_msg_size = 64;
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no tx irq resource?\n");
+		err = -ENODEV;
+		goto fail_irq_resource;
+	}
+	pipe->tx_irq = irq->start;
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	if (!irq) {
+		dev_err(&pdev->dev, "no rx irq resource?\n");
+		err = -ENODEV;
+		goto fail_irq_resource;
+	}
+	pipe->rx_irq = irq->start;
+
+	pipe->write_buf = kmalloc(sizeof(*pipe->write_buf), GFP_KERNEL);
+	if (!pipe->write_buf) {
+		dev_err(&pdev->dev, "cannot allocate write buffer\n");
+		err = -ENOMEM;
+		goto fail_malloc_write;
+	}
+
+	init_waitqueue_head(&pipe->rx_wait_q);
+	init_waitqueue_head(&pipe->tx_wait_q);
+	init_waitqueue_head(&pipe->poll_wait_q);
+
+	err = devm_request_irq(&pdev->dev, pipe->rx_irq,
+			okl4_pipe_rx_irq, 0, dev_name(&pdev->dev),
+			pipe);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register rx irq %d: %d\n",
+				(int)pipe->rx_irq, (int)err);
+		goto fail_request_rx_irq;
+	}
+
+	err = devm_request_irq(&pdev->dev, pipe->tx_irq,
+			okl4_pipe_tx_irq, 0, dev_name(&pdev->dev),
+			pipe);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register tx irq %d: %d\n",
+				(int)pipe->tx_irq, (int)err);
+		goto fail_request_tx_irq;
+	}
+
+	dev_num = MKDEV(okl4_pipe_major, pipe_id);
+
+	cdev_init(&pipe->cdev, &okl4_pipe_fops);
+	pipe->cdev.owner = THIS_MODULE;
+	err = cdev_add(&pipe->cdev, dev_num, 1);
+	if (err) {
+		dev_err(&pdev->dev, "cannot add device: %d\n", (int)err);
+		goto fail_cdev_add;
+	}
+
+	device = device_create(okl4_pipe_class, NULL, dev_num, NULL,
+			DEVICE_NAME "%d", pipe_id);
+	if (IS_ERR(device)) {
+		err = PTR_ERR(device);
+		dev_err(&pdev->dev, "cannot create device: %d\n", (int)err);
+		goto fail_device_create;
+	}
+
+	return 0;
+
+fail_device_create:
+	cdev_del(&pipe->cdev);
+fail_cdev_add:
+	devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+fail_request_tx_irq:
+	devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+fail_request_rx_irq:
+	kfree(pipe->write_buf);
+fail_malloc_write:
+fail_irq_resource:
+fail_pipe_id:
+	dev_set_drvdata(&pdev->dev, NULL);
+	devm_kfree(&pdev->dev, priv);
+fail_alloc_priv:
+	return err;
+}
+
+static int __devexit
+okl4_pipe_remove(struct platform_device *pdev)
+{
+	struct okl4_pipe *pipe;
+	struct okl4_pipe_mv *priv = dev_get_drvdata(&pdev->dev);
+
+	if (priv->pipe_id < 0 || priv->pipe_id >= MAX_PIPES)
+		return -ENXIO;
+
+	pipe = &pipes[priv->pipe_id];
+
+	cdev_del(&pipe->cdev);
+
+	devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+	devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+
+	kfree(pipe->write_buf);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	devm_kfree(&pdev->dev, priv);
+
+	return 0;
+}
+
+static const struct of_device_id okl4_pipe_match[] = {
+	{
+		.compatible = "okl,pipe",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, okl4_pipe_match);
+
+static struct platform_driver okl4_pipe_driver = {
+	.probe		= okl4_pipe_probe,
+	.remove		= __devexit_p(okl4_pipe_remove),
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = okl4_pipe_match,
+	},
+};
+
+static int __init
+okl4_pipe_init(void)
+{
+	int err;
+	dev_t dev_num = 0;
+
+	err = alloc_chrdev_region(&dev_num, 0, MAX_PIPES, DEVICE_NAME);
+	if (err < 0) {
+		printk(KERN_ERR "%s: cannot allocate device region\n", __func__);
+		goto fail_alloc_chrdev_region;
+	}
+	okl4_pipe_major = MAJOR(dev_num);
+
+	okl4_pipe_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(okl4_pipe_class)) {
+		err = PTR_ERR(okl4_pipe_class);
+		goto fail_class_create;
+	}
+
+	/* Register the driver with the microvisor bus */
+	err = platform_driver_register(&okl4_pipe_driver);
+	if (err)
+		goto fail_driver_register;
+
+	return 0;
+
+fail_driver_register:
+	class_destroy(okl4_pipe_class);
+fail_class_create:
+	unregister_chrdev_region(dev_num, MAX_PIPES);
+fail_alloc_chrdev_region:
+	return err;
+}
+
+static void __exit
+okl4_pipe_exit(void)
+{
+	dev_t dev_num = MKDEV(okl4_pipe_major, 0);
+
+	platform_driver_unregister(&okl4_pipe_driver);
+	class_destroy(okl4_pipe_class);
+	unregister_chrdev_region(dev_num, MAX_PIPES);
+}
+
+module_init(okl4_pipe_init);
+module_exit(okl4_pipe_exit);
+
+MODULE_DESCRIPTION("OKL4 pipe driver");
+MODULE_AUTHOR("John Clarke <johnc@cog.systems>");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1b3c731..464b95a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1826,14 +1826,22 @@
 write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 {
 	size_t bytes;
-	__u32 buf[16];
+	__u32 t, buf[16];
 	const char __user *p = buffer;
 
 	while (count > 0) {
+		int b, i = 0;
+
 		bytes = min(count, sizeof(buf));
 		if (copy_from_user(&buf, p, bytes))
 			return -EFAULT;
 
+		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
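+			/*
+			 * XOR each word copied from userspace with
+			 * architecture-provided random data, stopping once
+			 * the CPU cannot supply any more.
+			 */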
+			if (!arch_get_random_int(&t))
+				break;
+			buf[i] ^= t;
+		}
+
 		count -= bytes;
 		p += bytes;
 
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 65b8249..1662e46 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -25,7 +25,7 @@
 	struct tpm_chip *chip;
 
 	/* Data passed to and from the tpm via the read/write calls */
-	atomic_t data_pending;
+	size_t data_pending;
 	struct mutex buffer_mutex;
 
 	struct timer_list user_read_timer;      /* user needs to claim result */
@@ -46,7 +46,7 @@
 	struct file_priv *priv = container_of(work, struct file_priv, work);
 
 	mutex_lock(&priv->buffer_mutex);
-	atomic_set(&priv->data_pending, 0);
+	priv->data_pending = 0;
 	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
 	mutex_unlock(&priv->buffer_mutex);
 }
@@ -72,7 +72,6 @@
 	}
 
 	priv->chip = chip;
-	atomic_set(&priv->data_pending, 0);
 	mutex_init(&priv->buffer_mutex);
 	setup_timer(&priv->user_read_timer, user_reader_timeout,
 			(unsigned long)priv);
@@ -86,28 +85,24 @@
 			size_t size, loff_t *off)
 {
 	struct file_priv *priv = file->private_data;
-	ssize_t ret_size;
+	ssize_t ret_size = 0;
 	int rc;
 
 	del_singleshot_timer_sync(&priv->user_read_timer);
 	flush_work(&priv->work);
-	ret_size = atomic_read(&priv->data_pending);
-	if (ret_size > 0) {	/* relay data */
-		ssize_t orig_ret_size = ret_size;
-		if (size < ret_size)
-			ret_size = size;
+	mutex_lock(&priv->buffer_mutex);
 
-		mutex_lock(&priv->buffer_mutex);
+	if (priv->data_pending) {
+		ret_size = min_t(ssize_t, size, priv->data_pending);
 		rc = copy_to_user(buf, priv->data_buffer, ret_size);
-		memset(priv->data_buffer, 0, orig_ret_size);
+		memset(priv->data_buffer, 0, priv->data_pending);
 		if (rc)
 			ret_size = -EFAULT;
 
-		mutex_unlock(&priv->buffer_mutex);
+		priv->data_pending = 0;
 	}
 
-	atomic_set(&priv->data_pending, 0);
-
+	mutex_unlock(&priv->buffer_mutex);
 	return ret_size;
 }
 
@@ -118,18 +113,20 @@
 	size_t in_size = size;
 	ssize_t out_size;
 
-	/* cannot perform a write until the read has cleared
-	   either via tpm_read or a user_read_timer timeout.
-	   This also prevents splitted buffered writes from blocking here.
-	*/
-	if (atomic_read(&priv->data_pending) != 0)
-		return -EBUSY;
-
 	if (in_size > TPM_BUFSIZE)
 		return -E2BIG;
 
 	mutex_lock(&priv->buffer_mutex);
 
+	/* Cannot perform a write until the read has cleared either via
+	 * tpm_read or a user_read_timer timeout. This also prevents split
+	 * buffered writes from blocking here.
+	 */
+	if (priv->data_pending != 0) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EBUSY;
+	}
+
 	if (copy_from_user
 	    (priv->data_buffer, (void __user *) buf, in_size)) {
 		mutex_unlock(&priv->buffer_mutex);
@@ -159,7 +156,7 @@
 		return out_size;
 	}
 
-	atomic_set(&priv->data_pending, out_size);
+	priv->data_pending = out_size;
 	mutex_unlock(&priv->buffer_mutex);
 
 	/* Set a timeout by which the reader must come claim the result */
@@ -178,7 +175,7 @@
 	del_singleshot_timer_sync(&priv->user_read_timer);
 	flush_work(&priv->work);
 	file->private_data = NULL;
-	atomic_set(&priv->data_pending, 0);
+	priv->data_pending = 0;
 	clear_bit(0, &priv->chip->is_open);
 	kfree(priv);
 	return 0;
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index da69dde..b15479a 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -115,7 +115,7 @@
 	/* Lock the adapter for the duration of the whole sequence. */
 	if (!tpm_dev.client->adapter->algo->master_xfer)
 		return -EOPNOTSUPP;
-	i2c_lock_adapter(tpm_dev.client->adapter);
+	i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
 
 	if (tpm_dev.chip_type == SLB9645) {
 		/* use a combined read for newer chips
@@ -156,7 +156,7 @@
 	}
 
 out:
-	i2c_unlock_adapter(tpm_dev.client->adapter);
+	i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
 	/* take care of 'guard time' */
 	usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
 
@@ -188,7 +188,7 @@
 
 	if (!tpm_dev.client->adapter->algo->master_xfer)
 		return -EOPNOTSUPP;
-	i2c_lock_adapter(tpm_dev.client->adapter);
+	i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
 
 	/* prepend the 'register address' to the buffer */
 	tpm_dev.buf[0] = addr;
@@ -207,7 +207,7 @@
 		usleep_range(sleep_low, sleep_hi);
 	}
 
-	i2c_unlock_adapter(tpm_dev.client->adapter);
+	i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
 	/* take care of 'guard time' */
 	usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
 
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 01eccb1..950c2d2 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -189,6 +189,7 @@
 static int tpm_tis_spi_probe(struct spi_device *dev)
 {
 	struct tpm_tis_spi_phy *phy;
+	int irq;
 
 	phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
 			   GFP_KERNEL);
@@ -201,7 +202,13 @@
 	if (!phy->iobuf)
 		return -ENOMEM;
 
-	return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+	/* If the SPI device has an IRQ then use that */
+	if (dev->irq > 0)
+		irq = dev->irq;
+	else
+		irq = -1;
+
+	return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
 				 NULL);
 }
 
diff --git a/drivers/char/vs_serial_client.c b/drivers/char/vs_serial_client.c
new file mode 100644
index 0000000..a0bf1cc
--- /dev/null
+++ b/drivers/char/vs_serial_client.c
@@ -0,0 +1,132 @@
+/*
+ * drivers/char/vs_serial_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService client driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/client.h>
+
+#include "vs_serial_common.h"
+
+#define client_state_to_port(state) \
+	container_of(state, struct vtty_port, u.vs_client)
+
+static struct vs_mbuf *vs_serial_client_alloc_msg_buf(struct vtty_port *port,
+		struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return vs_client_serial_serial_alloc_msg(&port->u.vs_client, pbuf,
+			gfp_flags);
+}
+
+static void vs_serial_client_free_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	vs_client_serial_serial_free_msg(&port->u.vs_client, pbuf, mbuf);
+}
+
+static int vs_serial_client_send_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	return vs_client_serial_serial_send_msg(&port->u.vs_client, *pbuf,
+			mbuf);
+}
+
+static bool vs_serial_client_is_vservices_running(struct vtty_port *port)
+{
+	return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_client.state.base);
+}
+
+static struct vtty_port_ops client_port_ops = {
+	.alloc_msg_buf	= vs_serial_client_alloc_msg_buf,
+	.free_msg_buf	= vs_serial_client_free_msg_buf,
+	.send_msg_buf	= vs_serial_client_send_msg_buf,
+	.is_running	= vs_serial_client_is_vservices_running,
+};
+
+static struct vs_client_serial_state *
+vs_serial_client_alloc(struct vs_service_device *service)
+{
+	struct vtty_port *port;
+
+	port = vs_serial_alloc_port(service, &client_port_ops);
+	if (!port)
+		return NULL;
+
+	dev_set_drvdata(&service->dev, port);
+	return &port->u.vs_client;
+}
+
+static void vs_serial_client_release(struct vs_client_serial_state *_state)
+{
+	vs_serial_release(client_state_to_port(_state));
+}
+
+static void vs_serial_client_closed(struct vs_client_serial_state *_state)
+{
+	vs_serial_reset(client_state_to_port(_state));
+}
+
+static void vs_serial_client_opened(struct vs_client_serial_state *_state)
+{
+	struct vtty_port *port = client_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "ack_open\n");
+	port->max_transfer_size = _state->packet_size;
+}
+
+static int
+vs_serial_client_handle_message(struct vs_client_serial_state *_state,
+		struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+	return vs_serial_handle_message(client_state_to_port(_state), mbuf,
+			&data);
+}
+
+static struct vs_client_serial vs_client_serial_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_serial_client_alloc,
+	.release		= vs_serial_client_release,
+	.closed			= vs_serial_client_closed,
+	.opened			= vs_serial_client_opened,
+	.serial = {
+		.msg_msg	= vs_serial_client_handle_message,
+	},
+};
+
+static int __init vs_serial_client_init(void)
+{
+	return vservice_serial_client_register(&vs_client_serial_driver,
+			"vserial");
+}
+
+static void __exit vs_serial_client_exit(void)
+{
+	vservice_serial_client_unregister(&vs_client_serial_driver);
+}
+
+module_init(vs_serial_client_init);
+module_exit(vs_serial_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vs_serial_common.h b/drivers/char/vs_serial_common.h
new file mode 100644
index 0000000..2fe7d28
--- /dev/null
+++ b/drivers/char/vs_serial_common.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/char/vs_serial_common.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _VS_SERIAL_COMMON_H
+#define _VS_SERIAL_COMMON_H
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/protocol/serial/client.h>
+
+#define OUTBUFFER_SIZE 1024
+#define vtty_list_last_entry(ptr, type, member) \
+	list_entry((ptr)->prev, type, member)
+
+struct vtty_port;
+struct vs_service_device;
+
+struct vtty_port_ops {
+	struct vs_mbuf	*(*alloc_msg_buf)(struct vtty_port *port,
+			struct vs_pbuf *pbuf, gfp_t gfp_flags);
+	void		(*free_msg_buf)(struct vtty_port *port,
+			struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+	int		(*send_msg_buf)(struct vtty_port *port,
+			struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+	bool		(*is_running)(struct vtty_port *port);
+};
+
+struct vtty_port {
+	union {
+		struct vs_client_serial_state vs_client;
+		struct vs_server_serial_state vs_server;
+	} u;
+
+	struct vs_service_device	*service;
+	int				port_num;
+
+	struct tty_driver		*vtty_driver;
+
+	struct vtty_port_ops		ops;
+
+	/* output data */
+	bool				doing_release;
+
+	int				max_transfer_size;
+
+	/* Tracks if tty layer can receive data from driver */
+	bool				tty_canrecv;
+
+	/*
+	 * List of pending incoming buffers from the vServices stack. If we
+	 * receive a buffer, but cannot write it to the tty layer then we
+	 * queue it on this list to handle later. in_lock protects access to
+	 * the pending_in_packets list and the tty_canrecv field.
+	 */
+	struct list_head		pending_in_packets;
+	spinlock_t			in_lock;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	struct console			console;
+#endif
+
+	struct tty_port			port;
+};
+
+extern struct vtty_port *
+vs_serial_alloc_port(struct vs_service_device *service,
+	struct vtty_port_ops *port_ops);
+extern void vs_serial_release(struct vtty_port *port);
+extern void vs_serial_reset(struct vtty_port *port);
+extern int vs_serial_handle_message(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+
+#endif /* _VS_SERIAL_COMMON_H */
diff --git a/drivers/char/vs_serial_server.c b/drivers/char/vs_serial_server.c
new file mode 100644
index 0000000..d4a169e
--- /dev/null
+++ b/drivers/char/vs_serial_server.c
@@ -0,0 +1,152 @@
+/*
+ * drivers/char/vs_serial_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService server driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+
+#include "vs_serial_common.h"
+
+#define server_state_to_port(state) \
+	container_of(state, struct vtty_port, u.vs_server)
+
+static struct vs_mbuf *vs_serial_server_alloc_msg_buf(struct vtty_port *port,
+		struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return vs_server_serial_serial_alloc_msg(&port->u.vs_server, pbuf,
+			gfp_flags);
+}
+
+static void vs_serial_server_free_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	vs_server_serial_serial_free_msg(&port->u.vs_server, pbuf, mbuf);
+}
+
+static int vs_serial_server_send_msg_buf(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+	return vs_server_serial_serial_send_msg(&port->u.vs_server, *pbuf, mbuf);
+}
+
+static bool vs_serial_server_is_vservices_running(struct vtty_port *port)
+{
+	return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_server.state.base);
+}
+
+static struct vtty_port_ops server_port_ops = {
+	.alloc_msg_buf	= vs_serial_server_alloc_msg_buf,
+	.free_msg_buf	= vs_serial_server_free_msg_buf,
+	.send_msg_buf	= vs_serial_server_send_msg_buf,
+	.is_running	= vs_serial_server_is_vservices_running,
+};
+
+static struct vs_server_serial_state *
+vs_serial_server_alloc(struct vs_service_device *service)
+{
+	struct vtty_port *port;
+
+	port = vs_serial_alloc_port(service, &server_port_ops);
+	if (!port)
+		return NULL;
+
+	dev_set_drvdata(&service->dev, port);
+	return &port->u.vs_server;
+}
+
+static void vs_serial_server_release(struct vs_server_serial_state *_state)
+{
+	vs_serial_release(server_state_to_port(_state));
+}
+
+static void vs_serial_server_closed(struct vs_server_serial_state *_state)
+{
+	vs_serial_reset(server_state_to_port(_state));
+}
+
+static int
+vs_serial_server_handle_message(struct vs_server_serial_state *_state,
+		struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+	return vs_serial_handle_message(server_state_to_port(_state), mbuf,
+			&data);
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_open(struct vs_server_serial_state *_state)
+{
+	struct vtty_port *port = server_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "req_open\n");
+
+	/* FIXME: Jira ticket SDK-3521 - ryanm. */
+	port->max_transfer_size = vs_service_max_mbuf_size(port->service) - 8;
+	_state->packet_size = port->max_transfer_size;
+
+	return VS_SERVER_RESP_SUCCESS;
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_close(struct vs_server_serial_state *_state)
+{
+	struct vtty_port *port = server_state_to_port(_state);
+
+	dev_dbg(&port->service->dev, "req_close\n");
+
+	return VS_SERVER_RESP_SUCCESS;
+}
+
+static struct vs_server_serial vs_server_serial_driver = {
+	.rx_atomic		= true,
+	.alloc			= vs_serial_server_alloc,
+	.release		= vs_serial_server_release,
+	.closed			= vs_serial_server_closed,
+	.open			= vs_serial_server_req_open,
+	.close			= vs_serial_server_req_close,
+	.serial = {
+		.msg_msg	= vs_serial_server_handle_message,
+	},
+
+	/* Large default quota for batching data messages */
+	.in_quota_best		= 16,
+	.out_quota_best		= 16,
+};
+
+static int __init vs_serial_server_init(void)
+{
+	return vservice_serial_server_register(&vs_server_serial_driver,
+			"vserial");
+}
+
+static void __exit vs_serial_server_exit(void)
+{
+	vservice_serial_server_unregister(&vs_server_serial_driver);
+}
+
+module_init(vs_serial_server_init);
+module_exit(vs_serial_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vservices_serial.c b/drivers/char/vservices_serial.c
new file mode 100644
index 0000000..0194eac
--- /dev/null
+++ b/drivers/char/vservices_serial.c
@@ -0,0 +1,634 @@
+/*
+ * drivers/char/vservices_serial.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vServices core driver, shared by the client and server drivers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitmap.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#include "vs_serial_common.h"
+
+struct vtty_in_packet {
+	struct vs_pbuf	pbuf;
+	size_t		offset;
+};
+
+static int max_ttys = CONFIG_VSERVICES_VTTY_COUNT;
+static unsigned long *alloced_ttys;
+module_param(max_ttys, int, S_IRUGO);
+
+static struct tty_driver *vtty_driver;
+
+static DEFINE_MUTEX(tty_bitmap_lock);
+
+static struct vtty_port *dev_to_port(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+#if defined(CONFIG_VSERVICES_SERIAL_SERVER) || \
+    defined(CONFIG_VSERIVCES_SERIAL_SERVER_MODULE)
+	if (service->is_server) {
+		struct vs_server_serial_state *server = dev_get_drvdata(dev);
+		return container_of(server, struct vtty_port, u.vs_server);
+	}
+#endif
+#if defined(CONFIG_VSERVICES_SERIAL_CLIENT) || \
+    defined(CONFIG_VSERIVCES_SERIAL_CLIENT_MODULE)
+	if (!service->is_server) {
+		struct vs_client_serial_state *client = dev_get_drvdata(dev);
+		return container_of(client, struct vtty_port, u.vs_client);
+	}
+#endif
+	/* should never get here */
+	WARN_ON(1);
+	return NULL;
+}
+
+static struct vtty_port *port_from_tty(struct tty_struct *tty)
+{
+	return dev_to_port(tty->dev->parent);
+}
+
+static int vtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct vtty_port *port;
+
+	if (tty->index < 0 || !test_bit(tty->index, alloced_ttys))
+		return -ENXIO;
+
+	port = port_from_tty(tty);
+
+	if (!port)
+		return -ENXIO;
+
+	tty->driver_data = port;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+	if (tty->port)
+		tty->port->low_latency = 0;
+#else
+	tty->low_latency = 0;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	tty_port_install(&port->port, driver, tty);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+	tty->port = &port->port;
+	tty_standard_install(driver, tty);
+#else
+	tty->port = &port->port;
+	if (tty_init_termios(tty) != 0)
+		return -ENOMEM;
+
+	tty_driver_kref_get(driver);
+	tty->count++;
+	driver->ttys[tty->index] = tty;
+#endif
+
+	return 0;
+}
+
+static int vtty_open(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+	return tty_port_open(&port->port, tty, file);
+}
+
+static void vtty_close(struct tty_struct *tty, struct file *file)
+{
+	struct vtty_port *port = tty->driver_data;
+	if (port)
+		tty_port_close(&port->port, tty, file);
+}
+
+static void vtty_shutdown(struct tty_port *port)
+{
+	struct vtty_port *vtty_port =
+			container_of(port, struct vtty_port, port);
+
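+	/*
+	 * Free the containing vtty_port only when vs_serial_release() has
+	 * marked it for teardown; an ordinary tty close must leave the
+	 * port allocated.
+	 */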
+	if (vtty_port->doing_release)
+		kfree(vtty_port);
+}
+
+static int vtty_write_room(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	return vs_service_send_mbufs_available(port->service) *
+			port->max_transfer_size;
+}
+
+static struct vs_mbuf *vserial_alloc_send_buffer(struct vtty_port *port,
+		const unsigned char *buf, size_t size, struct vs_pbuf *pbuf,
+		gfp_t gfp_flags)
+{
+	struct vs_mbuf *mbuf;
+	ssize_t ret;
+
+	mbuf = port->ops.alloc_msg_buf(port, pbuf, gfp_flags);
+	if (IS_ERR(mbuf)) {
+		ret = PTR_ERR(mbuf);
+		goto fail;
+	}
+
+	ret = vs_pbuf_resize(pbuf, size);
+	if (ret < (ssize_t)size)
+		goto fail_free_buf;
+
+	ret = vs_pbuf_copyin(pbuf, 0, buf, size);
+	if (ret < (ssize_t)size)
+		goto fail_free_buf;
+
+	return mbuf;
+
+fail_free_buf:
+	port->ops.free_msg_buf(port, mbuf, pbuf);
+fail:
+	return ERR_PTR(ret);
+}
+
+static int vtty_write(struct tty_struct *tty, const unsigned char *buf,
+		int count)
+{
+	struct vtty_port *port;
+	size_t sent_bytes = 0, size;
+	struct vs_mbuf *mbuf;
+	struct vs_pbuf pbuf;
+	int err;
+
+	if (WARN_ON(!tty || !buf))
+		return -EINVAL;
+
+	port = tty->driver_data;
+	if (!port->ops.is_running(port)) {
+		dev_dbg(&port->service->dev, "tty is not running!");
+		return 0;
+	}
+
+	/*
+	 * We need to break our message up into chunks of
+	 * port->max_transfer_size.
+	 */
+	dev_dbg(&port->service->dev, "Writing %d bytes\n", count);
+	while (sent_bytes < count) {
+		size = min_t(size_t, count - sent_bytes,
+				port->max_transfer_size);
+
+		/*
+		 * Passing &port->u.vs_client here works for both the client
+		 * and the server since vs_client and vs_server are in the
+		 * same union, and therefore have the same address.
+		 */
+		mbuf = vs_service_waiting_alloc(&port->u.vs_client,
+				vserial_alloc_send_buffer(port,
+				buf + sent_bytes, size, &pbuf, GFP_KERNEL));
+		if (IS_ERR(mbuf)) {
+			dev_err(&port->service->dev,
+					"Failed to alloc mbuf of %zu bytes: %ld - resetting service\n",
+					size, PTR_ERR(mbuf));
+			vs_service_reset(port->service, port->service);
+			return -EIO;
+		}
+
+		vs_service_state_lock(port->service);
+		err = port->ops.send_msg_buf(port, mbuf, &pbuf);
+		vs_service_state_unlock(port->service);
+		if (err) {
+			port->ops.free_msg_buf(port, mbuf, &pbuf);
+			dev_err(&port->service->dev,
+					"send failed: %d - resetting service",
+					err);
+			vs_service_reset(port->service, port->service);
+			return -EIO;
+		}
+
+		dev_dbg(&port->service->dev, "Sent %zu bytes (%zu/%d)\n",
+				size, sent_bytes + size, count);
+		sent_bytes += size;
+	}
+
+	dev_dbg(&port->service->dev, "Write complete - sent %zu/%d bytes\n",
+			sent_bytes, count);
+	return sent_bytes;
+}
+
+static int vtty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	return vtty_write(tty, &ch, 1);
+}
+
+static size_t vs_serial_send_pbuf_to_tty(struct vtty_port *port,
+		struct vs_pbuf *pbuf, size_t offset)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	size_t space, size;
+
+	lockdep_assert_held(&port->in_lock);
+
+	size = vs_pbuf_size(pbuf) - offset;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+	space = tty_buffer_request_room(tty->port, size);
+#else
+	space = tty_buffer_request_room(tty, size);
+#endif
+	if (space) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+		tty_insert_flip_string(tty->port, pbuf->data + offset, space);
+		tty_flip_buffer_push(tty->port);
+#else
+		tty_insert_flip_string(tty, pbuf->data + offset, space);
+		tty_flip_buffer_push(tty);
+#endif
+	}
+
+	tty_kref_put(tty);
+
+	/* Return the number of bytes written */
+	return space;
+}
+
+static void vtty_throttle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+
+	dev_dbg(&port->service->dev, "throttle\n");
+
+	spin_lock_bh(&port->in_lock);
+	port->tty_canrecv = false;
+	spin_unlock_bh(&port->in_lock);
+}
+
+static void vtty_unthrottle(struct tty_struct *tty)
+{
+	struct vtty_port *port = tty->driver_data;
+	struct vtty_in_packet *packet;
+	struct vs_mbuf *mbuf;
+	size_t sent;
+
+	dev_dbg(&port->service->dev, "unthrottle\n");
+
+	spin_lock_bh(&port->in_lock);
+
+	while (!list_empty(&port->pending_in_packets)) {
+		mbuf = list_first_entry(&port->pending_in_packets,
+				struct vs_mbuf, queue);
+		packet = mbuf->priv;
+
+		sent = vs_serial_send_pbuf_to_tty(port, &packet->pbuf,
+				packet->offset);
+		packet->offset += sent;
+		if (packet->offset < vs_pbuf_size(&packet->pbuf)) {
+			/*
+			 * Only wrote part of the buffer. This means that we
+			 * still have pending data that cannot be written to
+			 * the tty at this time. The tty layer will rethrottle
+			 * and this function will be called again when the tty
+			 * layer is next able to handle data and we can write
+			 * the remainder of the buffer.
+			 */
+			dev_dbg(&port->service->dev,
+					"unthrottle: Only wrote %zu (%zu/%zu) bytes\n",
+					sent, packet->offset,
+					vs_pbuf_size(&packet->pbuf));
+			break;
+		}
+
+		dev_dbg(&port->service->dev,
+				"unthrottle: wrote %zu (%zu/%zu) bytes\n",
+				sent, packet->offset,
+				vs_pbuf_size(&packet->pbuf));
+
+		/* Wrote the whole buffer - free it */
+		list_del(&mbuf->queue);
+		port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+		kfree(packet);
+	}
+
+	port->tty_canrecv = true;
+	spin_unlock_bh(&port->in_lock);
+}
+
+static struct tty_port_operations vtty_port_ops = {
+	.shutdown	= vtty_shutdown,
+};
+
+static struct tty_operations vtty_ops = {
+	.install	= vtty_install,
+	.open		= vtty_open,
+	.close		= vtty_close,
+	.write		= vtty_write,
+	.write_room	= vtty_write_room,
+	.put_char	= vtty_put_char,
+	.throttle	= vtty_throttle,
+	.unthrottle	= vtty_unthrottle
+};
+
+static int vs_serial_queue_incoming_packet(struct vtty_port *port,
+		struct vs_mbuf *mbuf, struct vs_pbuf *pbuf, size_t offset)
+{
+	struct vtty_in_packet *packet;
+
+	lockdep_assert_held(&port->in_lock);
+
+	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+	if (!packet) {
+		/*
+		 * Uh oh, we are seriously out of memory. The incoming data
+		 * will be lost.
+		 */
+		return -ENOMEM;
+	}
+
+	dev_dbg(&port->service->dev, "Queuing packet %zu bytes, offset %zu\n",
+			vs_pbuf_size(pbuf), offset);
+	mbuf->priv = packet;
+	memcpy(&packet->pbuf, pbuf, sizeof(*pbuf));
+	packet->offset = offset;
+
+	list_add_tail(&mbuf->queue, &port->pending_in_packets);
+	return 0;
+}
+
+int vs_serial_handle_message(struct vtty_port *port, struct vs_mbuf *mbuf,
+		struct vs_pbuf *pbuf)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+	bool queue_packet = false;
+	size_t sent = 0;
+	int err;
+
+	if (!tty) {
+		dev_dbg(&port->service->dev,
+				"tty not open. Dropping %zu chars\n",
+				pbuf->size);
+		port->ops.free_msg_buf(port, mbuf, pbuf);
+		return 0;
+	}
+
+	dev_dbg(&port->service->dev, "Incoming message - len = %zu\n",
+			pbuf->size);
+
+	spin_lock(&port->in_lock);
+	if (!port->tty_canrecv || !list_empty(&port->pending_in_packets)) {
+		/*
+		 * We cannot send to the tty right now, either because we are
+		 * being throttled or because we still have pending data
+		 * to write out to the tty. Queue the buffer up so we can
+		 * write it later.
+		 */
+		dev_dbg(&port->service->dev,
+				"Cannot send (canrecv = %d, queued = %d) - queuing message\n",
+				port->tty_canrecv,
+				!list_empty(&port->pending_in_packets));
+		queue_packet = true;
+
+	} else {
+		sent = vs_serial_send_pbuf_to_tty(port, pbuf, 0);
+		if (sent < vs_pbuf_size(pbuf)) {
+			/*
+			 * Only wrote part of the buffer to the tty. Queue
+			 * the buffer to write the rest.
+			 */
+			dev_dbg(&port->service->dev,
+					"Sent %zu/%zu bytes to tty - queueing rest\n",
+					sent, vs_pbuf_size(pbuf));
+			queue_packet = true;
+		}
+	}
+
+	if (queue_packet) {
+		/*
+		 * Queue the incoming data up. If we are not already throttled,
+		 * the tty layer will do so now since it has no room in its
+		 * buffers.
+		 */
+		err = vs_serial_queue_incoming_packet(port, mbuf, pbuf, sent);
+		if (err) {
+			dev_err(&port->service->dev,
+					"Failed to queue packet - dropping chars\n");
+			port->ops.free_msg_buf(port, mbuf, pbuf);
+		}
+
+	} else {
+		port->ops.free_msg_buf(port, mbuf, pbuf);
+	}
+
+	spin_unlock(&port->in_lock);
+	tty_kref_put(tty);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_serial_handle_message);
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+static int vconsole_setup(struct console *co, char *options)
+{
+	if (co->index < 0 || co->index >= max_ttys)
+		co->index = 0;
+
+	pr_info("OKL4 virtual console init\n");
+
+	return 0;
+}
+
+static void vconsole_write(struct console *co, const char *p, unsigned count)
+{
+}
+
+static struct tty_driver *vconsole_device(struct console *co, int *index)
+{
+	*index = co->index;
+
+	return vtty_driver;
+}
+#endif /* CONFIG_OKL4_VTTY_CONSOLE */
+
+static void vs_serial_free_buffers(struct vtty_port *port)
+{
+	struct vtty_in_packet *packet;
+	struct vs_mbuf *mbuf;
+
+	/* Free the list of incoming buffers */
+	spin_lock_bh(&port->in_lock);
+	while (!list_empty(&port->pending_in_packets)) {
+		mbuf = list_first_entry(&port->pending_in_packets,
+				struct vs_mbuf, queue);
+		packet = mbuf->priv;
+
+		list_del(&mbuf->queue);
+		port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+		kfree(packet);
+	}
+	spin_unlock_bh(&port->in_lock);
+}
+
+/** vservices callbacks **/
+struct vtty_port *vs_serial_alloc_port(struct vs_service_device *service,
+		struct vtty_port_ops *port_ops)
+{
+	struct vtty_port *port;
+	int port_num;
+
+	mutex_lock(&tty_bitmap_lock);
+	port_num = find_first_zero_bit(alloced_ttys, max_ttys);
+
+	if (port_num >= max_ttys) {
+		mutex_unlock(&tty_bitmap_lock);
+		return NULL;
+	}
+
+	port = kzalloc(sizeof(struct vtty_port), GFP_KERNEL);
+	if (!port) {
+		mutex_unlock(&tty_bitmap_lock);
+		return NULL;
+	}
+
+	port->service = service;
+	port->ops = *port_ops;
+	port->tty_canrecv = true;
+	port->port_num = port_num;
+	INIT_LIST_HEAD(&port->pending_in_packets);
+	spin_lock_init(&port->in_lock);
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	/* Set up and register the port's console device */
+	strlcpy(port->console.name, "vconvs", sizeof(port->console.name));
+	port->console.write = vconsole_write;
+	port->console.flags = CON_PRINTBUFFER;
+	port->console.device = vconsole_device;
+	port->console.setup = vconsole_setup;
+	port->console.index = port_num;
+
+	register_console(&port->console);
+#endif
+	port->vtty_driver = vtty_driver;
+
+	tty_port_init(&port->port);
+	port->port.ops = &vtty_port_ops;
+
+	tty_register_device(vtty_driver, port_num, &service->dev);
+	bitmap_set(alloced_ttys, port_num, 1);
+	mutex_unlock(&tty_bitmap_lock);
+
+	return port;
+}
+EXPORT_SYMBOL(vs_serial_alloc_port);
+
+void vs_serial_release(struct vtty_port *port)
+{
+	dev_dbg(&port->service->dev, "Release\n");
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+	unregister_console(&port->console);
+#endif
+
+	mutex_lock(&tty_bitmap_lock);
+	bitmap_clear(alloced_ttys, port->port_num, 1);
+	mutex_unlock(&tty_bitmap_lock);
+
+	if (port->port.tty) {
+		tty_vhangup(port->port.tty);
+		tty_kref_put(port->port.tty);
+	}
+
+	vs_serial_free_buffers(port);
+	port->doing_release = true;
+	tty_unregister_device(vtty_driver, port->port_num);
+}
+EXPORT_SYMBOL_GPL(vs_serial_release);
+
+void vs_serial_reset(struct vtty_port *port)
+{
+	/* Free list of in and out mbufs. */
+	vs_serial_free_buffers(port);
+}
+EXPORT_SYMBOL_GPL(vs_serial_reset);
+
+static int __init vs_serial_init(void)
+{
+	int err;
+
+	if (max_ttys == 0)
+		return -EINVAL;
+
+	alloced_ttys = kzalloc(sizeof(unsigned long) * BITS_TO_LONGS(max_ttys),
+			GFP_KERNEL);
+	if (!alloced_ttys) {
+		err = -ENOMEM;
+		goto fail_alloc_ttys;
+	}
+
+	/* Set up the tty driver. */
+	vtty_driver = alloc_tty_driver(max_ttys);
+	if (!vtty_driver) {
+		err = -ENOMEM;
+		goto fail_alloc_tty_driver;
+	}
+
+	vtty_driver->owner = THIS_MODULE;
+	vtty_driver->driver_name = "okl4-vservices-serial";
+	vtty_driver->name = "ttyVS";
+	vtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	vtty_driver->subtype = SERIAL_TYPE_NORMAL;
+	vtty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	vtty_driver->init_termios = tty_std_termios;
+	vtty_driver->num = max_ttys;
+
+	/* These flags don't really matter; just use sensible defaults. */
+	vtty_driver->init_termios.c_cflag =
+			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	vtty_driver->init_termios.c_ispeed = 9600;
+	vtty_driver->init_termios.c_ospeed = 9600;
+
+	tty_set_operations(vtty_driver, &vtty_ops);
+
+	err = tty_register_driver(vtty_driver);
+	if (err)
+		goto fail_tty_driver_register;
+
+	return 0;
+
+fail_tty_driver_register:
+	put_tty_driver(vtty_driver);
+fail_alloc_tty_driver:
+	kfree(alloced_ttys);
+fail_alloc_ttys:
+	return err;
+}
+
+static void __exit vs_serial_exit(void)
+{
+	tty_unregister_driver(vtty_driver);
+	put_tty_driver(vtty_driver);
+}
+
+module_init(vs_serial_init);
+module_exit(vs_serial_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Core Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index fda42ab..a1b5935 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -177,8 +177,15 @@
 
 	clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
 					mult, div);
-	if (IS_ERR(clk))
+	if (IS_ERR(clk)) {
+		/*
+		 * If parent clock is not registered, registration would fail.
+		 * Clear OF_POPULATED flag so that clock registration can be
+		 * attempted again from probe function.
+		 */
+		of_node_clear_flag(node, OF_POPULATED);
 		return clk;
+	}
 
 	ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
 	if (ret) {
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index d1d7787..db29016 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -120,6 +120,7 @@
 
 	np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
 	base = of_iomap(np, 0);
+	of_node_put(np);
 	WARN_ON(!base);
 
 	clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/drivers/clk/msm/clock-gcc-8952.c b/drivers/clk/msm/clock-gcc-8952.c
index 47619f5..06f095c 100644
--- a/drivers/clk/msm/clock-gcc-8952.c
+++ b/drivers/clk/msm/clock-gcc-8952.c
@@ -274,6 +274,7 @@
 	F_APCS_PLL(1708800000, 89, 0x0, 0x1, 0x0, 0x0, 0x0),
 	F_APCS_PLL(1804800000, 94, 0x0, 0x1, 0x0, 0x0, 0x0),
 	F_APCS_PLL(1958400000, 102, 0x0, 0x1, 0x0, 0x0, 0x0),
+	F_APCS_PLL(2016000000, 105, 0x0, 0x1, 0x0, 0x0, 0x0),
 };
 
 static struct pll_clk a53ss_c1_pll = {
@@ -304,7 +305,7 @@
 		.vdd_class = &vdd_hf_pll,
 		.fmax = (unsigned long [VDD_HF_PLL_NUM]) {
 			[VDD_HF_PLL_SVS] = 1000000000,
-			[VDD_HF_PLL_NOM] = 2000000000,
+			[VDD_HF_PLL_NOM] = 2020000000,
 		},
 		.num_fmax = VDD_HF_PLL_NUM,
 		CLK_INIT(a53ss_c1_pll.c),
@@ -840,6 +841,7 @@
 	F_SLEW( 270000000, FIXED_CLK_SRC, gpll6_aux,	4,	0,	0),
 	F_SLEW( 320000000, FIXED_CLK_SRC, gpll0,	2.5,	0,	0),
 	F_SLEW( 400000000, FIXED_CLK_SRC, gpll0,	2,	0,	0),
+	F_SLEW( 465000000, 930000000,	  gpll3,	1,	0,	0),
 	F_SLEW( 484800000, 969600000,	  gpll3,	1,	0,	0),
 	F_SLEW( 523200000, 1046400000,	  gpll3,	1,	0,	0),
 	F_SLEW( 550000000, 1100000000,	  gpll3,	1,	0,	0),
@@ -860,6 +862,7 @@
 	F_SLEW( 270000000, FIXED_CLK_SRC, gpll6_aux,	4,	0,	0),
 	F_SLEW( 320000000, FIXED_CLK_SRC, gpll0,	2.5,	0,	0),
 	F_SLEW( 400000000, FIXED_CLK_SRC, gpll0,	2,	0,	0),
+	F_SLEW( 465000000, 930000000,	  gpll3,	1,	0,	0),
 	F_SLEW( 484800000, 969600000,	  gpll3,	1,	0,	0),
 	F_SLEW( 523200000, 1046400000,	  gpll3,	1,	0,	0),
 	F_SLEW( 550000000, 1100000000,	  gpll3,	1,	0,	0),
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 057f0e1..a4e0670 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -336,6 +336,39 @@
 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req);
 }
 
+static bool clk_rcg2_current_config(struct clk_rcg2 *rcg,
+				    const struct freq_tbl *f)
+{
+	struct clk_hw *hw = &rcg->clkr.hw;
+	u32 cfg, mask, new_cfg;
+	int index;
+
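+	/* When this RCG has an MND divider, M and ~(N - M) must also match. */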
+	if (rcg->mnd_width) {
+		mask = BIT(rcg->mnd_width) - 1;
+		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &cfg);
+		if ((cfg & mask) != (f->m & mask))
+			return false;
+
+		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &cfg);
+		if ((cfg & mask) != (~(f->n - f->m) & mask))
+			return false;
+	}
+
+	mask = (BIT(rcg->hid_width) - 1) | CFG_SRC_SEL_MASK;
+
+	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+	new_cfg = ((f->pre_div << CFG_SRC_DIV_SHIFT) |
+		(rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT)) & mask;
+
+	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+	if (new_cfg != (cfg & mask))
+		return false;
+
+	return true;
+}
+
 static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
 {
 	u32 cfg, mask;
@@ -900,6 +933,8 @@
 	for (i = 0; i < num_parents; i++) {
 		if (cfg == rcg->parent_map[i].cfg) {
 			f.src = rcg->parent_map[i].src;
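+			/* Already at the requested configuration. */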
+			if (clk_rcg2_current_config(rcg, &f))
+				return 0;
 			return clk_rcg2_configure(rcg, &f);
 		}
 	}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 7b23db4..aa8dccc 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -95,6 +95,9 @@
 #define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x15c
 #define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x164
 #define PLL_FASTLOCK_EN_BAND			0x16c
+#define PLL_FREQ_TUNE_ACCUM_INIT_LOW		0x170
+#define PLL_FREQ_TUNE_ACCUM_INIT_MID		0x174
+#define PLL_FREQ_TUNE_ACCUM_INIT_HIGH		0x178
 #define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x17c
 #define PLL_PLL_LOCK_OVERRIDE			0x180
 #define PLL_PLL_LOCK_DELAY			0x184
@@ -112,6 +115,7 @@
 #define PHY_CMN_RBUF_CTRL	0x01c
 #define PHY_CMN_PLL_CNTRL	0x038
 #define PHY_CMN_CTRL_0		0x024
+#define PHY_CMN_CTRL_2		0x02c
 
 /* Bit definition of SSC control registers */
 #define SSC_CENTER		BIT(0)
@@ -123,6 +127,43 @@
 #define SSC_START		BIT(6)
 #define SSC_START_MUX		BIT(7)
 
+/* Dynamic Refresh Control Registers */
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0		(0x014)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1		(0x018)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2		(0x01C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3		(0x020)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4		(0x024)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5		(0x028)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6		(0x02C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7		(0x030)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8		(0x034)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9		(0x038)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10		(0x03C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11		(0x040)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12		(0x044)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13		(0x048)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14		(0x04C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		(0x050)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16		(0x054)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17		(0x058)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18		(0x05C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		(0x060)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		(0x064)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		(0x068)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		(0x06C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		(0x070)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		(0x074)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		(0x078)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		(0x07C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		(0x080)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		(0x084)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		(0x088)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30		(0x08C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31		(0x090)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	(0x094)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	(0x098)
+
+#define DSI_PHY_TO_PLL_OFFSET	(0x600)
 enum {
 	DSI_PLL_0,
 	DSI_PLL_1,
@@ -644,6 +685,7 @@
 
 	rsc->vco_current_rate = rate;
 	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+	rsc->dfps_trigger = false;
 
 	rc = mdss_pll_resource_enable(rsc, true);
 	if (rc) {
@@ -674,6 +716,237 @@
 	return 0;
 }
 
+static int dsi_pll_read_stored_trim_codes(struct mdss_pll_resources *pll_res,
+					  unsigned long vco_clk_rate)
+{
+	int i;
+	bool found = false;
+
+	if (!pll_res->dfps)
+		return -EINVAL;
+
+	for (i = 0; i < pll_res->dfps->vco_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d vco_rate=%d, code %d %d %d\n",
+			codes_info->is_valid, codes_info->clk_rate,
+			codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2,
+			codes_info->pll_codes.pll_codes_3);
+
+		if (vco_clk_rate != codes_info->clk_rate &&
+				codes_info->is_valid)
+			continue;
+
+		pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		pll_res->cache_pll_trim_codes[2] =
+			codes_info->pll_codes.pll_codes_3;
+		found = true;
+		break;
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	pr_debug("trim_code_0=0x%x trim_code_1=0x%x trim_code_2=0x%x\n",
+			pll_res->cache_pll_trim_codes[0],
+			pll_res->cache_pll_trim_codes[1],
+			pll_res->cache_pll_trim_codes[2]);
+
+	return 0;
+}
+
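+/*
+ * Populate the DSI dynamic-refresh control registers with address/value
+ * pairs (dividers, cached trim codes and lock settings) so the PLL can
+ * be retuned automatically on a dynamic refresh-rate switch.
+ */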
+static void shadow_dsi_pll_dynamic_refresh_10nm(struct dsi_pll_10nm *pll,
+						struct mdss_pll_resources *rsc)
+{
+	u32 data;
+	u32 offset = DSI_PHY_TO_PLL_OFFSET;
+	u32 upper_addr = 0;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	data &= ~BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_PLL_CNTRL, data, 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_CLK_CFG1) << 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_PLL_CNTRL) << 1);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+			   PHY_CMN_RBUF_CTRL,
+			   (PLL_DECIMAL_DIV_START_1 + offset),
+			   0, reg->decimal_div_start);
+	upper_addr |= (upper_8_bit(PHY_CMN_RBUF_CTRL) << 2);
+	upper_addr |= (upper_8_bit(PLL_DECIMAL_DIV_START_1 + offset) << 3);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+			   (PLL_FRAC_DIV_START_LOW_1 + offset),
+			   (PLL_FRAC_DIV_START_MID_1 + offset),
+			   reg->frac_div_start_low, reg->frac_div_start_mid);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_LOW_1 + offset) << 4);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_MID_1 + offset) << 5);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+			   (PLL_FRAC_DIV_START_HIGH_1 + offset),
+			   (PLL_PLL_PROP_GAIN_RATE_1 + offset),
+			   reg->frac_div_start_high, reg->pll_prop_gain_rate);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_HIGH_1 + offset) << 6);
+	upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 7);
+
+	data = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE) & 0x03;
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+			   (PLL_PLL_OUTDIV_RATE + offset),
+			   (PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset),
+			   data, 0);
+	upper_addr |= (upper_8_bit(PLL_PLL_OUTDIV_RATE + offset) << 8);
+	upper_addr |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset) << 9);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+			   (PLL_FREQ_TUNE_ACCUM_INIT_MID + offset),
+			   (PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset),
+			   rsc->cache_pll_trim_codes[1],
+			   rsc->cache_pll_trim_codes[0]);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MID + offset) << 10);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset) << 11);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+			   (PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset),
+			   (PLL_PLL_BAND_SET_RATE_1 + offset),
+			   0x07, rsc->cache_pll_trim_codes[2]);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset) << 12);
+	upper_addr |= (upper_8_bit(PLL_PLL_BAND_SET_RATE_1 + offset) << 13);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+			   (PLL_CALIBRATION_SETTINGS + offset),
+			   (PLL_BAND_SEL_CAL_SETTINGS + offset), 0x44, 0x3a);
+	upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 14);
+	upper_addr |= (upper_8_bit(PLL_BAND_SEL_CAL_SETTINGS + offset) << 15);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+			   (PLL_PLL_LOCKDET_RATE_1 + offset),
+			   (PLL_PLL_LOCK_DELAY + offset), 0x10, 0x06);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCKDET_RATE_1 + offset) << 16);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_DELAY + offset) << 17);
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+			   PHY_CMN_CTRL_2, PHY_CMN_CLK_CFG0, 0x40, data);
+	if (rsc->slave)
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+				   PHY_CMN_CLK_CFG0, PHY_CMN_CTRL_0,
+				   data, 0x7f);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	/* Dummy register writes */
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+
+	/* Registers to configure after PLL enable delay */
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1) | BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL, data, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1, data, data);
+	if (rsc->slave) {
+		data = MDSS_PLL_REG_R(rsc->slave->phy_base, PHY_CMN_CLK_CFG1) |
+			BIT(5);
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL,
+				   data, 0x01);
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1,
+				   data, data);
+	}
+
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, upper_addr);
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0);
+	wmb(); /* commit register writes */
+}
+
+static int shadow_vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_10nm *pll;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = dsi_pll_read_stored_trim_codes(rsc, rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%ld\n", rate);
+		return -EINVAL;
+	}
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rsc->vco_current_rate = rate;
+	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	dsi_pll_setup_config(pll, rsc);
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	/* program dynamic refresh control registers */
+	shadow_dsi_pll_dynamic_refresh_10nm(pll, rsc);
+
+	/* update cached vco rate */
+	rsc->vco_cached_rate = rate;
+	rsc->dfps_trigger = true;
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return 0;
+}
+
 static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
 {
 	int rc;
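For context on the block above: each DSI_DYNAMIC_REFRESH_PLL_CTRLn word carries two (register, value) pairs, which is why every MDSS_DYN_PLL_REG_W() call in shadow_dsi_pll_dynamic_refresh_10nm() supplies two offsets and two data bytes, and why the 9th address bit of each offset is collected separately into DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR. The standalone sketch below models that packing in user-space C; the exact PLL_CALC_DATA field layout and the register offsets used are assumptions for illustration, not taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* Assumed control-word layout: two (offset, data) pairs, each byte offset
 * stored as an 8-bit word address (offset / 4); the 9th word-address bit
 * goes into the separate upper-address register instead.
 */
static uint32_t pll_calc_data(uint32_t addr0, uint32_t addr1,
			      uint32_t data0, uint32_t data1)
{
	return (data1 << 24) | (((addr1 / 4) & 0xff) << 16) |
	       ((data0 & 0xff) << 8) | ((addr0 / 4) & 0xff);
}

/* Same arithmetic as the upper_8_bit() helper added in mdss-pll.h: returns 1
 * when the byte offset lies in the 0x400..0x7ff window, which is where the
 * PLL registers land once DSI_PHY_TO_PLL_OFFSET (0x600) is added.
 */
static uint32_t upper_8_bit(uint32_t x)
{
	return ((x >> 2) & 0x100) >> 8;
}

int main(void)
{
	uint32_t phy_off = 0x018;		/* hypothetical PHY CMN offset */
	uint32_t pll_off = 0x600 + 0x0c;	/* hypothetical PLL offset */
	uint32_t upper_addr = 0;

	printf("ctrl word = 0x%08x\n",
	       pll_calc_data(phy_off, pll_off, 0x12, 0x34));

	upper_addr |= upper_8_bit(phy_off) << 0;
	upper_addr |= upper_8_bit(pll_off) << 1;
	printf("upper_addr = 0x%x\n", upper_addr);
	return 0;
}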
@@ -739,7 +1012,7 @@
 	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
 	if (rsc->slave)
 		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
-				0x03, rsc->cached_cfg1);
+				0x03, rsc->slave->cached_cfg1);
 	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
 
 	/* Start PLL */
@@ -789,6 +1062,7 @@
 	}
 
 	rsc->handoff_resources = false;
+	rsc->dfps_trigger = false;
 
 	pr_debug("stop PLL (%d)\n", rsc->index);
 
@@ -840,16 +1114,18 @@
 	/*
 	 * During unprepare in continuous splash use case we want driver
 	 * to pick all dividers instead of retaining bootloader configurations.
+	 * Also handle use cases where dynamic refresh is triggered before
+	 * the first suspend/resume.
 	 */
-	if (!pll->handoff_resources) {
+	if (!pll->handoff_resources || pll->dfps_trigger) {
 		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG0);
+						  PHY_CMN_CLK_CFG0);
 		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
-							PLL_PLL_OUTDIV_RATE);
+						    PLL_PLL_OUTDIV_RATE);
 		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
-					pll->cached_cfg1, pll->cached_outdiv);
+			 pll->cached_cfg1, pll->cached_outdiv);
 
-		pll->vco_cached_rate = clk_hw_get_rate(hw);
+		pll->vco_cached_rate = clk_get_rate(hw->clk);
 	}
 
 	/*
@@ -859,9 +1135,15 @@
 	 * does not change.For such usecases, we need to ensure that the cached
 	 * value is programmed prior to PLL being locked
 	 */
-	if (pll->handoff_resources)
+	if (pll->handoff_resources) {
 		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG1);
+						  PHY_CMN_CLK_CFG1);
+		if (pll->slave)
+			pll->slave->cached_cfg1 =
+				MDSS_PLL_REG_R(pll->slave->phy_base,
+					       PHY_CMN_CLK_CFG1);
+	}
+
 	dsi_pll_disable(vco);
 	mdss_pll_resource_enable(pll, false);
 }
@@ -889,7 +1171,7 @@
 	}
 
 	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+	    (pll->vco_cached_rate == clk_get_rate(hw->clk))) {
 		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
 				pll->vco_cached_rate);
 		if (rc) {
@@ -902,6 +1184,9 @@
 			pll->cached_cfg1);
 		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
 					pll->cached_cfg0);
+		if (pll->slave)
+			MDSS_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG0,
+				       pll->cached_cfg0);
 		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
 					pll->cached_outdiv);
 	}
@@ -1037,6 +1322,14 @@
 	reg_val &= ~0xF0;
 	reg_val |= (div << 4);
 	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+
+	/*
+	 * Cache the current parent index for cases where the parent
+	 * is not changing but the rate is. In that case the clock
+	 * framework won't call set_parent, so the dsiclk_sel bit
+	 * won't be programmed, e.g. in the dfps update use case.
+	 */
+	pll->cached_cfg0 = reg_val;
 }
 
 static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
@@ -1174,6 +1467,12 @@
 	.unprepare = vco_10nm_unprepare,
 };
 
+static const struct clk_ops clk_ops_shadow_vco_10nm = {
+	.recalc_rate = vco_10nm_recalc_rate,
+	.set_rate = shadow_vco_10nm_set_rate,
+	.round_rate = vco_10nm_round_rate,
+};
+
 static struct regmap_bus mdss_mux_regmap_bus = {
 	.reg_write = mdss_set_mux_sel,
 	.reg_read = mdss_get_mux_sel,
@@ -1248,6 +1547,19 @@
 	},
 };
 
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1000000000UL,
@@ -1261,6 +1573,19 @@
 	},
 };
 
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1277,6 +1602,23 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1293,6 +1635,23 @@
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1307,6 +1666,21 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1321,6 +1695,21 @@
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_vco_div = {
 	.div = 4,
 	.mult = 1,
@@ -1328,7 +1717,19 @@
 		.name = "dsi0pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1340,7 +1741,19 @@
 		.name = "dsi1pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1357,6 +1770,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_byteclk_src = {
 	.div = 8,
 	.mult = 1,
@@ -1369,6 +1794,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1381,6 +1818,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1393,15 +1842,29 @@
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_byteclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src",
+				"dsi0pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1413,9 +1876,11 @@
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src",
+				"dsi1pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1439,6 +1904,25 @@
 	},
 };
 
+static struct clk_regmap_mux dsi0pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_bitclk_src",
+				"dsi0pll_shadow_post_bit_div",
+				"dsi0pll_shadow_pll_out_div",
+				"dsi0pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	.reg = PHY_CMN_CLK_CFG1,
 	.shift = 0,
@@ -1457,6 +1941,25 @@
 	},
 };
 
+static struct clk_regmap_mux dsi1pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_bitclk_src",
+				"dsi1pll_shadow_post_bit_div",
+				"dsi1pll_shadow_pll_out_div",
+				"dsi1pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1472,6 +1975,21 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1487,15 +2005,32 @@
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_pclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi0pll_pclk_src",
+				"dsi0pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1507,9 +2042,11 @@
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi1pll_pclk_src",
+				"dsi1pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1526,6 +2063,14 @@
 	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
 	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_0] = &dsi0pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_0_CLK] = &dsi0pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_0_CLK] = &dsi0pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_0_CLK] = &dsi0pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_0_CLK] = &dsi0pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_0_CLK] = &dsi0pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_0_CLK] = &dsi0pll_shadow_pclk_src.clkr.hw,
 	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
 	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
 	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
@@ -1536,6 +2081,14 @@
 	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
 	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_1] = &dsi1pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_1_CLK] = &dsi1pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_1_CLK] = &dsi1pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_1_CLK] = &dsi1pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_1_CLK] = &dsi1pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_1_CLK] = &dsi1pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_1_CLK] = &dsi1pll_shadow_pclk_src.clkr.hw,
 };
 
 int dsi_pll_clock_register_10nm(struct platform_device *pdev,
@@ -1580,18 +2133,20 @@
 
 	/* Establish client data */
 	if (ndx == 0) {
-
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pll_out_div.clkr.regmap = rmap;
+		dsi0pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_bitclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1600,12 +2155,16 @@
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_byteclk_mux.clkr.regmap = rmap;
 
 		dsi0pll_vco_clk.priv = pll_res;
-		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+		dsi0pll_shadow_vco_clk.priv = pll_res;
+
+		for (i = VCO_CLK_0; i <= SHADOW_PCLK_SRC_0_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_10nm[i]);
 			if (IS_ERR(clk)) {
@@ -1620,20 +2179,21 @@
 
 		rc = of_clk_add_provider(pdev->dev.of_node,
 				of_clk_src_onecell_get, clk_data);
-
-
 	} else {
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pll_out_div.clkr.regmap = rmap;
+		dsi1pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_bitclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1642,12 +2202,16 @@
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_byteclk_mux.clkr.regmap = rmap;
-		dsi1pll_vco_clk.priv = pll_res;
 
-		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+		dsi1pll_vco_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
+
+		for (i = VCO_CLK_1; i <= SHADOW_PCLK_SRC_1_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_10nm[i]);
 			if (IS_ERR(clk)) {
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 2f92270..e4b5184 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
 		writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
 			(base) + (offset))
 
+#define upper_8_bit(x) ((((x) >> 2) & 0x100) >> 8)
+
 enum {
 	MDSS_DSI_PLL_10NM,
 	MDSS_DP_PLL_10NM,
@@ -45,30 +47,23 @@
 	MDSS_PLL_TARGET_8996,
 };
 
-#define DFPS_MAX_NUM_OF_FRAME_RATES 20
-
-struct dfps_panel_info {
-	uint32_t enabled;
-	uint32_t frame_rate_cnt;
-	uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
-};
+#define DFPS_MAX_NUM_OF_FRAME_RATES 16
 
 struct dfps_pll_codes {
 	uint32_t pll_codes_1;
 	uint32_t pll_codes_2;
+	uint32_t pll_codes_3;
 };
 
 struct dfps_codes_info {
 	uint32_t is_valid;
-	uint32_t frame_rate;	/* hz */
 	uint32_t clk_rate;	/* hz */
 	struct dfps_pll_codes pll_codes;
 };
 
 struct dfps_info {
-	struct dfps_panel_info panel_dfps;
+	uint32_t vco_rate_cnt;
 	struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
-	void *dfps_fb_base;
 };
 
 struct mdss_pll_resources {
@@ -139,7 +134,7 @@
 	/*
 	 * caching the pll trim codes in the case of dynamic refresh
 	 */
-	int		cache_pll_trim_codes[2];
+	int		cache_pll_trim_codes[3];
 
 	/*
 	 * for maintaining the status of saving trim codes
@@ -181,6 +176,11 @@
 	 */
 	struct dfps_info *dfps;
 
+	/*
+	 * for cases where a dfps trigger happens before the first
+	 * suspend/resume and handoff is not finished.
+	 */
+	bool dfps_trigger;
 };
 
 struct mdss_pll_vco_calc {
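With the dfps_info rework above, the stored PLL codes are keyed directly by VCO clock rate, which is how dsi_pll_read_stored_trim_codes() in the 10nm PLL patch earlier in this series consumes them. Below is a minimal user-space model of that lookup, reusing the structure layout from this header but with standalone types and a simplified match condition (skip entries that are invalid or whose clk_rate differs):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define DFPS_MAX_NUM_OF_FRAME_RATES 16

struct dfps_pll_codes {
	uint32_t pll_codes_1;
	uint32_t pll_codes_2;
	uint32_t pll_codes_3;
};

struct dfps_codes_info {
	uint32_t is_valid;
	uint32_t clk_rate;	/* hz */
	struct dfps_pll_codes pll_codes;
};

struct dfps_info {
	uint32_t vco_rate_cnt;
	struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
};

/* Scan the table for a valid entry matching the requested VCO rate and copy
 * its three trim codes, mirroring the intent of
 * dsi_pll_read_stored_trim_codes().
 */
static int read_stored_trim_codes(const struct dfps_info *dfps,
				  unsigned long vco_clk_rate,
				  uint32_t cache[3])
{
	uint32_t i;

	if (!dfps)
		return -EINVAL;

	for (i = 0; i < dfps->vco_rate_cnt; i++) {
		const struct dfps_codes_info *c = &dfps->codes_dfps[i];

		if (!c->is_valid || c->clk_rate != vco_clk_rate)
			continue;

		cache[0] = c->pll_codes.pll_codes_1;
		cache[1] = c->pll_codes.pll_codes_2;
		cache[2] = c->pll_codes.pll_codes_3;
		return 0;
	}

	return -EINVAL;
}

int main(void)
{
	struct dfps_info dfps = {
		.vco_rate_cnt = 1,
		.codes_dfps[0] = {
			.is_valid = 1,
			.clk_rate = 1500000000u,
			.pll_codes = { 0x3f, 0x0a, 0x11 },
		},
	};
	uint32_t cache[3] = { 0 };

	if (!read_stored_trim_codes(&dfps, 1500000000u, cache))
		printf("codes: 0x%x 0x%x 0x%x\n", cache[0], cache[1], cache[2]);
	return 0;
}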
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 8387c7a..4109988 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -629,7 +629,7 @@
 	MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
 			RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
 	COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
-			RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
+			RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
 			RK3399_CLKGATE_CON(8), 12, GFLAGS),
 
 	/* uart */
@@ -1521,6 +1521,7 @@
 	"pclk_pmu_src",
 	"fclk_cm0s_src_pmu",
 	"clk_timer_src_pmu",
+	"pclk_rkpwm_pmu",
 };
 
 static void __init rk3399_clk_init(struct device_node *np)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e16d9f..8f9c8b6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2316,6 +2316,11 @@
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_ADJUST, new_policy);
 
+	/* adjust if necessary - hardware incompatibility */
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_INCOMPATIBLE, new_policy);
+
+
 	/*
 	 * verify the cpu speed can be set within this limit, which might be
 	 * different to the first one
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index 6254f45..0e8754b6 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -234,16 +234,19 @@
 
 void cpufreq_task_times_init(struct task_struct *p)
 {
-	void *temp;
 	unsigned long flags;
-	unsigned int max_state;
 
 	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	p->time_in_state = NULL;
 	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	p->max_state = 0;
+}
 
-	max_state = READ_ONCE(next_offset);
+void cpufreq_task_times_alloc(struct task_struct *p)
+{
+	void *temp;
+	unsigned long flags;
+	unsigned int max_state = READ_ONCE(next_offset);
 
 	/* We use one array to avoid multiple allocs per task */
 	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
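The cpufreq_times change above splits the old init into a cheap reset (cpufreq_task_times_init()) plus a separate allocation step (cpufreq_task_times_alloc()); the hunk itself does not show the new call sites, so the split is presumably there to let callers defer or skip the per-task array allocation. A standalone model of that two-phase pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the per-task state and the global frequency
 * count (next_offset in the driver).
 */
struct task_times {
	unsigned long long *time_in_state;
	unsigned int max_state;
};

static unsigned int next_offset = 8;

/* Phase 1: cheap reset, safe to call before we know whether the task will
 * ever need the array.
 */
static void task_times_init(struct task_times *t)
{
	t->time_in_state = NULL;
	t->max_state = 0;
}

/* Phase 2: size the array from a snapshot of next_offset and allocate it in
 * one go, mirroring cpufreq_task_times_alloc().
 */
static int task_times_alloc(struct task_times *t)
{
	unsigned int max_state = next_offset;

	t->time_in_state = calloc(max_state, sizeof(*t->time_in_state));
	if (!t->time_in_state)
		return -1;
	t->max_state = max_state;
	return 0;
}

int main(void)
{
	struct task_times t;

	task_times_init(&t);
	if (task_times_alloc(&t))
		return 1;
	printf("allocated %u slots\n", t.max_state);
	free(t.time_in_state);
	return 0;
}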
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 3c75997..ae72206 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1776,7 +1776,7 @@
 	md_entry.virt_addr = (uintptr_t)lpm_debug;
 	md_entry.phys_addr = lpm_debug_phys;
 	md_entry.size = size;
-	if (msm_minidump_add_region(&md_entry))
+	if (msm_minidump_add_region(&md_entry) < 0)
 		pr_info("Failed to add lpm_debug in Minidump\n");
 
 	return 0;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index dae1e39..c7524bb 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -208,7 +208,7 @@
 				  dev->pdr_pa);
 		return -ENOMEM;
 	}
-	memset(dev->pdr, 0,  sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
 				   256 * PPC4XX_NUM_PD,
 				   &dev->shadow_sa_pool_pa,
@@ -241,13 +241,15 @@
 
 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
 {
-	if (dev->pdr != NULL)
+	if (dev->pdr)
 		dma_free_coherent(dev->core_dev->device,
 				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 				  dev->pdr, dev->pdr_pa);
+
 	if (dev->shadow_sa_pool)
 		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
 				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
 	if (dev->shadow_sr_pool)
 		dma_free_coherent(dev->core_dev->device,
 			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -417,12 +419,12 @@
 
 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
 {
-	if (dev->sdr != NULL)
+	if (dev->sdr)
 		dma_free_coherent(dev->core_dev->device,
 				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
 				  dev->sdr, dev->sdr_pa);
 
-	if (dev->scatter_buffer_va != NULL)
+	if (dev->scatter_buffer_va)
 		dma_free_coherent(dev->core_dev->device,
 				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
 				  dev->scatter_buffer_va,
@@ -1034,12 +1036,10 @@
 			break;
 		}
 
-		if (rc) {
-			list_del(&alg->entry);
+		if (rc)
 			kfree(alg);
-		} else {
+		else
 			list_add_tail(&alg->entry, &sec_dev->alg_list);
-		}
 	}
 
 	return 0;
@@ -1193,7 +1193,7 @@
 
 	rc = crypto4xx_build_gdr(core_dev->dev);
 	if (rc)
-		goto err_build_gdr;
+		goto err_build_pdr;
 
 	rc = crypto4xx_build_sdr(core_dev->dev);
 	if (rc)
@@ -1236,12 +1236,11 @@
 err_request_irq:
 	irq_dispose_mapping(core_dev->irq);
 	tasklet_kill(&core_dev->tasklet);
-	crypto4xx_destroy_sdr(core_dev->dev);
 err_build_sdr:
+	crypto4xx_destroy_sdr(core_dev->dev);
 	crypto4xx_destroy_gdr(core_dev->dev);
-err_build_gdr:
-	crypto4xx_destroy_pdr(core_dev->dev);
 err_build_pdr:
+	crypto4xx_destroy_pdr(core_dev->dev);
 	kfree(core_dev->dev);
 err_alloc_dev:
 	kfree(core_dev);
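The crypto4xx error-path rework above makes each probe label undo exactly the resources set up before the failing step, newest first, relying on the destroy helpers being safe to call on NULL/unallocated state. A small standalone illustration of that unwind ladder with placeholder resources (names are illustrative only):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_res(const char *name)
{
	printf("alloc %s\n", name);
	return malloc(1);
}

static void free_res(const char *name, void *p)
{
	printf("free %s\n", name);
	free(p);
}

/* Each label frees its own (possibly NULL) resource and then falls through
 * to the older ones; free(NULL) is a no-op, mirroring the NULL checks in the
 * crypto4xx destroy helpers.
 */
static int probe(int fail_at)
{
	void *pdr, *gdr, *sdr;

	pdr = alloc_res("pdr");
	if (!pdr || fail_at == 1)
		goto err_pdr;

	gdr = alloc_res("gdr");
	if (!gdr || fail_at == 2)
		goto err_gdr;

	sdr = alloc_res("sdr");
	if (!sdr || fail_at == 3)
		goto err_sdr;

	free_res("sdr", sdr);
	free_res("gdr", gdr);
	free_res("pdr", pdr);
	return 0;

err_sdr:
	free_res("sdr", sdr);
err_gdr:
	free_res("gdr", gdr);
err_pdr:
	free_res("pdr", pdr);
	return -1;
}

int main(void)
{
	probe(2);	/* simulate a failure at the gdr step */
	return 0;
}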
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9e7f281..6d475a2 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -189,7 +189,8 @@
 		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
 		/* Unmap just-run descriptor so we can post-process */
-		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+		dma_unmap_single(dev,
+				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
 				 jrp->entinfo[sw_idx].desc_size,
 				 DMA_TO_DEVICE);
 
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index c8d1158..d58144f 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1239,8 +1239,7 @@
 				goto exit;
 			}
 
-			k_align_dst += creq->vbuf.dst[dst_i].len +
-						byteoffset;
+			k_align_dst += creq->vbuf.dst[dst_i].len;
 			creq->data_len -= creq->vbuf.dst[dst_i].len;
 			dst_i++;
 		} else {
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 625ee50..decaed4 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -409,9 +413,9 @@
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,9 +728,9 @@
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
@@ -979,7 +986,7 @@
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
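The mxs-dcp changes above swap the per-channel mutex for a spinlock and restructure both kthreads into the canonical dequeue loop: mark the task interruptible before checking for work, sleep only when both the backlog and the queue are empty, and go back to TASK_RUNNING before processing. The kernel-style sketch below shows just that loop shape with a placeholder list instead of a crypto_queue; it is an illustration, not a standalone program or a drop-in replacement:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_chan {
	spinlock_t lock;
	struct list_head queue;
};

/* Set the task state *before* checking for work under the lock, so a wakeup
 * that races with the check cannot be lost; only schedule() when there is
 * truly nothing to do.
 */
static int demo_chan_thread(void *data)
{
	struct demo_chan *ch = data;

	while (!kthread_should_stop()) {
		struct list_head *work = NULL;

		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&ch->lock);
		if (!list_empty(&ch->queue)) {
			work = ch->queue.next;
			list_del(work);
		}
		spin_unlock(&ch->lock);

		if (!work) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);
		/* process 'work' here (the driver handles a crypto request) */
	}

	return 0;
}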
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 9126627..75f2bef 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -266,6 +266,8 @@
 		return;
 	}
 
+	count -= initial;
+
 	if (initial)
 		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 			      : "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@
 
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count - initial));
+		      : "d"(control_word), "b"(key), "c"(count));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@
 	if (count < cbc_fetch_blocks)
 		return cbc_crypt(input, output, key, iv, control_word, count);
 
+	count -= initial;
+
 	if (initial)
 		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 			      : "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@
 
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 		      : "+S" (input), "+D" (output), "+a" (iv)
-		      : "d" (control_word), "b" (key), "c" (count-initial));
+		      : "d" (control_word), "b" (key), "c" (count));
 	return iv;
 }
 
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 640c3fc..ad9d6fb 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
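This bar_mask fix is repeated for every QAT variant below: pci_select_bars() returns a small mask, but for_each_set_bit() walks an array of unsigned long, so feeding it a casted &int reads past the variable on 64-bit builds. The user-space sketch below only demonstrates the size mismatch and the intended per-bit walk; it does not use the kernel helpers themselves:

#include <stdio.h>

int main(void)
{
	unsigned long bar_mask = 0x15;	/* pretend BARs 0, 2 and 4 are set */
	unsigned int bar_nr;

	/* On LP64 targets unsigned long is 8 bytes while int is 4, which is
	 * why casting &int to (const unsigned long *) let the bitmap walk
	 * read bytes of unrelated stack.
	 */
	printf("sizeof(int)=%zu sizeof(unsigned long)=%zu\n",
	       sizeof(int), sizeof(unsigned long));

	for (bar_nr = 0; bar_nr < 8 * sizeof(bar_mask); bar_nr++)
		if (bar_mask & (1UL << bar_nr))
			printf("map BAR %u\n", bar_nr);

	return 0;
}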
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 949d77b..0dd8d2d 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 5b2d78a..dcdb94c 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 7540ce1..cd9e634 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 4d2de28..3417443 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 60df986..15de9cb 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c49956..4d0ec7b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1352,7 +1352,7 @@
 
 err_sha_v3_algs:
 	for (j = 0; j < k; j++)
-		crypto_unregister_ahash(&sha_v4_algs[j]);
+		crypto_unregister_ahash(&sha_v3_algs[j]);
 
 err_aes_algs:
 	for (j = 0; j < i; j++)
@@ -1368,7 +1368,7 @@
 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
 		crypto_unregister_alg(&aes_algs[i]);
 
-	for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
 		crypto_unregister_ahash(&sha_v3_algs[i]);
 
 	if (dev->version > SAHARA_VERSION_3)
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 4613170..92e1163 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -111,24 +111,23 @@
 		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
 					       nbytes);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->enc_key, walk.iv, 1);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
@@ -152,24 +151,23 @@
 		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
 					       nbytes);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->dec_key, walk.iv, 0);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
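The VMX rework above (and the matching aes_xts.c change that follows) shrinks the preempt/pagefault/VSX window to just the hardware-accelerated call, because blkcipher_walk_done() may fault or sleep and must not run with preemption disabled. A kernel-style sketch of that shape, with placeholder callbacks standing in for the p8 primitives and the walk helper (powerpc-specific, not a standalone program):

#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>	/* enable_kernel_vsx()/disable_kernel_vsx() on powerpc */

/* One iteration of the fixed loop: the no-preempt/VSX window covers only the
 * non-sleeping crypto primitive; anything that may sleep runs outside it.
 */
static void demo_walk_step(void (*p8_crypt)(void *), int (*walk_done)(void *),
			   void *walk_state)
{
	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();

	p8_crypt(walk_state);		/* never sleeps */

	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	walk_done(walk_state);		/* may sleep, so outside the window */
}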
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 24353ec3..52e7ae0 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -123,32 +123,39 @@
 		ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
                             crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
 	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+
+		ret = blkcipher_walk_virt(desc, &walk);
+
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_vsx();
 
-		blkcipher_walk_init(&walk, dst, src, nbytes);
-
-		ret = blkcipher_walk_virt(desc, &walk);
 		iv = walk.iv;
 		memset(tweak, 0, AES_BLOCK_SIZE);
 		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			if (enc)
 				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
 			else
 				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
 
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 	return ret;
 }
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
index cb04014..d0be577 100644
--- a/drivers/devfreq/governor_bw_hwmon.c
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -74,6 +74,7 @@
 	struct bw_hwmon *hw;
 	struct devfreq_governor *gov;
 	struct attribute_group *attr_grp;
+	struct mutex mon_lock;
 };
 
 #define UP_WAKE 1
@@ -511,8 +512,11 @@
 	if (!node)
 		return -ENODEV;
 
-	if (!node->mon_started)
+	mutex_lock(&node->mon_lock);
+	if (!node->mon_started) {
+		mutex_unlock(&node->mon_lock);
 		return -EBUSY;
+	}
 
 	dev_dbg(df->dev.parent, "Got update request\n");
 	devfreq_monitor_stop(df);
@@ -526,6 +530,7 @@
 
 	devfreq_monitor_start(df);
 
+	mutex_unlock(&node->mon_lock);
 	return 0;
 }
 
@@ -572,7 +577,9 @@
 	struct hwmon_node *node = df->data;
 	struct bw_hwmon *hw = node->hw;
 
+	mutex_lock(&node->mon_lock);
 	node->mon_started = false;
+	mutex_unlock(&node->mon_lock);
 
 	if (init) {
 		devfreq_monitor_stop(df);
@@ -932,6 +939,8 @@
 	node->mbps_zones[0] = 0;
 	node->hw = hwmon;
 
+	mutex_init(&node->mon_lock);
+
 	mutex_lock(&list_lock);
 	list_add_tail(&node->list, &hwmon_list);
 	mutex_unlock(&list_lock);
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index 9c24eef..f7bb7eb 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -224,10 +224,11 @@
 	case DEVFREQ_GOV_SUSPEND:
 		{
 			struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-
-			priv->bus.total_time = 0;
-			priv->bus.gpu_time = 0;
-			priv->bus.ram_time = 0;
+			if (priv) {
+				priv->bus.total_time = 0;
+				priv->bus.gpu_time = 0;
+				priv->bus.ram_time = 0;
+			}
 		}
 		break;
 	default:
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index aabcb79..cd7f67b 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -792,7 +792,7 @@
 	struct k3_dma_dev *d = ofdma->of_dma_data;
 	unsigned int request = dma_spec->args[0];
 
-	if (request > d->dma_requests)
+	if (request >= d->dma_requests)
 		return NULL;
 
 	return dma_get_slave_channel(&(d->chans[request].vc.chan));
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 7186664..be1f5c2 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -844,6 +844,8 @@
 
 	platform_msi_domain_free_irqs(&pdev->dev);
 
+	tasklet_kill(&xor_dev->irq_tasklet);
+
 	clk_disable_unprepare(xor_dev->clk);
 
 	return 0;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 2c449bd..6d7e3cd 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2167,13 +2167,14 @@
 
 	pm_runtime_get_sync(pl330->ddma.dev);
 	spin_lock_irqsave(&pch->lock, flags);
+
 	spin_lock(&pl330->lock);
 	_stop(pch->thread);
-	spin_unlock(&pl330->lock);
-
 	pch->thread->req[0].desc = NULL;
 	pch->thread->req[1].desc = NULL;
 	pch->thread->req_running = -1;
+	spin_unlock(&pl330->lock);
+
 	power_down = pch->active;
 	pch->active = false;
 
@@ -2951,7 +2952,7 @@
 	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
 			 1 : PL330_MAX_BURST);
 
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 61262a7..b0bd0f6 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1111,7 +1111,7 @@
 
 static void ocram_free_mem(void *p, size_t size, void *other)
 {
-	gen_pool_free((struct gen_pool *)other, (u32)p, size);
+	gen_pool_free((struct gen_pool *)other, (unsigned long)p, size);
 }
 
 static const struct edac_device_prv_data ocramecc_data = {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index bea71fb..7335a86 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1059,14 +1059,14 @@
 
 	err = device_add(mci_pdev);
 	if (err < 0)
-		goto out_dev_free;
+		goto out_put_device;
 
 	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
 
 	return 0;
 
- out_dev_free:
-	kfree(mci_pdev);
+ out_put_device:
+	put_device(mci_pdev);
  out:
 	return err;
 }
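Both EDAC fixes here (and the i7core one below) follow the same driver-core rule: once a struct device is initialized, its embedded kobject owns the memory, so a failed device_add() is unwound with put_device() rather than kfree(), and teardown is device_del() followed by put_device(). A generic sketch of that ownership pattern (not the edac code itself; it assumes dev->release frees the containing object):

#include <linux/device.h>

static int demo_register(struct device *dev)
{
	int err;

	device_initialize(dev);
	err = device_add(dev);
	if (err < 0) {
		put_device(dev);	/* runs dev->release; never kfree(dev) here */
		return err;
	}
	return 0;
}

static void demo_unregister(struct device *dev)
{
	device_del(dev);	/* remove from sysfs/bus first */
	put_device(dev);	/* then drop the final reference */
}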
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8a68a5e..b609320 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1177,15 +1177,14 @@
 
 	rc = device_add(pvt->addrmatch_dev);
 	if (rc < 0)
-		return rc;
+		goto err_put_addrmatch;
 
 	if (!pvt->is_registered) {
 		pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
 					      GFP_KERNEL);
 		if (!pvt->chancounts_dev) {
-			put_device(pvt->addrmatch_dev);
-			device_del(pvt->addrmatch_dev);
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto err_del_addrmatch;
 		}
 
 		pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1199,9 +1198,18 @@
 
 		rc = device_add(pvt->chancounts_dev);
 		if (rc < 0)
-			return rc;
+			goto err_put_chancounts;
 	}
 	return 0;
+
+err_put_chancounts:
+	put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+	device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+	put_device(pvt->addrmatch_dev);
+
+	return rc;
 }
 
 static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1211,11 +1219,11 @@
 	edac_dbg(1, "\n");
 
 	if (!pvt->is_registered) {
-		put_device(pvt->chancounts_dev);
 		device_del(pvt->chancounts_dev);
+		put_device(pvt->chancounts_dev);
 	}
-	put_device(pvt->addrmatch_dev);
 	device_del(pvt->addrmatch_dev);
+	put_device(pvt->addrmatch_dev);
 }
 
 /****************************************************************************
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 2b108fa..b1ef5c4 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/acpi.h>
+#include <linux/delay.h>
 
 #define USB_GPIO_DEBOUNCE_MS	20	/* ms */
 
@@ -36,6 +37,7 @@
 
 	struct gpio_desc *id_gpiod;
 	struct gpio_desc *vbus_gpiod;
+	struct gpio_desc *trig_gpiod;
 	int id_irq;
 	int vbus_irq;
 
@@ -87,6 +89,12 @@
 
 	if (!id) {
 		extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+		if (info->trig_gpiod) {
+			gpiod_set_value(info->trig_gpiod, 1);
+			msleep(20);
+			gpiod_set_value(info->trig_gpiod, 0);
+			msleep(20);
+		}
 	} else {
 		if (vbus)
 			extcon_set_state_sync(info->edev, EXTCON_USB, true);
@@ -121,6 +129,8 @@
 	info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
 	info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
 						   GPIOD_IN);
+	info->trig_gpiod = devm_gpiod_get_optional(&pdev->dev, "trig",
+						   GPIOD_OUT_LOW);
 
 	if (!info->id_gpiod && !info->vbus_gpiod) {
 		dev_err(dev, "failed to get gpios\n");
@@ -133,6 +143,9 @@
 	if (IS_ERR(info->vbus_gpiod))
 		return PTR_ERR(info->vbus_gpiod);
 
+	if (IS_ERR(info->trig_gpiod))
+		return PTR_ERR(info->trig_gpiod);
+
 	info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
 	if (IS_ERR(info->edev)) {
 		dev_err(dev, "failed to allocate extcon device\n");
@@ -198,8 +211,13 @@
 	platform_set_drvdata(pdev, info);
 	device_init_wakeup(dev, true);
 
-	/* Perform initial detection */
-	usb_extcon_detect_cable(&info->wq_detcable.work);
+	if (info->trig_gpiod)
+		/* Schedule with delay to reset ethernet bridge */
+		queue_delayed_work(system_power_efficient_wq,
+			&info->wq_detcable, msecs_to_jiffies(1500));
+	else
+		/* Perform initial detection */
+		usb_extcon_detect_cable(&info->wq_detcable.work);
 
 	return 0;
 }
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 8efe130..1d1c969 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -250,7 +250,6 @@
 	reserve_regions();
 	efi_memattr_init();
 	efi_esrt_init();
-	efi_memmap_unmap();
 
 	memblock_reserve(params.mmap & PAGE_MASK,
 			 PAGE_ALIGN(params.mmap_size +
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 6bdf39e..4d788e0d 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -118,11 +118,13 @@
 {
 	u64 mapsize;
 
-	if (!efi_enabled(EFI_BOOT)) {
+	if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
 		pr_info("EFI services will not be available.\n");
 		return 0;
 	}
 
+	efi_memmap_unmap();
+
 	if (efi_runtime_disabled()) {
 		pr_info("EFI runtime services will be disabled.\n");
 		return 0;
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 311c9d0..241dd7c 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -333,7 +333,8 @@
 
 	end = esrt_data + size;
 	pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
-	efi_mem_reserve(esrt_data, esrt_data_size);
+	if (md.type == EFI_BOOT_SERVICES_DATA)
+		efi_mem_reserve(esrt_data, esrt_data_size);
 
 	pr_debug("esrt-init: loaded.\n");
 err_memunmap:
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 68418a6..beba663 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -143,4 +143,3 @@
 obj-$(CONFIG_GPIO_ZX)		+= gpio-zx.o
 obj-$(CONFIG_GPIO_LOONGSON1)	+= gpio-loongson1.o
 obj-$(CONFIG_MSM_SMP2P)		+= gpio-msm-smp2p.o
-obj-$(CONFIG_MSM_SMP2P_TEST)	+= gpio-msm-smp2p-test.o
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index c0f718b..c85407a 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@
 	uint8_t int_en[3];
 	uint8_t irq_mask[3];
 	uint8_t irq_stat[3];
+	uint8_t int_input_en[3];
+	uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@
 	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
 	int i;
 
-	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+		if (dev->int_input_en[i]) {
+			mutex_lock(&dev->lock);
+			dev->dir[i] &= ~dev->int_input_en[i];
+			dev->int_input_en[i] = 0;
+			adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+					   dev->dir[i]);
+			mutex_unlock(&dev->lock);
+		}
+
+		if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+			dev->int_lvl_cached[i] = dev->int_lvl[i];
+			adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+					   dev->int_lvl[i]);
+		}
+
 		if (dev->int_en[i] ^ dev->irq_mask[i]) {
 			dev->int_en[i] = dev->irq_mask[i];
 			adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
 					   dev->int_en[i]);
 		}
+	}
 
 	mutex_unlock(&dev->irq_lock);
 }
@@ -221,9 +239,7 @@
 	else
 		return -EINVAL;
 
-	adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-	adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-			   dev->int_lvl[bank]);
+	dev->int_input_en[bank] |= bit;
 
 	return 0;
 }
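
A minimal sketch of the locking pattern the adp5588 hunk above moves to, assuming a generic I2C/regmap expander (all my_* names, MY_REG_DIR and the bank layout are illustrative, not from this driver): the atomic irq_set_type() callback only records the pending direction change, and the sleeping bus write is deferred to irq_bus_sync_unlock(), where blocking is allowed.

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

#define MY_NBANKS	3
#define MY_REG_DIR(b)	(0x10 + (b))	/* assumed register layout */

struct my_chip {
	struct regmap *regmap;
	struct mutex irq_lock;		/* taken in irq_bus_lock(), not shown */
	u8 dir[MY_NBANKS];
	u8 int_input_en[MY_NBANKS];
};

static int my_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct my_chip *chip = irq_data_get_irq_chip_data(d);
	unsigned int bank = d->hwirq / 8;

	/* atomic context: only note that this line must become an input */
	chip->int_input_en[bank] |= BIT(d->hwirq % 8);
	return 0;
}

static void my_irq_bus_sync_unlock(struct irq_data *d)
{
	struct my_chip *chip = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < MY_NBANKS; i++) {
		if (chip->int_input_en[i]) {
			chip->dir[i] &= ~chip->int_input_en[i];
			chip->int_input_en[i] = 0;
			/* sleeping bus access is safe here */
			regmap_write(chip->regmap, MY_REG_DIR(i),
				     chip->dir[i]);
		}
	}
	mutex_unlock(&chip->irq_lock);
}
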
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a1210e3..95061d2 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -56,9 +56,9 @@
 		rnd = fls(debounce) - 1;
 
 		if (rnd && (debounce & BIT(rnd - 1)))
-			debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+			debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
 		else
-			debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+			debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
 
 		if (debounce > MEN_Z127_DB_MAX_US)
 			debounce = MEN_Z127_DB_MAX_US;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 796a5a4..e9b6074 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -495,9 +495,10 @@
 
 	chip = chip_save;
 err_gpiochip_add:
+	chip = chip_save;
 	while (--i >= 0) {
-		chip--;
 		gpiochip_remove(&chip->gpio);
+		chip++;
 	}
 	kfree(chip_save);
 
diff --git a/drivers/gpio/gpio-msm-smp2p-test.c b/drivers/gpio/gpio-msm-smp2p-test.c
deleted file mode 100644
index 1067c4a..0000000
--- a/drivers/gpio/gpio-msm-smp2p-test.c
+++ /dev/null
@@ -1,763 +0,0 @@
-/* drivers/gpio/gpio-msm-smp2p-test.c
- *
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/gpio.h>
-#include <linux/debugfs.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/bitmap.h>
-#include "../soc/qcom/smp2p_private.h"
-#include "../soc/qcom/smp2p_test_common.h"
-
-/* Interrupt callback data */
-struct gpio_info {
-	int gpio_base_id;
-	int irq_base_id;
-
-	bool initialized;
-	struct completion cb_completion;
-	int cb_count;
-	DECLARE_BITMAP(triggered_irqs, SMP2P_BITS_PER_ENTRY);
-};
-
-/* GPIO Inbound/Outbound callback info */
-struct gpio_inout {
-	struct gpio_info in;
-	struct gpio_info out;
-};
-
-static struct gpio_inout gpio_info[SMP2P_NUM_PROCS];
-
-/**
- * Init/reset the callback data.
- *
- * @info: Pointer to callback data
- */
-static void cb_data_reset(struct gpio_info *info)
-{
-	int n;
-
-	if (!info)
-		return;
-
-	if (!info->initialized) {
-		init_completion(&info->cb_completion);
-		info->initialized = true;
-	}
-	info->cb_count = 0;
-
-	for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n)
-		clear_bit(n,  info->triggered_irqs);
-
-	reinit_completion(&info->cb_completion);
-}
-
-static int smp2p_gpio_test_probe(struct platform_device *pdev)
-{
-	int id;
-	int cnt;
-	struct device_node *node = pdev->dev.of_node;
-	struct gpio_info *gpio_info_ptr = NULL;
-
-	/*
-	 * NOTE:  This does a string-lookup of the GPIO pin name and doesn't
-	 * actually directly link to the SMP2P GPIO driver since all
-	 * GPIO/Interrupt access must be through standard
-	 * Linux GPIO / Interrupt APIs.
-	 */
-	if (strcmp("qcom,smp2pgpio_test_smp2p_1_in", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].in;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_1_out", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].out;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_2_in", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].in;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_2_out", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].out;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_3_in", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].in;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_3_out", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].out;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_4_in", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].in;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_4_out", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].out;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_5_in", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].in;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_5_out", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].out;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_7_in", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].in;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_7_out", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].out;
-	} else if (strcmp("qcom,smp2pgpio_test_smp2p_15_in", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
-	} else if (
-		strcmp("qcom,smp2pgpio_test_smp2p_15_out", node->name) == 0) {
-		gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
-	} else {
-		pr_err("%s: unable to match device type '%s'\n",
-				__func__, node->name);
-		return -ENODEV;
-	}
-
-	/* retrieve the GPIO and interrupt ID's */
-	cnt = of_gpio_count(node);
-	if (cnt && gpio_info_ptr) {
-		/*
-		 * Instead of looping through all 32-bits, we can just get the
-		 * first pin to get the base IDs.  This saves on the verbosity
-		 * of the device tree nodes as well.
-		 */
-		id = of_get_gpio(node, 0);
-		if (id == -EPROBE_DEFER)
-			return id;
-		gpio_info_ptr->gpio_base_id = id;
-		gpio_info_ptr->irq_base_id = gpio_to_irq(id);
-	}
-	return 0;
-}
-
-/*
- * NOTE:  Instead of match table and device driver, you may be able to just
- * call of_find_compatible_node() in your init function.
- */
-static const struct of_device_id msm_smp2p_match_table[] = {
-	/* modem */
-	{.compatible = "qcom,smp2pgpio_test_smp2p_1_out", },
-	{.compatible = "qcom,smp2pgpio_test_smp2p_1_in", },
-
-	/* audio (adsp) */
-	{.compatible = "qcom,smp2pgpio_test_smp2p_2_out", },
-	{.compatible = "qcom,smp2pgpio_test_smp2p_2_in", },
-
-	/* sensor */
-	{.compatible = "qcom,smp2pgpio_test_smp2p_3_out", },
-	{.compatible = "qcom,smp2pgpio_test_smp2p_3_in", },
-
-	/* wcnss */
-	{.compatible = "qcom,smp2pgpio_test_smp2p_4_out", },
-	{.compatible = "qcom,smp2pgpio_test_smp2p_4_in", },
-
-	/* CDSP */
-	{.compatible = "qcom,smp2pgpio_test_smp2p_5_out", },
-	{.compatible = "qcom,smp2pgpio_test_smp2p_5_in", },
-
-	/* TZ */
-	{.compatible = "qcom,smp2pgpio_test_smp2p_7_out", },
-	{.compatible = "qcom,smp2pgpio_test_smp2p_7_in", },
-
-	/* mock loopback */
-	{.compatible = "qcom,smp2pgpio_test_smp2p_15_out", },
-	{.compatible = "qcom,smp2pgpio_test_smp2p_15_in", },
-	{},
-};
-
-static struct platform_driver smp2p_gpio_driver = {
-	.probe = smp2p_gpio_test_probe,
-	.driver = {
-		.name = "smp2pgpio_test",
-		.owner = THIS_MODULE,
-		.of_match_table = msm_smp2p_match_table,
-	},
-};
-
-/**
- * smp2p_ut_local_gpio_out - Verify outbound functionality.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_local_gpio_out(struct seq_file *s)
-{
-	int failed = 0;
-	struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
-	int ret;
-	int id;
-	struct msm_smp2p_remote_mock *mock;
-
-	seq_printf(s, "Running %s\n", __func__);
-	do {
-		/* initialize mock edge */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		mock = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(mock, !=, NULL);
-
-		mock->rx_interrupt_count = 0;
-		memset(&mock->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
-			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
-			0, 1);
-		strlcpy(mock->remote_item.entries[0].name, "smp2p",
-			SMP2P_MAX_ENTRY_NAME);
-		SMP2P_SET_ENT_VALID(
-			mock->remote_item.header.valid_total_ent, 1);
-		msm_smp2p_set_remote_mock_exists(true);
-		mock->tx_interrupt();
-
-		/* open GPIO entry */
-		smp2p_gpio_open_test_entry("smp2p",
-				SMP2P_REMOTE_MOCK_PROC, true);
-
-		/* verify set/get functions */
-		UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			int pin = cb_info->gpio_base_id + id;
-
-			mock->rx_interrupt_count = 0;
-			gpio_set_value(pin, 1);
-			UT_ASSERT_INT(1, ==, mock->rx_interrupt_count);
-			UT_ASSERT_INT(1, ==, gpio_get_value(pin));
-
-			gpio_set_value(pin, 0);
-			UT_ASSERT_INT(2, ==, mock->rx_interrupt_count);
-			UT_ASSERT_INT(0, ==, gpio_get_value(pin));
-		}
-		if (failed)
-			break;
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-
-	smp2p_gpio_open_test_entry("smp2p",
-			SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_irq - Interrupt handler for inbound entries.
- *
- * @irq:         Virtual IRQ being triggered
- * @data:        Cookie data (struct gpio_info * in this case)
- * @returns:     Number of bytes written
- */
-static irqreturn_t smp2p_gpio_irq(int irq, void *data)
-{
-	struct gpio_info *gpio_ptr = (struct gpio_info *)data;
-	int offset;
-
-	if (!gpio_ptr) {
-		pr_err("%s: gpio_ptr is NULL for irq %d\n", __func__, irq);
-		return IRQ_HANDLED;
-	}
-
-	offset = irq - gpio_ptr->irq_base_id;
-	if (offset >= 0 &&  offset < SMP2P_BITS_PER_ENTRY)
-		set_bit(offset, gpio_ptr->triggered_irqs);
-	else
-		pr_err("%s: invalid irq offset base %d; irq %d\n",
-			__func__, gpio_ptr->irq_base_id, irq);
-
-	++gpio_ptr->cb_count;
-	complete(&gpio_ptr->cb_completion);
-	return IRQ_HANDLED;
-}
-
-/**
- * smp2p_ut_local_gpio_in - Verify inbound functionality.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_local_gpio_in(struct seq_file *s)
-{
-	int failed = 0;
-	struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
-	int id;
-	int ret;
-	int virq;
-	struct msm_smp2p_remote_mock *mock;
-
-	seq_printf(s, "Running %s\n", __func__);
-
-	cb_data_reset(cb_info);
-	do {
-		/* initialize mock edge */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		mock = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(mock, !=, NULL);
-
-		mock->rx_interrupt_count = 0;
-		memset(&mock->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
-			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
-			0, 1);
-		strlcpy(mock->remote_item.entries[0].name, "smp2p",
-			SMP2P_MAX_ENTRY_NAME);
-		SMP2P_SET_ENT_VALID(
-			mock->remote_item.header.valid_total_ent, 1);
-		msm_smp2p_set_remote_mock_exists(true);
-		mock->tx_interrupt();
-
-		smp2p_gpio_open_test_entry("smp2p",
-				SMP2P_REMOTE_MOCK_PROC, true);
-
-		/* verify set/get functions locally */
-		UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			int pin;
-			int current_value;
-
-			/* verify pin value cannot be set */
-			pin = cb_info->gpio_base_id + id;
-			current_value = gpio_get_value(pin);
-
-			gpio_set_value(pin, 0);
-			UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
-			gpio_set_value(pin, 1);
-			UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
-
-			/* verify no interrupts */
-			UT_ASSERT_INT(0, ==, cb_info->cb_count);
-		}
-		if (failed)
-			break;
-
-		/* register for interrupts */
-		UT_ASSERT_INT(0, <, cb_info->irq_base_id);
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			virq = cb_info->irq_base_id + id;
-			UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
-			ret = request_irq(virq,
-					smp2p_gpio_irq,	IRQF_TRIGGER_RISING,
-					"smp2p_test", cb_info);
-			UT_ASSERT_INT(0, ==, ret);
-		}
-		if (failed)
-			break;
-
-		/* verify both rising and falling edge interrupts */
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			virq = cb_info->irq_base_id + id;
-			irq_set_irq_type(virq, IRQ_TYPE_EDGE_BOTH);
-			cb_data_reset(cb_info);
-
-			/* verify rising-edge interrupt */
-			mock->remote_item.entries[0].entry = 1 << id;
-			mock->tx_interrupt();
-			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-			UT_ASSERT_INT(0, <,
-				test_bit(id, cb_info->triggered_irqs));
-			test_bit(id, cb_info->triggered_irqs);
-
-			/* verify falling-edge interrupt */
-			mock->remote_item.entries[0].entry = 0;
-			mock->tx_interrupt();
-			UT_ASSERT_INT(cb_info->cb_count, ==, 2);
-			UT_ASSERT_INT(0, <,
-					test_bit(id, cb_info->triggered_irqs));
-		}
-		if (failed)
-			break;
-
-		/* verify rising-edge interrupts */
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			virq = cb_info->irq_base_id + id;
-			irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
-			cb_data_reset(cb_info);
-
-			/* verify only rising-edge interrupt is triggered */
-			mock->remote_item.entries[0].entry = 1 << id;
-			mock->tx_interrupt();
-			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-			UT_ASSERT_INT(0, <,
-				test_bit(id, cb_info->triggered_irqs));
-			test_bit(id, cb_info->triggered_irqs);
-
-			mock->remote_item.entries[0].entry = 0;
-			mock->tx_interrupt();
-			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-			UT_ASSERT_INT(0, <,
-				test_bit(id, cb_info->triggered_irqs));
-		}
-		if (failed)
-			break;
-
-		/* verify falling-edge interrupts */
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			virq = cb_info->irq_base_id + id;
-			irq_set_irq_type(virq, IRQ_TYPE_EDGE_FALLING);
-			cb_data_reset(cb_info);
-
-			/* verify only rising-edge interrupt is triggered */
-			mock->remote_item.entries[0].entry = 1 << id;
-			mock->tx_interrupt();
-			UT_ASSERT_INT(cb_info->cb_count, ==, 0);
-			UT_ASSERT_INT(0, ==,
-				test_bit(id, cb_info->triggered_irqs));
-
-			mock->remote_item.entries[0].entry = 0;
-			mock->tx_interrupt();
-			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
-			UT_ASSERT_INT(0, <,
-				test_bit(id, cb_info->triggered_irqs));
-		}
-		if (failed)
-			break;
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-
-	/* unregister for interrupts */
-	if (cb_info->irq_base_id) {
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
-			free_irq(cb_info->irq_base_id + id, cb_info);
-	}
-
-	smp2p_gpio_open_test_entry("smp2p",
-			SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_ut_local_gpio_in_update_open - Verify combined open/update.
- *
- * @s:   pointer to output file
- *
- * If the remote side updates the SMP2P bits and sends before negotiation is
- * complete, then the UPDATE event will have to be delayed until negotiation is
- * complete.  This should result in both the OPEN and UPDATE events coming in
- * right after each other and the behavior should be transparent to the clients
- * of SMP2P GPIO.
- */
-static void smp2p_ut_local_gpio_in_update_open(struct seq_file *s)
-{
-	int failed = 0;
-	struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
-	int id;
-	int ret;
-	int virq;
-	struct msm_smp2p_remote_mock *mock;
-
-	seq_printf(s, "Running %s\n", __func__);
-
-	cb_data_reset(cb_info);
-	do {
-		/* initialize mock edge */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		mock = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(mock, !=, NULL);
-
-		mock->rx_interrupt_count = 0;
-		memset(&mock->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
-			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
-			0, 1);
-		strlcpy(mock->remote_item.entries[0].name, "smp2p",
-			SMP2P_MAX_ENTRY_NAME);
-		SMP2P_SET_ENT_VALID(
-			mock->remote_item.header.valid_total_ent, 1);
-
-		/* register for interrupts */
-		smp2p_gpio_open_test_entry("smp2p",
-				SMP2P_REMOTE_MOCK_PROC, true);
-
-		UT_ASSERT_INT(0, <, cb_info->irq_base_id);
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			virq = cb_info->irq_base_id + id;
-			UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
-			ret = request_irq(virq,
-					smp2p_gpio_irq,	IRQ_TYPE_EDGE_BOTH,
-					"smp2p_test", cb_info);
-			UT_ASSERT_INT(0, ==, ret);
-		}
-		if (failed)
-			break;
-
-		/* update the state value and complete negotiation */
-		mock->remote_item.entries[0].entry = 0xDEADDEAD;
-		msm_smp2p_set_remote_mock_exists(true);
-		mock->tx_interrupt();
-
-		/* verify delayed state updates were processed */
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			virq = cb_info->irq_base_id + id;
-
-			UT_ASSERT_INT(cb_info->cb_count, >, 0);
-			if (0x1 & (0xDEADDEAD >> id)) {
-				/* rising edge should have been triggered */
-				if (!test_bit(id, cb_info->triggered_irqs)) {
-					seq_printf(s, "%s:%d bit %d clear, ",
-						__func__, __LINE__, id);
-					seq_puts(s, "expected set\n");
-					failed = 1;
-					break;
-				}
-			} else {
-				/* edge should not have been triggered */
-				if (test_bit(id, cb_info->triggered_irqs)) {
-					seq_printf(s, "%s:%d bit %d set, ",
-						__func__, __LINE__, id);
-					seq_puts(s, "expected clear\n");
-					failed = 1;
-					break;
-				}
-			}
-		}
-		if (failed)
-			break;
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-
-	/* unregister for interrupts */
-	if (cb_info->irq_base_id) {
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
-			free_irq(cb_info->irq_base_id + id, cb_info);
-	}
-
-	smp2p_gpio_open_test_entry("smp2p",
-			SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_write_bits - writes value to each GPIO pin specified in mask.
- *
- * @gpio: gpio test structure
- * @mask: 1 = write gpio_value to this GPIO pin
- * @gpio_value: value to write to GPIO pin
- */
-static void smp2p_gpio_write_bits(struct gpio_info *gpio, uint32_t mask,
-	int gpio_value)
-{
-	int n;
-
-	for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
-		if (mask & 0x1)
-			gpio_set_value(gpio->gpio_base_id + n, gpio_value);
-		mask >>= 1;
-	}
-}
-
-static void smp2p_gpio_set_bits(struct gpio_info *gpio, uint32_t mask)
-{
-	smp2p_gpio_write_bits(gpio, mask, 1);
-}
-
-static void smp2p_gpio_clr_bits(struct gpio_info *gpio, uint32_t mask)
-{
-	smp2p_gpio_write_bits(gpio, mask, 0);
-}
-
-/**
- * smp2p_gpio_get_value - reads entire 32-bits of GPIO
- *
- * @gpio: gpio structure
- * @returns: 32 bit value of GPIO pins
- */
-static uint32_t smp2p_gpio_get_value(struct gpio_info *gpio)
-{
-	int n;
-	uint32_t value = 0;
-
-	for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
-		if (gpio_get_value(gpio->gpio_base_id + n))
-			value |= 1 << n;
-	}
-	return value;
-}
-
-/**
- * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
- *
- * @s:   pointer to output file
- * @remote_pid:  Remote processor to test
- * @name:        Name of the test for reporting
- *
- * This test verifies inbound/outbound functionality for the remote processor.
- */
-static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid,
-		const char *name)
-{
-	int failed = 0;
-	uint32_t request;
-	uint32_t response;
-	struct gpio_info *cb_in;
-	struct gpio_info *cb_out;
-	int id;
-	int ret;
-
-	seq_printf(s, "Running %s for '%s' remote pid %d\n",
-		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-	cb_in = &gpio_info[remote_pid].in;
-	cb_out = &gpio_info[remote_pid].out;
-	cb_data_reset(cb_in);
-	cb_data_reset(cb_out);
-	do {
-		/* open test entries */
-		msm_smp2p_deinit_rmt_lpb_proc(remote_pid);
-		smp2p_gpio_open_test_entry("smp2p", remote_pid, true);
-
-		/* register for interrupts */
-		UT_ASSERT_INT(0, <, cb_in->gpio_base_id);
-		UT_ASSERT_INT(0, <, cb_in->irq_base_id);
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
-			int virq = cb_in->irq_base_id + id;
-
-			UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
-			ret = request_irq(virq,
-				smp2p_gpio_irq,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				"smp2p_test", cb_in);
-			UT_ASSERT_INT(0, ==, ret);
-		}
-		if (failed)
-			break;
-
-		/* write echo of data value 0 */
-		UT_ASSERT_INT(0, <, cb_out->gpio_base_id);
-		request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE(request, 1);
-		SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
-		SMP2P_SET_RMT_DATA(request, 0x0);
-
-		smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-		smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
-		smp2p_gpio_set_bits(cb_out, request);
-
-		UT_ASSERT_INT(cb_in->cb_count, ==, 0);
-		smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
-		/* verify response */
-		do {
-			/* wait for up to 32 changes */
-			if (wait_for_completion_timeout(
-					&cb_in->cb_completion, HZ / 2) == 0)
-				break;
-			reinit_completion(&cb_in->cb_completion);
-		} while (cb_in->cb_count < 32);
-		UT_ASSERT_INT(cb_in->cb_count, >, 0);
-		response = smp2p_gpio_get_value(cb_in);
-		SMP2P_SET_RMT_CMD_TYPE(request, 0);
-		UT_ASSERT_HEX(request, ==, response);
-
-		/* write echo of data value of all 1's */
-		request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE(request, 1);
-		SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
-		SMP2P_SET_RMT_DATA(request, ~0);
-
-		smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-		cb_data_reset(cb_in);
-		smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
-		smp2p_gpio_set_bits(cb_out, request);
-
-		UT_ASSERT_INT(cb_in->cb_count, ==, 0);
-		smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
-		/* verify response including 24 interrupts */
-		do {
-			UT_ASSERT_INT(
-				(int)wait_for_completion_timeout(
-					&cb_in->cb_completion, HZ / 2),
-			   >, 0);
-			reinit_completion(&cb_in->cb_completion);
-		} while (cb_in->cb_count < 24);
-		response = smp2p_gpio_get_value(cb_in);
-		SMP2P_SET_RMT_CMD_TYPE(request, 0);
-		UT_ASSERT_HEX(request, ==, response);
-		UT_ASSERT_INT(24, ==, cb_in->cb_count);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", name);
-		seq_puts(s, "\tFailed\n");
-	}
-
-	/* unregister for interrupts */
-	if (cb_in->irq_base_id) {
-		for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
-			free_irq(cb_in->irq_base_id + id, cb_in);
-	}
-
-	smp2p_gpio_open_test_entry("smp2p",	remote_pid, false);
-	msm_smp2p_init_rmt_lpb_proc(remote_pid);
-}
-
-/**
- * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
- *
- * @s:   pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processor.
- */
-static void smp2p_ut_remote_inout(struct seq_file *s)
-{
-	struct smp2p_interrupt_config *int_cfg;
-	int pid;
-
-	int_cfg = smp2p_get_interrupt_config();
-	if (!int_cfg) {
-		seq_puts(s, "Remote processor config unavailable\n");
-		return;
-	}
-
-	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-		if (!int_cfg[pid].is_configured)
-			continue;
-
-		smp2p_ut_remote_inout_core(s, pid, __func__);
-	}
-}
-
-static int __init smp2p_debugfs_init(void)
-{
-	/* register GPIO pins */
-	(void)platform_driver_register(&smp2p_gpio_driver);
-
-	/*
-	 * Add Unit Test entries.
-	 *
-	 * The idea with unit tests is that you can run all of them
-	 * from ADB shell by doing:
-	 *  adb shell
-	 *  cat ut*
-	 *
-	 * And if particular tests fail, you can then repeatedly run the
-	 * failing tests as you debug and resolve the failing test.
-	 */
-	smp2p_debug_create("ut_local_gpio_out", smp2p_ut_local_gpio_out);
-	smp2p_debug_create("ut_local_gpio_in", smp2p_ut_local_gpio_in);
-	smp2p_debug_create("ut_local_gpio_in_update_open",
-		smp2p_ut_local_gpio_in_update_open);
-	smp2p_debug_create("ut_remote_gpio_inout", smp2p_ut_remote_inout);
-	return 0;
-}
-late_initcall(smp2p_debugfs_init);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 76ac906..7a63058 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -660,6 +660,8 @@
 	pchip->irq0 = irq0;
 	pchip->irq1 = irq1;
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
 	gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
 				     resource_size(res));
 	if (!gpio_reg_base)
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 661b0e3..05d3241 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -723,4 +723,4 @@
 {
 	return platform_driver_register(&tegra_gpio_driver);
 }
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 193f15d..aac8432 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -31,6 +31,7 @@
 	struct of_phandle_args *gpiospec = data;
 
 	return chip->gpiodev->dev.of_node == gpiospec->np &&
+				chip->of_xlate &&
 				chip->of_xlate(chip, gpiospec, NULL) >= 0;
 }
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index dd00764..2ec402a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -471,7 +471,7 @@
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 346fbda..6c4d728 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -85,7 +85,7 @@
 };
 
 /* gpio suffixes used for ACPI and device tree lookup */
-static const char * const gpio_suffixes[] = { "gpios", "gpio" };
+static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
 
 #ifdef CONFIG_OF_GPIO
 struct gpio_desc *of_find_gpio(struct device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 564362e..c8a5cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5551,6 +5551,11 @@
 	if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
 		return 0;
 
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
@@ -5586,7 +5591,11 @@
 	default:
 		break;
 	}
-
+		/* Schedule with a delay to reset the Ethernet bridge */
+				AMD_PG_SUPPORT_RLC_SMU_HS |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_GFX_DMG))
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 71d2856..f61c489 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1350,8 +1350,6 @@
 		return ret;
 	}
 
-	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
 	if (adev->irq.installed &&
 	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
 		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -3086,7 +3084,7 @@
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3fa8320..4826bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6959,7 +6959,6 @@
 
 	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 	si_thermal_start_thermal_controller(adev);
-	ni_update_current_ps(adev, boot_ps);
 
 	return 0;
 }
@@ -7836,7 +7835,7 @@
 	else
 		adev->pm.dpm_enabled = true;
 	mutex_unlock(&adev->pm.mutex);
-
+	amdgpu_pm_compute_clocks(adev);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 171480b..6e7eb76 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -124,6 +124,8 @@
 		return ERR_PTR(-EINVAL);
 
 	process = find_process(thread);
+	if (!process)
+		return ERR_PTR(-EINVAL);
 
 	return process;
 }
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index a6132f1..5eee325 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -432,7 +432,8 @@
 				.vsync_irq = MALIDP500_DE_IRQ_VSYNC,
 			},
 			.se_irq_map = {
-				.irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+				.irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+					    MALIDP500_SE_IRQ_GLOBAL,
 				.vsync_irq = 0,
 			},
 			.dc_irq_map = {
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8..345dc4d 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@
 	CFG_ALPHAM_GRA		= 0x1 << 16,
 	CFG_ALPHAM_CFG		= 0x2 << 16,
 	CFG_ALPHA_MASK		= 0xff << 8,
+#define CFG_ALPHA(x)		((x) << 8)
 	CFG_PIXCMD_MASK		= 0xff,
 };
 
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 152b4e7..6a9bba7 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -27,6 +27,7 @@
 	uint16_t contrast;
 	uint16_t saturation;
 	uint32_t colorkey_mode;
+	uint32_t colorkey_enable;
 };
 
 struct armada_ovl_plane {
@@ -62,11 +63,13 @@
 	writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
 
 	spin_lock_irq(&dcrtc->irq_lock);
-	armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
-		     CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
-		     dcrtc->base + LCD_SPU_DMA_CTRL1);
-
-	armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+	armada_updatel(prop->colorkey_mode,
+		       CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+		       dcrtc->base + LCD_SPU_DMA_CTRL1);
+	if (dcrtc->variant->has_spu_adv_reg)
+		armada_updatel(prop->colorkey_enable,
+			       ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+			       dcrtc->base + LCD_SPU_ADV_REG);
 	spin_unlock_irq(&dcrtc->irq_lock);
 }
 
@@ -340,8 +343,17 @@
 		dplane->prop.colorkey_vb |= K2B(val);
 		update_attr = true;
 	} else if (property == priv->colorkey_mode_prop) {
-		dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
-		dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+		if (val == CKMODE_DISABLE) {
+			dplane->prop.colorkey_mode =
+				CFG_CKMODE(CKMODE_DISABLE) |
+				CFG_ALPHAM_CFG | CFG_ALPHA(255);
+			dplane->prop.colorkey_enable = 0;
+		} else {
+			dplane->prop.colorkey_mode =
+				CFG_CKMODE(val) |
+				CFG_ALPHAM_GRA | CFG_ALPHA(0);
+			dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+		}
 		update_attr = true;
 	} else if (property == priv->brightness_prop) {
 		dplane->prop.brightness = val - 256;
@@ -470,7 +482,9 @@
 	dplane->prop.colorkey_yr = 0xfefefe00;
 	dplane->prop.colorkey_ug = 0x01010100;
 	dplane->prop.colorkey_vb = 0x01010100;
-	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+				     CFG_ALPHAM_GRA | CFG_ALPHA(0);
+	dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
 	dplane->prop.brightness = 0;
 	dplane->prop.contrast = 0x4000;
 	dplane->prop.saturation = 0x4000;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a68f94d..32ab5c3 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -424,6 +424,18 @@
 	else
 		status = connector_status_disconnected;
 
+	/*
+	 * The bridge resets its registers on unplug. So when we get a plug
+	 * event and we're already supposed to be powered, cycle the bridge to
+	 * restore its state.
+	 */
+	if (status == connector_status_connected &&
+	    adv7511->connector.status == connector_status_disconnected &&
+	    adv7511->powered) {
+		regcache_mark_dirty(adv7511->regmap);
+		adv7511_power_on(adv7511);
+	}
+
 	if (adv7511->connector.status != status) {
 		adv7511->connector.status = status;
 		drm_kms_helper_hotplug_event(adv7511->connector.dev);
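
For context, a hedged sketch of the regmap idiom the adv7511 hunk relies on (assuming adv7511_power_on() ends up syncing the register cache; my_restore_after_reset is an illustrative name): once the hardware has lost its register state, mark the cache dirty and regcache_sync() replays every cached register.

#include <linux/regmap.h>

static int my_restore_after_reset(struct regmap *map)
{
	/* declare the hardware out of sync with the register cache */
	regcache_mark_dirty(map);

	/* rewrite all cached register values to the device */
	return regcache_sync(map);
}
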
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 33778bf..4e4043f 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1134,7 +1134,9 @@
 {
 	struct drm_plane *plane = plane_state->plane;
 	struct drm_crtc_state *crtc_state;
-
+	/* Nothing to do for the same crtc */
+	if (plane_state->crtc == crtc)
+		return 0;
 	if (plane_state->crtc) {
 		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
 						       plane_state->crtc);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 03a9f20..46fd6c3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -118,6 +118,9 @@
 	/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
 	{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
 
+	/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
+	{ "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+
 	/* Belinea 10 15 55 */
 	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
 	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6dd09c3..bdcc6ec 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -199,7 +199,7 @@
 	unsigned long val;
 
 	val = readl(ctx->addr + DECON_WINCONx(win));
-	val &= ~WINCONx_BPPMODE_MASK;
+	val &= WINCONx_ENWIN_F;
 
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_XRGB1555:
@@ -291,8 +291,8 @@
 		COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
 	writel(val, ctx->addr + DECON_VIDOSDxB(win));
 
-	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
-		VIDOSD_Wx_ALPHA_B_F(0x0);
+	val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+		VIDOSD_Wx_ALPHA_B_F(0xff);
 	writel(val, ctx->addr + DECON_VIDOSDxC(win));
 
 	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 52a9d26..4c81c79 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -532,21 +532,25 @@
 			GSC_IN_CHROMA_ORDER_CRCB);
 		break;
 	case DRM_FORMAT_NV21:
+		cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+		break;
 	case DRM_FORMAT_NV61:
-		cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
-			GSC_IN_YUV420_2P);
+		cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
 		break;
 	case DRM_FORMAT_YUV422:
 		cfg |= GSC_IN_YUV422_3P;
 		break;
 	case DRM_FORMAT_YUV420:
+		cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+		break;
 	case DRM_FORMAT_YVU420:
-		cfg |= GSC_IN_YUV420_3P;
+		cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
 		break;
 	case DRM_FORMAT_NV12:
+		cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+		break;
 	case DRM_FORMAT_NV16:
-		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
-			GSC_IN_YUV420_2P);
+		cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
 		break;
 	default:
 		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
@@ -806,18 +810,25 @@
 			GSC_OUT_CHROMA_ORDER_CRCB);
 		break;
 	case DRM_FORMAT_NV21:
-	case DRM_FORMAT_NV61:
 		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
 		break;
+	case DRM_FORMAT_NV61:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+		break;
 	case DRM_FORMAT_YUV422:
+		cfg |= GSC_OUT_YUV422_3P;
+		break;
 	case DRM_FORMAT_YUV420:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+		break;
 	case DRM_FORMAT_YVU420:
-		cfg |= GSC_OUT_YUV420_3P;
+		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
 		break;
 	case DRM_FORMAT_NV12:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+		break;
 	case DRM_FORMAT_NV16:
-		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
-			GSC_OUT_YUV420_2P);
+		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
 		break;
 	default:
 		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 4704a99..16b39734 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
 #define GSC_OUT_YUV420_3P		(3 << 4)
 #define GSC_OUT_YUV422_1P		(4 << 4)
 #define GSC_OUT_YUV422_2P		(5 << 4)
+#define GSC_OUT_YUV422_3P		(6 << 4)
 #define GSC_OUT_YUV444			(7 << 4)
 #define GSC_OUT_TILE_TYPE_MASK		(1 << 2)
 #define GSC_OUT_TILE_C_16x8		(0 << 2)
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2a3b7c6..fbd3fa3 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -255,7 +255,7 @@
 extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
 				      const struct drm_display_mode *mode,
 				      struct drm_display_mode *adjusted_mode);
-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
 				     struct drm_display_mode *mode);
 extern int psb_intel_lvds_set_property(struct drm_connector *connector,
 					struct drm_property *property,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 79e9d36..e2c6ba3 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -343,7 +343,7 @@
 	}
 }
 
-int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
 	struct drm_psb_private *dev_priv = connector->dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index c6f780f..555fd47 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -778,6 +778,9 @@
 			    I915_USERPTR_UNSYNCHRONIZED))
 		return -EINVAL;
 
+	if (!args->user_size)
+		return -EINVAL;
+
 	if (offset_in_page(args->user_ptr | args->user_size))
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 02908e3..279d1e0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1684,10 +1684,38 @@
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 {
-	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+	u32 hotplug_status = 0, hotplug_status_mask;
+	int i;
 
-	if (hotplug_status)
+	if (IS_G4X(dev_priv) ||
+	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+	else
+		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+
+	/*
+	 * We absolutely have to clear all the pending interrupt
+	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+	 * interrupt bit won't have an edge, and the i965/g4x
+	 * edge triggered IIR will not notice that an interrupt
+	 * is still pending. We can't use PORT_HOTPLUG_EN to
+	 * guarantee the edge as the act of toggling the enable
+	 * bits can itself generate a new hotplug interrupt :(
+	 */
+	for (i = 0; i < 10; i++) {
+		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+		if (tmp == 0)
+			return hotplug_status;
+
+		hotplug_status |= tmp;
 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	}
+
+	WARN_ONCE(1,
+		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+		  I915_READ(PORT_HOTPLUG_STAT));
 
 	return hotplug_status;
 }
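
A generic sketch of the bounded acknowledge loop described in the comment above (my_ack_status and its parameters are illustrative, not i915 symbols; readl/writel stand in for the driver's MMIO accessors): keep reading, accumulating and write-1-clearing the status bits until the register reads back zero, so the edge-triggered interrupt logic upstream is guaranteed a clean edge.

#include <linux/bug.h>
#include <linux/io.h>

static u32 my_ack_status(void __iomem *reg, u32 mask, int max_tries)
{
	u32 acked = 0;
	int i;

	for (i = 0; i < max_tries; i++) {
		u32 pending = readl(reg) & mask;

		if (!pending)
			return acked;	/* all bits cleared, edge guaranteed */

		acked |= pending;
		writel(pending, reg);	/* write-1-to-clear */
	}

	WARN_ONCE(1, "status bits did not clear (0x%08x)\n", readl(reg));
	return acked;
}
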
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3ce391c2..67881e5 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -634,6 +634,9 @@
 		return PTR_ERR(imx_ldb->regmap);
 	}
 
+	/* disable LDB by resetting the control register to POR default */
+	regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
 	imx_ldb->dev = dev;
 
 	if (of_id)
@@ -675,14 +678,14 @@
 		if (ret || i < 0 || i > 1)
 			return -EINVAL;
 
+		if (!of_device_is_available(child))
+			continue;
+
 		if (dual && i > 0) {
 			dev_warn(dev, "dual-channel mode, ignoring second output\n");
 			continue;
 		}
 
-		if (!of_device_is_available(child))
-			continue;
-
 		channel = &imx_ldb->channel[i];
 		channel->ldb = imx_ldb;
 		channel->chno = i;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 011e3b8..efb36bf 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -70,6 +70,8 @@
 	ctrl->ops.wait_for_cmd_mode_mdp_idle =
 		dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle;
 	ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk;
+	ctrl->ops.wait4dynamic_refresh_done =
+		dsi_ctrl_hw_cmn_wait4dynamic_refresh_done;
 
 	switch (version) {
 	case DSI_CTRL_VERSION_1_4:
@@ -218,6 +220,14 @@
 	phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl;
 	phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
 	phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo;
+	phy->ops.dyn_refresh_ops.dyn_refresh_config =
+		dsi_phy_hw_v3_0_dyn_refresh_config;
+	phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay =
+		dsi_phy_hw_v3_0_dyn_refresh_pipe_delay;
+	phy->ops.dyn_refresh_ops.dyn_refresh_helper =
+		dsi_phy_hw_v3_0_dyn_refresh_helper;
+	phy->ops.dyn_refresh_ops.cache_phy_timings =
+		dsi_phy_hw_v3_0_cache_phy_timings;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index c55bbe0..944dd52 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -66,15 +66,17 @@
  * @mode:       DSI mode information.
  * @host:       DSI host configuration.
  * @timing:     DSI phy lane configurations.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate bit clk.
  *
  * This function setups the catalog information in the dsi_phy_hw object.
  *
  * return: error code for failure and 0 for success.
  */
 int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-	struct dsi_host_common_cfg *host,
-	struct dsi_phy_per_lane_cfgs *timing);
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *host,
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk);
 
 /* Definitions for 14nm PHY hardware driver */
 void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
@@ -226,4 +228,14 @@
 
 void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable);
 
+/* dynamic refresh specific functions */
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset);
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+				struct dsi_phy_cfg *cfg, bool is_master);
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay);
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl);
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size);
 #endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index bdc60d2..cdcb331 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -317,4 +317,18 @@
  */
 int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
 			  struct dsi_clk_link_set *child);
+
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk:       list of src clocks.
+ *
+ * @return:	Zero on success and error code on failure.
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk);
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk:       list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk);
 #endif /* _DSI_CLK_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index fdaf283..9592603f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -113,8 +113,9 @@
 
 /**
  * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
- * @clks:      DSI link clock information.
- * @pixel_clk: Pixel clock rate in KHz.
+ * @clks:	DSI link clock information.
+ * @pixel_clk:	Pixel clock rate in KHz.
+ * @index:	Index of the DSI controller.
  *
  * return: error code in case of failure or 0 for success.
  */
@@ -136,9 +137,9 @@
 
 /**
  * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
- * @client:       DSI clock client pointer.
- * @byte_clk: Pixel clock rate in Hz.
- * @index:      Index of the DSI controller.
+ * @client:	DSI clock client pointer.
+ * @byte_clk:	Byte clock rate in Hz.
+ * @index:	Index of the DSI controller.
  * return: error code in case of failure or 0 for success.
  */
 int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
@@ -146,6 +147,7 @@
 	int rc = 0;
 	struct dsi_clk_client_info *c = client;
 	struct dsi_clk_mngr *mngr;
+	u64 byte_intf_rate;
 
 	mngr = c->mngr;
 	rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk);
@@ -154,8 +156,16 @@
 	else
 		mngr->link_clks[index].freq.byte_clk_rate = byte_clk;
 
-	return rc;
+	if (mngr->link_clks[index].hs_clks.byte_intf_clk) {
+		byte_intf_rate = mngr->link_clks[index].freq.byte_clk_rate / 2;
+		rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_intf_clk,
+				  byte_intf_rate);
+		if (rc)
+			pr_err("failed to set clk rate for byte intf clk=%d\n",
+			       rc);
+	}
 
+	return rc;
 }
 
 /**
@@ -183,6 +193,41 @@
 	return rc;
 }
 
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk:       list of src clocks.
+ *
+ * @return:	Zero on success and error code on failure.
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk)
+{
+	int rc;
+
+	rc = clk_prepare_enable(clk->byte_clk);
+	if (rc) {
+		pr_err("failed to enable byte src clk %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_prepare_enable(clk->pixel_clk);
+	if (rc) {
+		pr_err("failed to enable pixel src clk %d\n", rc);
+		clk_disable_unprepare(clk->byte_clk);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk:       list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk)
+{
+	clk_disable_unprepare(clk->pixel_clk);
+	clk_disable_unprepare(clk->byte_clk);
+}
+
 int dsi_core_clk_start(struct dsi_core_clks *c_clks)
 {
 	int rc = 0;
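
A hedged usage sketch for the two helpers added above (the surrounding sequence and example_dyn_clk_sequence are assumptions for illustration; the real call sites live in the display driver): the source clocks are held enabled across the shadow programming and released once the switch completes.

#include "dsi_clk.h"

static int example_dyn_clk_sequence(struct dsi_clk_link_set *src)
{
	int rc;

	/* hold the byte/pixel source clocks on for the duration */
	rc = dsi_clk_prepare_enable(src);
	if (rc)
		return rc;

	/* ... program shadow PLL/PHY values and trigger dynamic refresh ... */

	/* release the extra enable reference */
	dsi_clk_disable_unprepare(src);
	return 0;
}
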
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 31c3b1a..378ef4c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -2736,7 +2736,12 @@
 		goto error;
 	}
 
-	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR))) {
+	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+		       DSI_MODE_FLAG_DYN_CLK))) {
+		/*
+		 * For the dynamic clk switch case, the link frequency is
+		 * updated in dsi_display_dynamic_clk_switch().
+		 */
 		rc = dsi_ctrl_update_link_freqs(ctrl, config, clk_handle);
 		if (rc) {
 			pr_err("[%s] failed to update link frequencies, rc=%d\n",
@@ -3455,6 +3460,27 @@
 }
 
 /**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for the dynamic refresh
+ *				done interrupt.
+ * @ctrl:                  DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+
+	if (!ctrl)
+		return 0;
+
+	mutex_lock(&ctrl->ctrl_lock);
+
+	if (ctrl->hw.ops.wait4dynamic_refresh_done)
+		rc = ctrl->hw.ops.wait4dynamic_refresh_done(&ctrl->hw);
+
+	mutex_unlock(&ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
  * dsi_ctrl_drv_register() - register platform driver for dsi controller
  */
 void dsi_ctrl_drv_register(void)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 61c6116..47009bf 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -783,4 +783,11 @@
  * @enable:			   variable to control continuous clock.
  */
 void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for dynamic refresh done
+ *					interrupt.
+ * @ctrl:                          DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
 #endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 348ef36..f34cb10 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -810,6 +810,12 @@
 	 * @enable:	  Bool to control continuous clock request.
 	 */
 	void (*set_continuous_clk)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+	/**
+	 * hw.ops.wait4dynamic_refresh_done() - Wait for dynamic refresh done
+	 * @ctrl:         Pointer to the controller host hardware.
+	 */
+	int (*wait4dynamic_refresh_done)(struct dsi_ctrl_hw *ctrl);
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 7c58c43..7139a51 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -1443,6 +1443,13 @@
 			reg &= ~(0x7 << 23);
 	}
 
+	if (idx & BIT(DSI_PLL_UNLOCK_ERR)) {
+		if (en)
+			reg |= BIT(28);
+		else
+			reg &= ~BIT(28);
+	}
+
 	DSI_W32(ctrl, 0x10c, reg);
 	wmb(); /* ensure error is masked */
 }
@@ -1509,3 +1516,25 @@
 	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
 	wmb(); /* make sure request is set */
 }
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl)
+{
+	int rc;
+	u32 const sleep_us = 1000;
+	u32 const timeout_us = 84000; /* approximately 5 vsyncs */
+	u32 reg = 0, dyn_refresh_done = BIT(28);
+
+	rc = readl_poll_timeout(ctrl->base + DSI_INT_CTRL, reg,
+				(reg & dyn_refresh_done), sleep_us, timeout_us);
+	if (rc) {
+		pr_err("wait4dynamic refresh timed out %d\n", rc);
+		return rc;
+	}
+
+	/* ack dynamic refresh done status */
+	reg = DSI_R32(ctrl, DSI_INT_CTRL);
+	reg |= dyn_refresh_done;
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	return 0;
+}
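
An illustrative caller sketch (the display-level loop and example_wait_all are assumptions, not code from this patch): after a dynamic clock switch is triggered, each controller is polled through the new hook before normal operation resumes.

#include <linux/printk.h>
#include "dsi_ctrl.h"

static int example_wait_all(struct dsi_ctrl **ctrls, int count)
{
	int i, rc;

	for (i = 0; i < count; i++) {
		rc = dsi_ctrl_wait4dynamic_refresh_done(ctrls[i]);
		if (rc) {
			pr_err("ctrl %d: dynamic refresh not done, rc=%d\n",
			       i, rc);
			return rc;
		}
	}
	return 0;
}
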
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
index 39ac021..0ee8b39 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -138,44 +138,7 @@
 #define DSI_SCRATCH_REGISTER_1                     (0x01F8)
 #define DSI_SCRATCH_REGISTER_2                     (0x01FC)
 #define DSI_DYNAMIC_REFRESH_CTRL                   (0x0200)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY             (0x0204)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2            (0x0208)
-#define DSI_DYNAMIC_REFRESH_PLL_DELAY              (0x020C)
 #define DSI_DYNAMIC_REFRESH_STATUS                 (0x0210)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL0              (0x0214)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL1              (0x0218)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL2              (0x021C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL3              (0x0220)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL4              (0x0224)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL5              (0x0228)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL6              (0x022C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL7              (0x0230)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL8              (0x0234)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL9              (0x0238)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL10             (0x023C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL11             (0x0240)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL12             (0x0244)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL13             (0x0248)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL14             (0x024C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL15             (0x0250)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL16             (0x0254)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL17             (0x0258)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL18             (0x025C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL19             (0x0260)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL20             (0x0264)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL21             (0x0268)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL22             (0x026C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL23             (0x0270)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL24             (0x0274)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL25             (0x0278)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL26             (0x027C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL27             (0x0280)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL28             (0x0284)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL29             (0x0288)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL30             (0x028C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL31             (0x0290)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR         (0x0294)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2        (0x0298)
 #define DSI_VIDEO_COMPRESSION_MODE_CTRL            (0x02A0)
 #define DSI_VIDEO_COMPRESSION_MODE_CTRL2           (0x02A4)
 #define DSI_COMMAND_COMPRESSION_MODE_CTRL          (0x02A8)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 3b2ef70..a6ada73 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -79,6 +79,7 @@
  * @DSI_MODE_FLAG_DMS: Seamless transition is dynamic mode switch
  * @DSI_MODE_FLAG_VRR: Seamless transition is DynamicFPS.
  *                     New timing values are sent from DAL.
+ * @DSI_MODE_FLAG_DYN_CLK: Seamless transition is dynamic clock change
  */
 enum dsi_mode_flags {
 	DSI_MODE_FLAG_SEAMLESS			= BIT(0),
@@ -86,6 +87,7 @@
 	DSI_MODE_FLAG_VBLANK_PRE_MODESET	= BIT(2),
 	DSI_MODE_FLAG_DMS			= BIT(3),
 	DSI_MODE_FLAG_VRR			= BIT(4),
+	DSI_MODE_FLAG_DYN_CLK			= BIT(5),
 };
 
 /**
@@ -595,12 +597,50 @@
  * @DSI_FIFO_OVERFLOW:     DSI FIFO Overflow error
  * @DSI_FIFO_UNDERFLOW:    DSI FIFO Underflow error
  * @DSI_LP_Rx_TIMEOUT:     DSI LP/RX Timeout error
+ * @DSI_PLL_UNLOCK_ERR:	   DSI PLL unlock error
  */
 enum dsi_error_status {
 	DSI_FIFO_OVERFLOW = 1,
 	DSI_FIFO_UNDERFLOW,
 	DSI_LP_Rx_TIMEOUT,
+	DSI_PLL_UNLOCK_ERR,
 	DSI_ERR_INTR_ALL,
 };
 
+/* structure containing the delays required for dynamic clk */
+struct dsi_dyn_clk_delay {
+	u32 pipe_delay;
+	u32 pipe_delay2;
+	u32 pll_delay;
+};
+
+/* dynamic refresh control bits */
+enum dsi_dyn_clk_control_bits {
+	DYN_REFRESH_INTF_SEL = 1,
+	DYN_REFRESH_SYNC_MODE,
+	DYN_REFRESH_SW_TRIGGER,
+	DYN_REFRESH_SWI_CTRL,
+};
+
+/* convert dsi pixel format into bits per pixel */
+static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
+{
+	switch (fmt) {
+	case DSI_PIXEL_FORMAT_RGB888:
+	case DSI_PIXEL_FORMAT_MAX:
+		return 24;
+	case DSI_PIXEL_FORMAT_RGB666:
+	case DSI_PIXEL_FORMAT_RGB666_LOOSE:
+		return 18;
+	case DSI_PIXEL_FORMAT_RGB565:
+		return 16;
+	case DSI_PIXEL_FORMAT_RGB111:
+		return 3;
+	case DSI_PIXEL_FORMAT_RGB332:
+		return 8;
+	case DSI_PIXEL_FORMAT_RGB444:
+		return 12;
+	}
+	return 24;
+}
 #endif /* _DSI_DEFS_H_ */
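
A worked example of how the new bpp helper typically feeds the link clock math (the relation bit clock = pixel clock * bpp / lanes and byte clock = bit clock / 8 is the standard DSI formula; example_byte_clk_hz is an illustrative name, not part of this patch):

#include <linux/math64.h>

static inline u64 example_byte_clk_hz(u64 pclk_hz, enum dsi_pixel_format fmt,
				      u32 num_lanes)
{
	/* per-lane bit clock: pixel clock scaled by bits per pixel */
	u64 bit_clk = div_u64(pclk_hz * dsi_pixel_format_to_bpp(fmt),
			      num_lanes);

	return bit_clk >> 3;	/* 8 bit-clock ticks per byte-clock tick */
}
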
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 9e251da..f8170b2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1505,14 +1505,12 @@
 static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
 					struct dsi_display_mode *mode)
 {
-	if (display->ctrl_count > 1) {
-		mode->timing.h_active /= display->ctrl_count;
-		mode->timing.h_front_porch /= display->ctrl_count;
-		mode->timing.h_sync_width /= display->ctrl_count;
-		mode->timing.h_back_porch /= display->ctrl_count;
-		mode->timing.h_skew /= display->ctrl_count;
-		mode->pixel_clk_khz /= display->ctrl_count;
-	}
+	mode->timing.h_active /= display->ctrl_count;
+	mode->timing.h_front_porch /= display->ctrl_count;
+	mode->timing.h_sync_width /= display->ctrl_count;
+	mode->timing.h_back_porch /= display->ctrl_count;
+	mode->timing.h_skew /= display->ctrl_count;
+	mode->pixel_clk_khz /= display->ctrl_count;
 }
 
 static int dsi_display_is_ulps_req_valid(struct dsi_display *display,
@@ -2220,7 +2218,7 @@
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 
 	rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
-		   &display->clock_info.src_clks);
+		   &display->clock_info.mux_clks);
 	if (rc) {
 		pr_err("[%s] failed to set source clocks for master, rc=%d\n",
 			   display->name, rc);
@@ -2234,7 +2232,7 @@
 			continue;
 
 		rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
-			   &display->clock_info.src_clks);
+			   &display->clock_info.mux_clks);
 		if (rc) {
 			pr_err("[%s] failed to set source clocks, rc=%d\n",
 				   display->name, rc);
@@ -2957,13 +2955,37 @@
 	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
 	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
 	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+	struct dsi_dyn_clk_caps *dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
+	if (IS_ERR_OR_NULL(mux->byte_clk)) {
+		rc = PTR_ERR(mux->byte_clk);
+		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		mux->byte_clk = NULL;
+		goto error;
+	};
+
+	mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
+	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
+		rc = PTR_ERR(mux->pixel_clk);
+		mux->pixel_clk = NULL;
+		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+		goto error;
+	};
 
 	src->byte_clk = devm_clk_get(&display->pdev->dev, "src_byte_clk");
 	if (IS_ERR_OR_NULL(src->byte_clk)) {
 		rc = PTR_ERR(src->byte_clk);
 		src->byte_clk = NULL;
 		pr_err("failed to get src_byte_clk, rc=%d\n", rc);
-		goto error;
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
+		goto done;
 	}
 
 	src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
@@ -2971,37 +2993,16 @@
 		rc = PTR_ERR(src->pixel_clk);
 		src->pixel_clk = NULL;
 		pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
-		goto error;
+		/*
+		 * Skip getting rest of clocks since one failed. This is a
+		 * non-critical failure since these clocks are required only for
+		 * dynamic refresh use cases.
+		 */
+		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
+		goto done;
 	}
 
-	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
-	if (IS_ERR_OR_NULL(mux->byte_clk)) {
-		rc = PTR_ERR(mux->byte_clk);
-		pr_debug("failed to get mux_byte_clk, rc=%d\n", rc);
-		mux->byte_clk = NULL;
-		/*
-		 * Skip getting rest of clocks since one failed. This is a
-		 * non-critical failure since these clocks are requied only for
-		 * dynamic refresh use cases.
-		 */
-		rc = 0;
-		goto done;
-	};
-
-	mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
-	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
-		rc = PTR_ERR(mux->pixel_clk);
-		mux->pixel_clk = NULL;
-		pr_debug("failed to get mux_pixel_clk, rc=%d\n", rc);
-		/*
-		 * Skip getting rest of clocks since one failed. This is a
-		 * non-critical failure since these clocks are requied only for
-		 * dynamic refresh use cases.
-		 */
-		rc = 0;
-		goto done;
-	};
-
 	shadow->byte_clk = devm_clk_get(&display->pdev->dev, "shadow_byte_clk");
 	if (IS_ERR_OR_NULL(shadow->byte_clk)) {
 		rc = PTR_ERR(shadow->byte_clk);
@@ -3013,6 +3014,7 @@
 		 * dynamic refresh use cases.
 		 */
 		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
 		goto done;
 	};
 
@@ -3028,6 +3030,7 @@
 		 * dynamic refresh use cases.
 		 */
 		rc = 0;
+		dyn_clk_caps->dyn_clk_support = false;
 		goto done;
 	};
 
@@ -3722,6 +3725,305 @@
 	return true;
 }
 
+static int dsi_display_update_dsi_bitrate(struct dsi_display *display,
+					  u32 bit_clk_rate)
+{
+	int rc = 0;
+	int i;
+
+	pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
+	if (!display->panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (bit_clk_rate == 0) {
+		pr_err("Invalid bit clock rate\n");
+		return -EINVAL;
+	}
+
+	display->config.bit_clk_rate_hz = bit_clk_rate;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
+		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
+		u32 num_of_lanes = 0, bpp;
+		u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
+		struct dsi_host_common_cfg *host_cfg;
+
+		mutex_lock(&ctrl->ctrl_lock);
+
+		host_cfg = &display->panel->host_config;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_0)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_1)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_2)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_3)
+			num_of_lanes++;
+
+		if (num_of_lanes == 0) {
+			pr_err("Invalid lane count\n");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		bpp = dsi_pixel_format_to_bpp(host_cfg->dst_format);
+
+		bit_rate = display->config.bit_clk_rate_hz * num_of_lanes;
+		bit_rate_per_lane = bit_rate;
+		do_div(bit_rate_per_lane, num_of_lanes);
+		pclk_rate = bit_rate;
+		do_div(pclk_rate, bpp);
+		byte_clk_rate = bit_rate_per_lane;
+		do_div(byte_clk_rate, 8);
+		pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
+			 bit_rate, bit_rate_per_lane);
+		pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
+			  byte_clk_rate, pclk_rate);
+
+		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
+		ctrl->clk_freq.pix_clk_rate = pclk_rate;
+		rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
+			ctrl->clk_freq, ctrl->cell_index);
+		if (rc) {
+			pr_err("Failed to update link frequencies\n");
+			goto error;
+		}
+
+		ctrl->host_config.bit_clk_rate_hz = bit_clk_rate;
+error:
+		mutex_unlock(&ctrl->ctrl_lock);
+
+		/* TODO: recover ctrl->clk_freq in case of failure */
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
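
dsi_display_update_dsi_bitrate() above derives the per-controller byte and pixel clocks from the requested link bit clock: total bandwidth is the bit clock times the lane count, the pixel clock is that bandwidth divided by bits per pixel, and the byte clock is one eighth of the per-lane bit clock. A minimal userspace sketch of the same integer arithmetic, with illustrative numbers and plain 64-bit division standing in for do_div():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bit_clk_hz = 900000000ULL;	/* requested per-lane bit clock */
	unsigned int lanes = 4, bpp = 24;	/* RGB888 over four data lanes */

	uint64_t bit_rate = bit_clk_hz * lanes;	/* total link bandwidth */
	uint64_t byte_clk = bit_clk_hz / 8;	/* one byte clock per 8 bits */
	uint64_t pclk = bit_rate / bpp;		/* pixels per second */

	printf("byte_clk=%llu Hz pclk=%llu Hz\n",
	       (unsigned long long)byte_clk, (unsigned long long)pclk);
	return 0;
}
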
+
+static void _dsi_display_calc_pipe_delay(struct dsi_display *display,
+				    struct dsi_dyn_clk_delay *delay,
+				    struct dsi_display_mode *mode)
+{
+	u32 esc_clk_rate_hz;
+	u32 pclk_to_esc_ratio, byte_to_esc_ratio, hr_bit_to_esc_ratio;
+	u32 hsync_period = 0;
+	struct dsi_display_ctrl *m_ctrl;
+	struct dsi_ctrl *dsi_ctrl;
+	struct dsi_phy_cfg *cfg;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+	dsi_ctrl = m_ctrl->ctrl;
+
+	cfg = &(m_ctrl->phy->cfg);
+
+	esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate * 1000;
+	pclk_to_esc_ratio = ((dsi_ctrl->clk_freq.pix_clk_rate * 1000) /
+			     esc_clk_rate_hz);
+	byte_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 1000) /
+			     esc_clk_rate_hz);
+	hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4 * 1000) /
+					esc_clk_rate_hz);
+
+	hsync_period = DSI_H_TOTAL_DSC(&mode->timing);
+	delay->pipe_delay = (hsync_period + 1) / pclk_to_esc_ratio;
+	if (!display->panel->video_config.eof_bllp_lp11_en)
+		delay->pipe_delay += (17 / pclk_to_esc_ratio) +
+			((21 + (display->config.common_config.t_clk_pre + 1) +
+			  (display->config.common_config.t_clk_post + 1)) /
+			 byte_to_esc_ratio) +
+			((((cfg->timing.lane_v3[8] >> 1) + 1) +
+			((cfg->timing.lane_v3[6] >> 1) + 1) +
+			((cfg->timing.lane_v3[3] * 4) +
+			 (cfg->timing.lane_v3[5] >> 1) + 1) +
+			((cfg->timing.lane_v3[7] >> 1) + 1) +
+			((cfg->timing.lane_v3[1] >> 1) + 1) +
+			((cfg->timing.lane_v3[4] >> 1) + 1)) /
+			 hr_bit_to_esc_ratio);
+
+	delay->pipe_delay2 = 0;
+	if (display->panel->host_config.force_hs_clk_lane)
+		delay->pipe_delay2 = (6 / byte_to_esc_ratio) +
+			((((cfg->timing.lane_v3[1] >> 1) + 1) +
+			  ((cfg->timing.lane_v3[4] >> 1) + 1)) /
+			 hr_bit_to_esc_ratio);
+
+	/* 130 us pll delay recommended by h/w doc */
+	delay->pll_delay = ((130 * esc_clk_rate_hz) / 1000000) * 2;
+}
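
_dsi_display_calc_pipe_delay() expresses every delay in escape-clock cycles; the PLL delay in particular converts the 130 us recommendation from the hardware documentation into escape cycles and doubles it. A small sketch of just that conversion, assuming the 19.2 MHz escape clock that the panel code programs elsewhere in this patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t esc_clk_hz = 19200000;	/* 19.2 MHz escape clock */

	/* 130 us recommendation converted to escape-clock cycles, doubled */
	uint32_t pll_delay = ((130u * esc_clk_hz) / 1000000u) * 2u;

	printf("pll_delay = %u esc cycles\n", pll_delay);
	return 0;
}
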
+
+static int _dsi_display_dyn_update_clks(struct dsi_display *display,
+					struct link_clk_freq *bkp_freq)
+{
+	int rc = 0, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	dsi_clk_prepare_enable(&display->clock_info.src_clks);
+
+	rc = dsi_clk_update_parent(&display->clock_info.shadow_clks,
+			      &display->clock_info.mux_clks);
+	if (rc) {
+		pr_err("failed update mux parent to shadow\n");
+		goto exit;
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		rc = dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+				   ctrl->ctrl->clk_freq.byte_clk_rate, i);
+		if (rc) {
+			pr_err("failed to set byte rate for index:%d\n", i);
+			goto recover_byte_clk;
+		}
+		rc = dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+				   ctrl->ctrl->clk_freq.pix_clk_rate, i);
+		if (rc) {
+			pr_err("failed to set pix rate for index:%d\n", i);
+			goto recover_pix_clk;
+		}
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (ctrl == m_ctrl)
+			continue;
+		dsi_phy_dynamic_refresh_trigger(ctrl->phy, false);
+	}
+	dsi_phy_dynamic_refresh_trigger(m_ctrl->phy, true);
+
+	/* wait for dynamic refresh done */
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_wait4dynamic_refresh_done(ctrl->ctrl);
+		if (rc) {
+			pr_err("wait4dynamic refresh failed for dsi:%d\n", i);
+			goto recover_pix_clk;
+		} else {
+			pr_info("dynamic refresh done on dsi: %s\n",
+				i ? "slave" : "master");
+		}
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		dsi_phy_dynamic_refresh_clear(ctrl->phy);
+	}
+
+	rc = dsi_clk_update_parent(&display->clock_info.src_clks,
+			      &display->clock_info.mux_clks);
+	if (rc)
+		pr_err("could not switch back to src clks %d\n", rc);
+
+	dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+	return rc;
+
+recover_pix_clk:
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+					   bkp_freq->pix_clk_rate, i);
+	}
+
+recover_byte_clk:
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+					  bkp_freq->byte_clk_rate, i);
+	}
+
+exit:
+	dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+	return rc;
+}
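
_dsi_display_dyn_update_clks() relies on strict ordering: keep the source PLL prepared, park the byte/pixel muxes on the shadow path while the new rates are programmed, arm dynamic refresh on the slave PHY(s) before triggering the master, wait for completion, then switch the muxes back to the source clocks. The sketch below only illustrates that ordering; step() and the loop bounds are stand-ins, not the driver's dsi_clk_ or dsi_phy_ helpers.

#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

int main(void)
{
	int ctrls = 2;	/* master is index 0 in this sketch */

	step("prepare/enable source clocks");
	step("switch mux parent to shadow clocks");
	for (int i = 0; i < ctrls; i++)
		step("program new byte/pixel rates");
	for (int i = ctrls - 1; i > 0; i--)
		step("arm dynamic refresh on slave PHY");
	step("trigger dynamic refresh on master PHY");
	for (int i = 0; i < ctrls; i++)
		step("wait for dynamic refresh done");
	step("clear dynamic refresh state");
	step("switch mux parent back to source clocks");
	step("disable/unprepare source clocks");
	return 0;
}
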
+
+static int dsi_display_dynamic_clk_switch(struct dsi_display *display,
+					  struct dsi_display_mode *mode)
+{
+	int rc = 0, mask, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	struct dsi_dyn_clk_delay delay;
+	struct link_clk_freq bkp_freq;
+
+	dsi_panel_acquire_panel_lock(display->panel);
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_ON);
+
+	/* mask PLL unlock, FIFO overflow and underflow errors */
+	mask = BIT(DSI_PLL_UNLOCK_ERR) | BIT(DSI_FIFO_UNDERFLOW) |
+		BIT(DSI_FIFO_OVERFLOW);
+	dsi_display_mask_ctrl_error_interrupts(display, mask, true);
+
+	/* update the phy timings based on new mode */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		dsi_phy_update_phy_timings(ctrl->phy, &display->config);
+	}
+
+	/* back up existing rates to handle failure case */
+	bkp_freq.byte_clk_rate = m_ctrl->ctrl->clk_freq.byte_clk_rate;
+	bkp_freq.pix_clk_rate = m_ctrl->ctrl->clk_freq.pix_clk_rate;
+	bkp_freq.esc_clk_rate = m_ctrl->ctrl->clk_freq.esc_clk_rate;
+
+	rc = dsi_display_update_dsi_bitrate(display, mode->timing.clk_rate_hz);
+	if (rc) {
+		pr_err("failed set link frequencies %d\n", rc);
+		goto exit;
+	}
+
+	/* calculate pipe delays */
+	_dsi_display_calc_pipe_delay(display, &delay, mode);
+
+	/* configure dynamic refresh ctrl registers */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->phy)
+			continue;
+		if (ctrl == m_ctrl)
+			dsi_phy_config_dynamic_refresh(ctrl->phy, &delay, true);
+		else
+			dsi_phy_config_dynamic_refresh(ctrl->phy, &delay,
+						       false);
+	}
+
+	rc = _dsi_display_dyn_update_clks(display, &bkp_freq);
+
+exit:
+	dsi_display_mask_ctrl_error_interrupts(display, mask, false);
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS,
+			     DSI_CLK_OFF);
+
+	/* store newly calculated phy timings in mode private info */
+	dsi_phy_dyn_refresh_cache_phy_timings(m_ctrl->phy,
+					      mode->priv_info->phy_timing_val,
+					      mode->priv_info->phy_timing_len);
+
+	dsi_panel_release_panel_lock(display->panel);
+
+	return rc;
+}
+
 static int dsi_display_dfps_update(struct dsi_display *display,
 				   struct dsi_display_mode *dsi_mode)
 {
@@ -3987,6 +4289,16 @@
 					display->name, rc);
 			goto error;
 		}
+	} else if (mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK) {
+		rc = dsi_display_dynamic_clk_switch(display, mode);
+		if (rc)
+			pr_err("dynamic clk change failed %d\n", rc);
+		/*
+		 * skip rest of the operations since
+		 * dsi_display_dynamic_clk_switch() already takes
+		 * care of them.
+		 */
+		return rc;
 	}
 
 	for (i = 0; i < display->ctrl_count; i++) {
@@ -4222,84 +4534,6 @@
 	return rc;
 }
 
-static int dsi_display_request_update_dsi_bitrate(struct dsi_display *display,
-					u32 bit_clk_rate)
-{
-	int rc = 0;
-	int i;
-
-	pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
-	if (!display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (bit_clk_rate == 0) {
-		pr_err("Invalid bit clock rate\n");
-		return -EINVAL;
-	}
-
-	display->config.bit_clk_rate_hz = bit_clk_rate;
-
-	for (i = 0; i < display->ctrl_count; i++) {
-		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
-		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
-		u32 num_of_lanes = 0;
-		u32 bpp = 3;
-		u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
-		struct dsi_host_common_cfg *host_cfg;
-
-		mutex_lock(&ctrl->ctrl_lock);
-
-		host_cfg = &display->panel->host_config;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_0)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_1)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_2)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_3)
-			num_of_lanes++;
-
-		if (num_of_lanes == 0) {
-			pr_err("Invalid lane count\n");
-			rc = -EINVAL;
-			goto error;
-		}
-
-		bit_rate = display->config.bit_clk_rate_hz * num_of_lanes;
-		bit_rate_per_lane = bit_rate;
-		do_div(bit_rate_per_lane, num_of_lanes);
-		pclk_rate = bit_rate;
-		do_div(pclk_rate, (8 * bpp));
-		byte_clk_rate = bit_rate_per_lane;
-		do_div(byte_clk_rate, 8);
-		pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
-			 bit_rate, bit_rate_per_lane);
-		pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
-			  byte_clk_rate, pclk_rate);
-
-		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
-		ctrl->clk_freq.pix_clk_rate = pclk_rate;
-		rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
-			ctrl->clk_freq, ctrl->cell_index);
-		if (rc) {
-			pr_err("Failed to update link frequencies\n");
-			goto error;
-		}
-
-		ctrl->host_config.bit_clk_rate_hz = bit_clk_rate;
-error:
-		mutex_unlock(&ctrl->ctrl_lock);
-
-		/* TODO: recover ctrl->clk_freq in case of failure */
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
 static ssize_t sysfs_dynamic_dsi_clk_read(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
@@ -4350,6 +4584,11 @@
 		return rc;
 	}
 
+	if (display->panel->panel_mode != DSI_OP_CMD_MODE) {
+		pr_err("only supported for command mode\n");
+		return -ENOTSUPP;
+	}
+
 	if (clk_rate <= 0) {
 		pr_err("%s: bitrate should be greater than 0\n", __func__);
 		return -EINVAL;
@@ -4365,7 +4604,7 @@
 	mutex_lock(&display->display_lock);
 
 	display->cached_clk_rate = clk_rate;
-	rc = dsi_display_request_update_dsi_bitrate(display, clk_rate);
+	rc = dsi_display_update_dsi_bitrate(display, clk_rate);
 	if (!rc) {
 		pr_info("%s: bit clk is ready to be configured to '%d'\n",
 			__func__, clk_rate);
@@ -5151,7 +5390,8 @@
 			u32 *count)
 {
 	struct dsi_dfps_capabilities dfps_caps;
-	int num_dfps_rates, rc = 0;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	int num_dfps_rates, num_bit_clks, rc = 0;
 
 	if (!display || !display->panel) {
 		pr_err("invalid display:%d panel:%d\n", display != NULL,
@@ -5168,12 +5408,16 @@
 		return rc;
 	}
 
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
 
-	/* Inflate num_of_modes by fps in dfps */
-	*count = display->panel->num_timing_nodes * num_dfps_rates;
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	num_bit_clks = !dyn_clk_caps->dyn_clk_support ? 1 :
+					dyn_clk_caps->bit_clk_list_len;
+
+	/* Inflate num_of_modes by fps and bit clks in dfps */
+	*count = display->panel->num_timing_nodes *
+				num_dfps_rates * num_bit_clks;
 
 	return 0;
 }
@@ -5196,6 +5440,73 @@
 	return 0;
 }
 
+static void _dsi_display_populate_bit_clks(struct dsi_display *display,
+					   int start, int end, u32 *mode_idx)
+{
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	struct dsi_display_mode *src, *dst;
+	struct dsi_host_common_cfg *cfg;
+	int i, j, total_modes, bpp, lanes = 0;
+
+	if (!display || !mode_idx)
+		return;
+
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+	if (!dyn_clk_caps->dyn_clk_support)
+		return;
+
+	cfg = &(display->panel->host_config);
+	bpp = dsi_pixel_format_to_bpp(cfg->dst_format);
+
+	if (cfg->data_lanes & DSI_LOGICAL_LANE_0)
+		lanes++;
+	if (cfg->data_lanes & DSI_LOGICAL_LANE_1)
+		lanes++;
+	if (cfg->data_lanes & DSI_LOGICAL_LANE_2)
+		lanes++;
+	if (cfg->data_lanes & DSI_LOGICAL_LANE_3)
+		lanes++;
+
+	dsi_display_get_mode_count_no_lock(display, &total_modes);
+
+	for (i = start; i < end; i++) {
+		src = &display->modes[i];
+		if (!src)
+			return;
+		/*
+		 * TODO: currently setting the first bit rate in
+		 * the list as preferred rate. But ideally should
+		 * be based on user or device tree preference.
+		 */
+		src->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[0];
+		src->pixel_clk_khz =
+			div_u64(src->timing.clk_rate_hz * lanes, bpp);
+		src->pixel_clk_khz /= 1000;
+		src->pixel_clk_khz *= display->ctrl_count;
+	}
+
+	for (i = 1; i < dyn_clk_caps->bit_clk_list_len; i++) {
+		if (*mode_idx >= total_modes)
+			return;
+		for (j = start; j < end; j++) {
+			src = &display->modes[j];
+			dst = &display->modes[*mode_idx];
+
+			if (!src || !dst) {
+				pr_err("invalid mode index\n");
+				return;
+			}
+			memcpy(dst, src, sizeof(struct dsi_display_mode));
+			dst->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[i];
+			dst->pixel_clk_khz =
+				div_u64(dst->timing.clk_rate_hz * lanes, bpp);
+			dst->pixel_clk_khz /= 1000;
+			dst->pixel_clk_khz *= display->ctrl_count;
+			(*mode_idx)++;
+		}
+	}
+}
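
_dsi_display_populate_bit_clks() clones each base timing once per additional supported bit clock and recomputes pixel_clk_khz from the new rate (bit clock times lanes, divided by bpp, scaled to kHz and by the controller count). A minimal sketch of that recomputation with hypothetical clock values:

#include <stdio.h>
#include <stdint.h>

struct mode { uint64_t bit_clk_hz; uint32_t pixel_clk_khz; };

int main(void)
{
	uint64_t bit_clks[] = { 800000000ULL, 900000000ULL };	/* DT list stand-in */
	unsigned int lanes = 4, bpp = 24, ctrl_count = 1;
	struct mode modes[2];

	for (unsigned int i = 0; i < 2; i++) {
		modes[i].bit_clk_hz = bit_clks[i];
		/* pixel_clk_khz = bit_clk * lanes / bpp / 1000 * ctrl_count */
		modes[i].pixel_clk_khz =
			(uint32_t)(bit_clks[i] * lanes / bpp / 1000 * ctrl_count);
		printf("mode %u: %llu Hz -> %u kHz pclk\n", i,
		       (unsigned long long)modes[i].bit_clk_hz,
		       modes[i].pixel_clk_khz);
	}
	return 0;
}
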
+
 void dsi_display_put_mode(struct dsi_display *display,
 	struct dsi_display_mode *mode)
 {
@@ -5206,9 +5517,10 @@
 			  struct dsi_display_mode **out_modes)
 {
 	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
 	u32 num_dfps_rates, panel_mode_count, total_mode_count;
 	u32 mode_idx, array_idx = 0;
-	int i, rc = -EINVAL;
+	int i, start, end, rc = -EINVAL;
 
 	if (!display || !out_modes) {
 		pr_err("Invalid params\n");
@@ -5219,13 +5531,13 @@
 
 	mutex_lock(&display->display_lock);
 
+	if (display->modes)
+		goto exit;
+
 	rc = dsi_display_get_mode_count_no_lock(display, &total_mode_count);
 	if (rc)
 		goto error;
 
-	/* free any previously probed modes */
-	kfree(display->modes);
-
 	display->modes = kcalloc(total_mode_count, sizeof(*display->modes),
 			GFP_KERNEL);
 	if (!display->modes) {
@@ -5240,9 +5552,9 @@
 		goto error;
 	}
 
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
 
 	panel_mode_count = display->panel->num_timing_nodes;
 
@@ -5263,14 +5575,14 @@
 			goto error;
 		}
 
-		if (display->ctrl_count > 1) { /* TODO: remove if */
-			panel_mode.timing.h_active *= display->ctrl_count;
-			panel_mode.timing.h_front_porch *= display->ctrl_count;
-			panel_mode.timing.h_sync_width *= display->ctrl_count;
-			panel_mode.timing.h_back_porch *= display->ctrl_count;
-			panel_mode.timing.h_skew *= display->ctrl_count;
-			panel_mode.pixel_clk_khz *= display->ctrl_count;
-		}
+		panel_mode.timing.h_active *= display->ctrl_count;
+		panel_mode.timing.h_front_porch *= display->ctrl_count;
+		panel_mode.timing.h_sync_width *= display->ctrl_count;
+		panel_mode.timing.h_back_porch *= display->ctrl_count;
+		panel_mode.timing.h_skew *= display->ctrl_count;
+		panel_mode.pixel_clk_khz *= display->ctrl_count;
+
+		start = array_idx;
 
 		for (i = 0; i < num_dfps_rates; i++) {
 			struct dsi_display_mode *sub_mode =
@@ -5284,26 +5596,27 @@
 			}
 
 			memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
-
-			if (dfps_caps.dfps_support) {
-				curr_refresh_rate =
-					sub_mode->timing.refresh_rate;
-				sub_mode->timing.refresh_rate =
-					dfps_caps.min_refresh_rate +
-					(i % num_dfps_rates);
-
-				dsi_display_get_dfps_timing(display,
-					sub_mode, curr_refresh_rate);
-
-				sub_mode->pixel_clk_khz =
-					(DSI_H_TOTAL(&sub_mode->timing) *
-					DSI_V_TOTAL(&sub_mode->timing) *
-					sub_mode->timing.refresh_rate) / 1000;
-			}
 			array_idx++;
+
+			if (!dfps_caps.dfps_support)
+				continue;
+
+			curr_refresh_rate = sub_mode->timing.refresh_rate;
+			sub_mode->timing.refresh_rate = dfps_caps.dfps_list[i];
+
+			dsi_display_get_dfps_timing(display, sub_mode,
+						    curr_refresh_rate);
 		}
+
+		end = array_idx;
+		/*
+		 * if dynamic clk switch is supported then update all the bit
+		 * clk rates.
+		 */
+		_dsi_display_populate_bit_clks(display, start, end, &array_idx);
 	}
 
+exit:
 	*out_modes = display->modes;
 	rc = 0;
 
@@ -5384,7 +5697,8 @@
 
 		if (cmp->timing.v_active == m->timing.v_active &&
 			cmp->timing.h_active == m->timing.h_active &&
-			cmp->timing.refresh_rate == m->timing.refresh_rate) {
+			cmp->timing.refresh_rate == m->timing.refresh_rate &&
+			cmp->pixel_clk_khz == m->pixel_clk_khz) {
 			*out_mode = m;
 			rc = 0;
 			break;
@@ -5393,9 +5707,10 @@
 	mutex_unlock(&display->display_lock);
 
 	if (!*out_mode) {
-		pr_err("[%s] failed to find mode for v_active %u h_active %u rate %u\n",
+		pr_err("[%s] failed to find mode for v_active %u h_active %u fps %u pclk %u\n",
 				display->name, cmp->timing.v_active,
-				cmp->timing.h_active, cmp->timing.refresh_rate);
+				cmp->timing.h_active, cmp->timing.refresh_rate,
+				cmp->pixel_clk_khz);
 		rc = -ENOENT;
 	}
 
@@ -5403,7 +5718,7 @@
 }
 
 /**
- * dsi_display_validate_mode_vrr() - Validate if varaible refresh case.
+ * dsi_display_validate_mode_change() - Validate if variable refresh or dynamic clk change case.
  * @display:     DSI display handle.
  * @cur_dsi_mode:   Current DSI mode.
  * @mode:        Mode value structure to be validated.
@@ -5411,16 +5726,15 @@
  *               is change in fps but vactive and hactive are same.
  * Return: error code.
  */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
-			struct dsi_display_mode *cur_dsi_mode,
-			struct dsi_display_mode *mode)
+int dsi_display_validate_mode_change(struct dsi_display *display,
+			struct dsi_display_mode *cur_mode,
+			struct dsi_display_mode *adj_mode)
 {
 	int rc = 0;
-	struct dsi_display_mode adj_mode, cur_mode;
 	struct dsi_dfps_capabilities dfps_caps;
-	u32 curr_refresh_rate;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
 
-	if (!display || !mode) {
+	if (!display || !adj_mode) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
@@ -5432,65 +5746,43 @@
 
 	mutex_lock(&display->display_lock);
 
-	adj_mode = *mode;
-	cur_mode = *cur_dsi_mode;
-
-	if ((cur_mode.timing.refresh_rate != adj_mode.timing.refresh_rate) &&
-		(cur_mode.timing.v_active == adj_mode.timing.v_active) &&
-		(cur_mode.timing.h_active == adj_mode.timing.h_active)) {
-
-		curr_refresh_rate = cur_mode.timing.refresh_rate;
-		rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-		if (rc) {
-			pr_err("[%s] failed to get dfps caps from panel\n",
-					display->name);
-			goto error;
+	if ((cur_mode->timing.v_active == adj_mode->timing.v_active) &&
+	    (cur_mode->timing.h_active == adj_mode->timing.h_active)) {
+		/* dfps change use case */
+		if (cur_mode->timing.refresh_rate !=
+		    adj_mode->timing.refresh_rate) {
+			dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+			if (!dfps_caps.dfps_support) {
+				pr_err("invalid mode dfps not supported\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			pr_debug("Mode switch is seamless variable refresh\n");
+			adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+			SDE_EVT32(cur_mode->timing.refresh_rate,
+				  adj_mode->timing.refresh_rate,
+				  cur_mode->timing.h_front_porch,
+				  adj_mode->timing.h_front_porch);
 		}
 
-		cur_mode.timing.refresh_rate =
-			adj_mode.timing.refresh_rate;
-
-		rc = dsi_display_get_dfps_timing(display,
-			&cur_mode, curr_refresh_rate);
-		if (rc) {
-			pr_err("[%s] seamless vrr not possible rc=%d\n",
-			display->name, rc);
-			goto error;
+		/* dynamic clk change use case */
+		if (cur_mode->pixel_clk_khz != adj_mode->pixel_clk_khz) {
+			dyn_clk_caps = &(display->panel->dyn_clk_caps);
+			if (!dyn_clk_caps->dyn_clk_support) {
+				pr_err("dyn clk change not supported\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			if (adj_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR) {
+				pr_err("dfps and dyn clk not supported in same commit\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			pr_debug("dynamic clk change detected\n");
+			adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
+			SDE_EVT32(cur_mode->pixel_clk_khz,
+				  adj_mode->pixel_clk_khz);
 		}
-		switch (dfps_caps.type) {
-		/*
-		 * Ignore any round off factors in porch calculation.
-		 * Worse case is set to 5.
-		 */
-		case DSI_DFPS_IMMEDIATE_VFP:
-			if (abs(DSI_V_TOTAL(&cur_mode.timing) -
-				DSI_V_TOTAL(&adj_mode.timing)) > 5)
-				pr_err("Mismatch vfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.v_front_porch,
-				adj_mode.timing.v_front_porch);
-			break;
-
-		case DSI_DFPS_IMMEDIATE_HFP:
-			if (abs(DSI_H_TOTAL(&cur_mode.timing) -
-				DSI_H_TOTAL(&adj_mode.timing)) > 5)
-				pr_err("Mismatch hfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
-			break;
-
-		default:
-			pr_err("Unsupported DFPS mode %d\n",
-				dfps_caps.type);
-			rc = -ENOTSUPP;
-		}
-
-		pr_debug("Mode switch is seamless variable refresh\n");
-		mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
-		SDE_EVT32(curr_refresh_rate, adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
 	}
 
 error:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 5612016..f65f0f5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -398,13 +398,14 @@
 			      u32 flags);
 
 /**
- * dsi_display_validate_mode_vrr() - validates mode if variable refresh case
+ * dsi_display_validate_mode_change() - validates mode if variable refresh case
+ *				or dynamic clk change case
  * @display:             Handle to display.
  * @mode:                Mode to be validated..
  *
  * Return: 0 if  error code.
  */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
+int dsi_display_validate_mode_change(struct dsi_display *display,
 			struct dsi_display_mode *cur_dsi_mode,
 			struct dsi_display_mode *mode);
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 250314b..68a7277 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -63,6 +63,8 @@
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	if (msm_is_mode_seamless_vrr(drm_mode))
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+	if (msm_is_mode_seamless_dyn_clk(drm_mode))
+		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
 
 	dsi_mode->timing.h_sync_polarity =
 			!!(drm_mode->flags & DRM_MODE_FLAG_PHSYNC);
@@ -105,13 +107,18 @@
 		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DMS;
 	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR)
 		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR;
+	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)
+		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYN_CLK;
 
 	if (dsi_mode->timing.h_sync_polarity)
 		drm_mode->flags |= DRM_MODE_FLAG_PHSYNC;
 	if (dsi_mode->timing.v_sync_polarity)
 		drm_mode->flags |= DRM_MODE_FLAG_PVSYNC;
 
-	drm_mode_set_name(drm_mode);
+	/* set mode name */
+	snprintf(drm_mode->name, DRM_DISPLAY_MODE_LEN, "%dx%dx%dx%d",
+		 drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->vrefresh,
+		 drm_mode->clock);
 }
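
With dynamic bit clocks, two modes can share resolution and refresh rate and differ only in pixel clock, so the stock drm_mode_set_name() (which encodes only width by height) would produce duplicate names; the snprintf() above therefore also folds vrefresh and clock into the name. A tiny sketch of the same encoding with made-up numbers:

#include <stdio.h>

int main(void)
{
	char name[32];
	int hdisplay = 1080, vdisplay = 2160, vrefresh = 60, clock = 164836;

	/* same "%dx%dx%dx%d" layout the patch writes into drm_mode->name */
	snprintf(name, sizeof(name), "%dx%dx%dx%d",
		 hdisplay, vdisplay, vrefresh, clock);
	printf("%s\n", name);	/* 1080x2160x60x164836 */
	return 0;
}
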
 
 static int dsi_bridge_attach(struct drm_bridge *bridge)
@@ -156,7 +163,8 @@
 	}
 
 	if (c_bridge->dsi_mode.dsi_mode_flags &
-		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
+		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+		 DSI_MODE_FLAG_DYN_CLK)) {
 		pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
 		return;
 	}
@@ -279,6 +287,12 @@
 
 	memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
 	convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
+
+	/* restore bit_clk_rate also for dynamic clk use cases */
+	c_bridge->dsi_mode.timing.clk_rate_hz =
+		dsi_drm_find_bit_clk_rate(c_bridge->display, adjusted_mode);
+
+	pr_debug("clk_rate: %llu\n", c_bridge->dsi_mode.timing.clk_rate_hz);
 }
 
 static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -337,17 +351,20 @@
 
 		convert_to_dsi_mode(&crtc_state->crtc->state->mode,
 							&cur_dsi_mode);
-		rc = dsi_display_validate_mode_vrr(c_bridge->display,
+		rc = dsi_display_validate_mode_change(c_bridge->display,
 					&cur_dsi_mode, &dsi_mode);
-		if (rc)
-			pr_debug("[%s] vrr mode mismatch failure rc=%d\n",
+		if (rc) {
+			pr_err("[%s] seamless mode mismatch failure rc=%d\n",
 				c_bridge->display->name, rc);
+			return false;
+		}
 
 		cur_mode = crtc_state->crtc->mode;
 
 		/* No DMS/VRR when drm pipeline is changing */
 		if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
+			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
 			(!crtc_state->active_changed ||
 			 display->is_cont_splash_enabled))
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
@@ -359,6 +376,33 @@
 	return true;
 }
 
+u64 dsi_drm_find_bit_clk_rate(void *display,
+			      const struct drm_display_mode *drm_mode)
+{
+	int i = 0, count = 0;
+	struct dsi_display *dsi_display = display;
+	struct dsi_display_mode *dsi_mode;
+	u64 bit_clk_rate = 0;
+
+	if (!dsi_display || !drm_mode)
+		return 0;
+
+	dsi_display_get_mode_count(dsi_display, &count);
+
+	for (i = 0; i < count; i++) {
+		dsi_mode = &dsi_display->modes[i];
+		if ((dsi_mode->timing.v_active == drm_mode->vdisplay) &&
+		    (dsi_mode->timing.h_active == drm_mode->hdisplay) &&
+		    (dsi_mode->pixel_clk_khz == drm_mode->clock) &&
+		    (dsi_mode->timing.refresh_rate == drm_mode->vrefresh)) {
+			bit_clk_rate = dsi_mode->timing.clk_rate_hz;
+			break;
+		}
+	}
+
+	return bit_clk_rate;
+}
+
 int dsi_conn_get_mode_info(const struct drm_display_mode *drm_mode,
 	struct msm_mode_info *mode_info,
 	u32 max_mixer_width, void *display)
@@ -382,7 +426,7 @@
 	mode_info->prefill_lines = dsi_mode.priv_info->panel_prefill_lines;
 	mode_info->jitter_numer = dsi_mode.priv_info->panel_jitter_numer;
 	mode_info->jitter_denom = dsi_mode.priv_info->panel_jitter_denom;
-	mode_info->clk_rate = dsi_mode.priv_info->clk_rate_hz;
+	mode_info->clk_rate = dsi_drm_find_bit_clk_rate(display, drm_mode);
 
 	memcpy(&mode_info->topology, &dsi_mode.priv_info->topology,
 			sizeof(struct msm_display_topology));
@@ -507,6 +551,9 @@
 			panel->dfps_caps.max_refresh_rate);
 	}
 
+	sde_kms_info_add_keystr(info, "dyn bitclk support",
+			panel->dyn_clk_caps.dyn_clk_support ? "true" : "false");
+
 	switch (panel->phy_props.rotation) {
 	case DSI_PANEL_ROTATE_NONE:
 		sde_kms_info_add_keystr(info, "panel orientation", "none");
@@ -602,14 +649,20 @@
 {
 	struct drm_display_mode *drm_mode;
 	struct dsi_display_mode dsi_mode;
+	struct dsi_display *dsi_display;
 
 	if (!connector || !display)
 		return;
 
-	 list_for_each_entry(drm_mode, &connector->modes, head) {
+	list_for_each_entry(drm_mode, &connector->modes, head) {
 		convert_to_dsi_mode(drm_mode, &dsi_mode);
 		dsi_display_put_mode(display, &dsi_mode);
 	}
+
+	/* free the display structure modes also */
+	dsi_display = display;
+	kfree(dsi_display->modes);
+	dsi_display->modes = NULL;
 }
 
 int dsi_connector_get_modes(struct drm_connector *connector,
@@ -656,6 +709,9 @@
 		}
 		m->width_mm = connector->display_info.width_mm;
 		m->height_mm = connector->display_info.height_mm;
+		/* set the first mode in list as preferred */
+		if (i == 0)
+			m->type |= DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, m);
 	}
 end:
@@ -762,6 +818,9 @@
 		c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_VRR;
 	}
 
+	/* ensure dynamic clk switch flag is reset */
+	c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DYN_CLK;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 2bad8c0..8d3e764 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -150,4 +150,6 @@
 void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 				struct drm_display_mode *drm_mode);
 
+u64 dsi_drm_find_bit_clk_rate(void *display,
+			      const struct drm_display_mode *drm_mode);
 #endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
index 174be9f..9ccff4b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,4 +45,14 @@
 #define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
 #define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
 
+#define PLL_CALC_DATA(addr0, addr1, data0, data1)      \
+	(((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \
+	 ((data0) << 8) | (((addr0)/4) & 0xFF))
+
+#define DSI_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1)   \
+	writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+			(base) + (offset))
+
+#define DSI_GEN_R32(base, offset) readl_relaxed(base + (offset))
+#define DSI_GEN_W32(base, offset, val) writel_relaxed((val), base + (offset))
 #endif /* _DSI_HW_H_ */
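
PLL_CALC_DATA() packs two register writes into one 32-bit word for the dynamic-refresh engine: each address is converted to a word offset (divided by 4 and truncated to 8 bits) and interleaved with its data byte. A standalone sketch that reproduces the packing; the offsets and data bytes are hypothetical:

#include <stdio.h>
#include <stdint.h>

/* mirrors PLL_CALC_DATA(): {addr1,data1} in the upper half, {addr0,data0} lower */
#define PLL_CALC_DATA(addr0, addr1, data0, data1) \
	(((uint32_t)(data1) << 24) | ((((addr1) / 4) & 0xFF) << 16) | \
	 ((uint32_t)(data0) << 8) | (((addr0) / 4) & 0xFF))

int main(void)
{
	printf("0x%08x\n", (unsigned int)PLL_CALC_DATA(0x110, 0x114, 0x10, 0x59));
	return 0;
}
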
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 2a70155..b43b23c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1130,6 +1130,46 @@
 	return rc;
 }
 
+static int dsi_panel_parse_dyn_clk_caps(struct dsi_dyn_clk_caps *dyn_clk_caps,
+				     struct device_node *of_node,
+				     const char *name)
+{
+	int rc = 0;
+	bool supported = false;
+
+	supported = of_property_read_bool(of_node, "qcom,dsi-dyn-clk-enable");
+
+	if (!supported) {
+		dyn_clk_caps->dyn_clk_support = false;
+		return rc;
+	}
+
+	of_find_property(of_node, "qcom,dsi-dyn-clk-list",
+			      &dyn_clk_caps->bit_clk_list_len);
+	dyn_clk_caps->bit_clk_list_len /= sizeof(u32);
+	if (dyn_clk_caps->bit_clk_list_len < 1) {
+		pr_err("[%s] failed to get supported bit clk list\n", name);
+		return -EINVAL;
+	}
+
+	dyn_clk_caps->bit_clk_list = kcalloc(dyn_clk_caps->bit_clk_list_len,
+					     sizeof(u32), GFP_KERNEL);
+	if (!dyn_clk_caps->bit_clk_list)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, "qcom,dsi-dyn-clk-list",
+				   dyn_clk_caps->bit_clk_list,
+				   dyn_clk_caps->bit_clk_list_len);
+	if (rc) {
+		pr_err("[%s] failed to parse supported bit clk list\n", name);
+		return -EINVAL;
+	}
+
+	dyn_clk_caps->dyn_clk_support = true;
+
+	return 0;
+}
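
of_find_property() reports the property size in bytes, so the parser above divides by sizeof(u32) to get the number of bit-clock entries before allocating and reading the array. A trivial sketch of that conversion with a made-up property length:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int prop_len_bytes = 12;	/* pretend the DT list held three u32 cells */
	int entries = prop_len_bytes / (int)sizeof(uint32_t);

	if (entries < 1)
		printf("no supported bit clocks\n");
	else
		printf("%d bit clock entries\n", entries);
	return 0;
}
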
+
 static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
 				     struct device_node *of_node,
 				     const char *name)
@@ -1137,7 +1177,7 @@
 	int rc = 0;
 	bool supported = false;
 	const char *type;
-	u32 val = 0;
+	u32 val = 0, i;
 
 	supported = of_property_read_bool(of_node,
 					"qcom,mdss-dsi-pan-enable-dynamic-fps");
@@ -1145,68 +1185,68 @@
 	if (!supported) {
 		pr_debug("[%s] DFPS is not supported\n", name);
 		dfps_caps->dfps_support = false;
-	} else {
-
-		type = of_get_property(of_node,
-				       "qcom,mdss-dsi-pan-fps-update",
-				       NULL);
-		if (!type) {
-			pr_err("[%s] dfps type not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
-			dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
-		} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
-		} else {
-			pr_err("[%s] dfps type is not recognized\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
-
-		rc = of_property_read_u32(of_node,
-					  "qcom,mdss-dsi-min-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_err("[%s] Min refresh rate is not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
-		dfps_caps->min_refresh_rate = val;
-
-		rc = of_property_read_u32(of_node,
-					  "qcom,mdss-dsi-max-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_debug("[%s] Using default refresh rate\n", name);
-			rc = of_property_read_u32(of_node,
-						"qcom,mdss-dsi-panel-framerate",
-						&val);
-			if (rc) {
-				pr_err("[%s] max refresh rate is not defined\n",
-				       name);
-				rc = -EINVAL;
-				goto error;
-			}
-		}
-		dfps_caps->max_refresh_rate = val;
-
-		if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
-			pr_err("[%s] min rate > max rate\n", name);
-			rc = -EINVAL;
-		}
-
-		pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
-				dfps_caps->min_refresh_rate,
-				dfps_caps->max_refresh_rate,
-				dfps_caps->type);
-		dfps_caps->dfps_support = true;
+		return rc;
 	}
 
+	type = of_get_property(of_node,
+			       "qcom,mdss-dsi-pan-fps-update",
+			       NULL);
+	if (!type) {
+		pr_err("[%s] dfps type not defined\n", name);
+		rc = -EINVAL;
+		goto error;
+	} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
+		dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
+	} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
+	} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
+	} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
+	} else {
+		pr_err("[%s] dfps type is not recognized\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	of_find_property(of_node, "qcom,dsi-supported-dfps-list",
+			 &dfps_caps->dfps_list_len);
+	dfps_caps->dfps_list_len /= sizeof(u32);
+	if (dfps_caps->dfps_list_len < 1) {
+		pr_err("[%s] dfps refresh list not present\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dfps_caps->dfps_list = kcalloc(dfps_caps->dfps_list_len, sizeof(u32),
+				       GFP_KERNEL);
+	if (!dfps_caps->dfps_list) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,dsi-supported-dfps-list",
+					dfps_caps->dfps_list,
+					dfps_caps->dfps_list_len);
+	if (rc) {
+		pr_err("[%s] dfps refresh rate list parse failed\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dfps_caps->dfps_support = true;
+
+	/* calculate max and min fps */
+	of_property_read_u32(of_node, "qcom,mdss-dsi-panel-framerate", &val);
+	dfps_caps->max_refresh_rate = val;
+	dfps_caps->min_refresh_rate = val;
+
+	for (i = 0; i < dfps_caps->dfps_list_len; i++) {
+		if (dfps_caps->dfps_list[i] < dfps_caps->min_refresh_rate)
+			dfps_caps->min_refresh_rate = dfps_caps->dfps_list[i];
+		else if (dfps_caps->dfps_list[i] > dfps_caps->max_refresh_rate)
+			dfps_caps->max_refresh_rate = dfps_caps->dfps_list[i];
+	}
 error:
 	return rc;
 }
@@ -1958,6 +1998,7 @@
 {
 	int rc = 0;
 	const char *bl_type;
+	const char *data;
 	u32 val = 0;
 
 	bl_type = of_get_property(of_node,
@@ -1977,6 +2018,17 @@
 		panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
 	}
 
+	data = of_get_property(of_node, "qcom,bl-update-flag", NULL);
+	if (!data) {
+		panel->bl_config.bl_update = BL_UPDATE_NONE;
+	} else if (!strcmp(data, "delay_until_first_frame")) {
+		panel->bl_config.bl_update = BL_UPDATE_DELAY_UNTIL_FIRST_FRAME;
+	} else {
+		pr_debug("[%s] No valid bl-update-flag: %s\n",
+						panel->name, data);
+		panel->bl_config.bl_update = BL_UPDATE_NONE;
+	}
+
 	panel->bl_config.bl_scale = MAX_BL_SCALE_LEVEL;
 	panel->bl_config.bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
 
@@ -2918,6 +2970,14 @@
 			pr_err("failed to parse dfps configuration, rc=%d\n",
 				rc);
 
+		if (panel->panel_mode == DSI_OP_VIDEO_MODE) {
+			rc = dsi_panel_parse_dyn_clk_caps(&panel->dyn_clk_caps,
+				of_node, panel->name);
+			if (rc)
+				pr_err("failed to parse dynamic clk config, rc=%d\n",
+				       rc);
+		}
+
 		rc = dsi_panel_parse_phy_props(&panel->phy_props,
 			of_node, panel->name);
 		if (rc) {
@@ -3317,7 +3377,7 @@
 	if (mode->priv_info) {
 		config->video_timing.dsc_enabled = mode->priv_info->dsc_enabled;
 		config->video_timing.dsc = &mode->priv_info->dsc;
-		config->bit_clk_rate_hz = mode->priv_info->clk_rate_hz;
+		config->bit_clk_rate_hz = mode->timing.clk_rate_hz;
 	}
 	config->esc_clk_rate_hz = 19200000;
 	mutex_unlock(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f6a9c60..ab8ccee 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -51,6 +51,11 @@
 	DSI_BACKLIGHT_MAX,
 };
 
+enum bl_update_flag {
+	BL_UPDATE_DELAY_UNTIL_FIRST_FRAME,
+	BL_UPDATE_NONE,
+};
+
 enum {
 	MODE_GPIO_NOT_VALID = 0,
 	MODE_SEL_DUAL_PORT,
@@ -65,10 +70,18 @@
 };
 
 struct dsi_dfps_capabilities {
-	bool dfps_support;
 	enum dsi_dfps_type type;
 	u32 min_refresh_rate;
 	u32 max_refresh_rate;
+	u32 *dfps_list;
+	u32 dfps_list_len;
+	bool dfps_support;
+};
+
+struct dsi_dyn_clk_caps {
+	bool dyn_clk_support;
+	u32 *bit_clk_list;
+	u32 bit_clk_list_len;
 };
 
 struct dsi_pinctrl_info {
@@ -85,6 +98,7 @@
 
 struct dsi_backlight_config {
 	enum dsi_backlight_type type;
+	enum bl_update_flag bl_update;
 
 	u32 bl_min_level;
 	u32 bl_max_level;
@@ -164,6 +178,7 @@
 	enum dsi_op_mode panel_mode;
 
 	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_dyn_clk_caps dyn_clk_caps;
 	struct dsi_panel_phy_props phy_props;
 
 	struct dsi_display_mode *cur_mode;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 3d6711f..ebc699a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -107,6 +107,9 @@
 
 	phy->hw.base = ptr;
 
+	ptr = msm_ioremap(pdev, "dyn_refresh_base", phy->name);
+	phy->hw.dyn_pll_base = ptr;
+
 	pr_debug("[%s] map dsi_phy registers to %pK\n",
 		phy->name, phy->hw.base);
 
@@ -616,11 +619,8 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&dsi_phy->phy_lock);
-
 	pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
 
-	mutex_unlock(&dsi_phy->phy_lock);
 	return rc;
 }
 
@@ -848,7 +848,7 @@
 		rc = phy->hw.ops.calculate_timing_params(&phy->hw,
 						 &phy->mode,
 						 &config->common_config,
-						 &phy->cfg.timing);
+						 &phy->cfg.timing, false);
 	if (rc) {
 		pr_err("[%s] failed to set timing, rc=%d\n", phy->name, rc);
 		goto error;
@@ -866,6 +866,27 @@
 	return rc;
 }
 
+/* update dsi phy timings for dynamic clk switch use case */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+			       struct dsi_host_config *config)
+{
+	int rc = 0;
+
+	if (!phy || !config) {
+		pr_err("invalid argument\n");
+		return -EINVAL;
+	}
+
+	memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
+	rc = phy->hw.ops.calculate_timing_params(&phy->hw, &phy->mode,
+						 &config->common_config,
+						 &phy->cfg.timing, true);
+	if (rc)
+		pr_err("failed to calculate phy timings %d\n", rc);
+
+	return rc;
+}
+
 int dsi_phy_lane_reset(struct msm_dsi_phy *phy)
 {
 	int ret = 0;
@@ -1030,10 +1051,111 @@
 		rc = phy->hw.ops.phy_timing_val(&phy->cfg.timing, timing, size);
 	if (!rc)
 		phy->cfg.is_phy_timing_present = true;
+
 	mutex_unlock(&phy->phy_lock);
 	return rc;
 }
 
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy:	DSI PHY handle
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master)
+{
+	u32 off;
+
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+	/*
+	 * program PLL_SWI_INTF_SEL and SW_TRIGGER bit only for
+	 * master and program SYNC_MODE bit only for slave.
+	 */
+	if (is_master)
+		off = BIT(DYN_REFRESH_INTF_SEL) | BIT(DYN_REFRESH_SWI_CTRL) |
+			BIT(DYN_REFRESH_SW_TRIGGER);
+	else
+		off = BIT(DYN_REFRESH_SYNC_MODE) | BIT(DYN_REFRESH_SWI_CTRL);
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, off);
+
+	mutex_unlock(&phy->phy_lock);
+}
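
dsi_phy_dynamic_refresh_trigger() programs different control bits per role: the master gets INTF_SEL, SWI_CTRL and SW_TRIGGER, while a slave gets SYNC_MODE and SWI_CTRL. A small sketch computing both masks from the enum values added in dsi_defs.h (where INTF_SEL starts at 1):

#include <stdio.h>

#define BIT(n) (1u << (n))

enum { INTF_SEL = 1, SYNC_MODE, SW_TRIGGER, SWI_CTRL };	/* as in dsi_defs.h */

int main(void)
{
	unsigned int master = BIT(INTF_SEL) | BIT(SWI_CTRL) | BIT(SW_TRIGGER);
	unsigned int slave  = BIT(SYNC_MODE) | BIT(SWI_CTRL);

	printf("master=0x%02x slave=0x%02x\n", master, slave);	/* 0x1a 0x14 */
	return 0;
}
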
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy:	DSI PHY handle
+ * @delay:	pipe delays for dynamic refresh
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+				    struct dsi_dyn_clk_delay *delay,
+				    bool is_master)
+{
+	struct dsi_phy_cfg *cfg;
+
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+
+	cfg = &phy->cfg;
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_config)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_config(&phy->hw, cfg,
+							       is_master);
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay(
+						&phy->hw, delay);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
+/**
+ * dsi_phy_dyn_refresh_cache_phy_timings - cache the phy timings calculated
+ *				as part of dynamic refresh.
+ * @phy:	   DSI PHY Handle.
+ * @dst:	   Pointer to cache location.
+ * @size:	   Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy, u32 *dst,
+					  u32 size)
+{
+	int rc = 0;
+
+	if (!phy || !dst || !size)
+		return -EINVAL;
+
+	if (phy->hw.ops.dyn_refresh_ops.cache_phy_timings)
+		rc = phy->hw.ops.dyn_refresh_ops.cache_phy_timings(
+					   &phy->cfg.timing, dst, size);
+
+	if (rc)
+		pr_err("failed to cache phy timings %d\n", rc);
+
+	return rc;
+}
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy:	DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy)
+{
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, 0);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
 void dsi_phy_drv_register(void)
 {
 	platform_driver_register(&dsi_phy_platform_driver);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 4163411..65c7a16 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -278,4 +278,45 @@
  */
 void dsi_phy_drv_unregister(void);
 
+/**
+ * dsi_phy_update_phy_timings() - Update dsi phy timings
+ * @phy:	DSI PHY handle
+ * @config:	DSI Host config parameters
+ *
+ * Return: error code.
+ */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+			       struct dsi_host_config *config);
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy:	DSI PHY handle
+ * @delay:	pipe delays for dynamic refresh
+ * @is_master:	Boolean to indicate if for master or slave
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+				    struct dsi_dyn_clk_delay *delay,
+				    bool is_master);
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy:	DSI PHY handle
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master);
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy:	DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy);
+
+/**
+ * dsi_phy_dyn_refresh_cache_phy_timings - cache the phy timings calculated
+ *				as part of dynamic refresh.
+ * @phy:	   DSI PHY Handle.
+ * @dst:	   Pointer to cache location.
+ * @size:	   Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy,
+					  u32 *dst, u32 size);
 #endif /* _DSI_PHY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index d24a613..67a1157 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -159,6 +159,43 @@
 	bool (*is_lanes_in_ulps)(u32 ulps, u32 ulps_lanes);
 };
 
+struct phy_dyn_refresh_ops {
+	/**
+	 * dyn_refresh_helper - helper function to config particular registers
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @offset:         register offset to program.
+	 */
+	void (*dyn_refresh_helper)(struct dsi_phy_hw *phy, u32 offset);
+
+	/**
+	 * dyn_refresh_config - configure dynamic refresh ctrl registers
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @cfg:	   Pointer to DSI PHY timings.
+	 * @is_master:	   Boolean to indicate whether for master or slave.
+	 */
+	void (*dyn_refresh_config)(struct dsi_phy_hw *phy,
+				   struct dsi_phy_cfg *cfg, bool is_master);
+
+	/**
+	 * dyn_refresh_pipe_delay - configure pipe delay registers for dynamic
+	 *				refresh.
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @delay:	   structure containing all the delays to be programmed.
+	 */
+	void (*dyn_refresh_pipe_delay)(struct dsi_phy_hw *phy,
+				      struct dsi_dyn_clk_delay *delay);
+
+	/**
+	 * cache_phy_timings - cache the phy timings calculated as part of
+	 *				dynamic refresh.
+	 * @timings:       Pointer to calculated phy timing parameters.
+	 * @dst:	   Pointer to cache location.
+	 * @size:	   Number of phy lane settings.
+	 */
+	int (*cache_phy_timings)(struct dsi_phy_per_lane_cfgs *timings,
+				  u32 *dst, u32 size);
+};
+
 /**
  * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
  * @regulator_enable:          Enable PHY regulators.
@@ -218,11 +255,14 @@
 	 * @mode:     Mode information for which timing has to be calculated.
 	 * @config:   DSI host configuration for this mode.
 	 * @timing:   Timing parameters for each lane which will be returned.
+	 * @use_mode_bit_clk: Boolean to indicate whether to recalculate the dsi
+	 *		bitclk or use the existing bitclk (for the dynamic clk case).
 	 */
 	int (*calculate_timing_params)(struct dsi_phy_hw *phy,
 				       struct dsi_mode_info *mode,
 				       struct dsi_host_common_cfg *config,
-				       struct dsi_phy_per_lane_cfgs *timing);
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk);
 
 	/**
 	 * phy_timing_val() - Gets PHY timing values.
@@ -257,12 +297,15 @@
 
 	void *timing_ops;
 	struct phy_ulps_config_ops ulps_ops;
+	struct phy_dyn_refresh_ops dyn_refresh_ops;
 };
 
 /**
  * struct dsi_phy_hw - DSI phy hardware object specific to an instance
  * @base:                  VA for the DSI PHY base address.
  * @length:                Length of the DSI PHY register base map.
+ * @dyn_pll_base:          VA for the DSI dynamic refresh base address.
+ * @dyn_refresh_len:       Length of the DSI dynamic refresh register base map.
  * @index:                 Instance ID of the controller.
  * @version:               DSI PHY version.
  * @feature_map:           Features supported by DSI PHY.
@@ -271,6 +314,8 @@
 struct dsi_phy_hw {
 	void __iomem *base;
 	u32 length;
+	void __iomem *dyn_pll_base;
+	u32 dyn_refresh_len;
 	u32 index;
 
 	enum dsi_phy_version version;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index 5015806..6c6286d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -51,7 +51,6 @@
 #define DSIPHY_CMN_LANE_STATUS0						0x0F4
 #define DSIPHY_CMN_LANE_STATUS1						0x0F8
 
-
 /* n = 0..3 for data lanes and n = 4 for clock lane */
 #define DSIPHY_LNX_CFG0(n)                         (0x200 + (0x80 * (n)))
 #define DSIPHY_LNX_CFG1(n)                         (0x204 + (0x80 * (n)))
@@ -66,6 +65,47 @@
 #define DSIPHY_LNX_LPRX_CTRL(n)                    (0x228 + (0x80 * (n)))
 #define DSIPHY_LNX_TX_DCTRL(n)                     (0x22C + (0x80 * (n)))
 
+/* dynamic refresh control registers */
+#define DSI_DYN_REFRESH_CTRL                   (0x000)
+#define DSI_DYN_REFRESH_PIPE_DELAY             (0x004)
+#define DSI_DYN_REFRESH_PIPE_DELAY2            (0x008)
+#define DSI_DYN_REFRESH_PLL_DELAY              (0x00C)
+#define DSI_DYN_REFRESH_STATUS                 (0x010)
+#define DSI_DYN_REFRESH_PLL_CTRL0              (0x014)
+#define DSI_DYN_REFRESH_PLL_CTRL1              (0x018)
+#define DSI_DYN_REFRESH_PLL_CTRL2              (0x01C)
+#define DSI_DYN_REFRESH_PLL_CTRL3              (0x020)
+#define DSI_DYN_REFRESH_PLL_CTRL4              (0x024)
+#define DSI_DYN_REFRESH_PLL_CTRL5              (0x028)
+#define DSI_DYN_REFRESH_PLL_CTRL6              (0x02C)
+#define DSI_DYN_REFRESH_PLL_CTRL7              (0x030)
+#define DSI_DYN_REFRESH_PLL_CTRL8              (0x034)
+#define DSI_DYN_REFRESH_PLL_CTRL9              (0x038)
+#define DSI_DYN_REFRESH_PLL_CTRL10             (0x03C)
+#define DSI_DYN_REFRESH_PLL_CTRL11             (0x040)
+#define DSI_DYN_REFRESH_PLL_CTRL12             (0x044)
+#define DSI_DYN_REFRESH_PLL_CTRL13             (0x048)
+#define DSI_DYN_REFRESH_PLL_CTRL14             (0x04C)
+#define DSI_DYN_REFRESH_PLL_CTRL15             (0x050)
+#define DSI_DYN_REFRESH_PLL_CTRL16             (0x054)
+#define DSI_DYN_REFRESH_PLL_CTRL17             (0x058)
+#define DSI_DYN_REFRESH_PLL_CTRL18             (0x05C)
+#define DSI_DYN_REFRESH_PLL_CTRL19             (0x060)
+#define DSI_DYN_REFRESH_PLL_CTRL20             (0x064)
+#define DSI_DYN_REFRESH_PLL_CTRL21             (0x068)
+#define DSI_DYN_REFRESH_PLL_CTRL22             (0x06C)
+#define DSI_DYN_REFRESH_PLL_CTRL23             (0x070)
+#define DSI_DYN_REFRESH_PLL_CTRL24             (0x074)
+#define DSI_DYN_REFRESH_PLL_CTRL25             (0x078)
+#define DSI_DYN_REFRESH_PLL_CTRL26             (0x07C)
+#define DSI_DYN_REFRESH_PLL_CTRL27             (0x080)
+#define DSI_DYN_REFRESH_PLL_CTRL28             (0x084)
+#define DSI_DYN_REFRESH_PLL_CTRL29             (0x088)
+#define DSI_DYN_REFRESH_PLL_CTRL30             (0x08C)
+#define DSI_DYN_REFRESH_PLL_CTRL31             (0x090)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR         (0x094)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2        (0x098)
+
 static inline int dsi_conv_phy_to_logical_lane(
 	struct dsi_lane_map *lane_map, enum dsi_phy_data_lanes phy_lane)
 {
@@ -363,7 +403,8 @@
 	pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
 		__func__, stop_state_mask);
 	rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
-			(val == stop_state_mask), sleep_us, timeout_us);
+				((val & stop_state_mask) == stop_state_mask),
+				sleep_us, timeout_us);
 	if (rc) {
 		pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
 			__func__, val);
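
The poll fix above matters because DSIPHY_CMN_LANE_STATUS1 can carry bits outside the requested stop-state mask; requiring exact equality made the poll fail whenever any extra bit was set. A tiny sketch contrasting the two predicates with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int stop_state_mask = 0x0F;	/* four data lanes in stop state */
	unsigned int status = 0x8F;		/* stop bits set plus an unrelated flag */

	printf("old check: %s\n",
	       status == stop_state_mask ? "pass" : "fail");
	printf("new check: %s\n",
	       (status & stop_state_mask) == stop_state_mask ? "pass" : "fail");
	return 0;
}
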
@@ -499,3 +540,163 @@
 		timing_cfg->lane_v3[i] = timing_val[i];
 	return 0;
 }
+
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+					struct dsi_phy_cfg *cfg, bool is_master)
+{
+	u32 reg;
+
+	if (is_master) {
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+			  DSIPHY_CMN_GLBL_CTRL, DSIPHY_CMN_VREG_CTRL,
+			  0x10, 0x59);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL10,
+			  DSIPHY_CMN_TIMING_CTRL_0, DSIPHY_CMN_TIMING_CTRL_1,
+			  cfg->timing.lane_v3[0], cfg->timing.lane_v3[1]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL11,
+			  DSIPHY_CMN_TIMING_CTRL_2, DSIPHY_CMN_TIMING_CTRL_3,
+			  cfg->timing.lane_v3[2], cfg->timing.lane_v3[3]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL12,
+			  DSIPHY_CMN_TIMING_CTRL_4, DSIPHY_CMN_TIMING_CTRL_5,
+			  cfg->timing.lane_v3[4], cfg->timing.lane_v3[5]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL13,
+			  DSIPHY_CMN_TIMING_CTRL_6, DSIPHY_CMN_TIMING_CTRL_7,
+			  cfg->timing.lane_v3[6], cfg->timing.lane_v3[7]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL14,
+			  DSIPHY_CMN_TIMING_CTRL_8, DSIPHY_CMN_TIMING_CTRL_9,
+			  cfg->timing.lane_v3[8], cfg->timing.lane_v3[9]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL15,
+			  DSIPHY_CMN_TIMING_CTRL_10, DSIPHY_CMN_TIMING_CTRL_11,
+			  cfg->timing.lane_v3[10], cfg->timing.lane_v3[11]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL16,
+			  DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
+			  0x7f, 0x1f);
+	} else {
+		reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG0);
+		reg &= ~BIT(5);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
+			  DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_PLL_CNTRL,
+			  reg, 0x0);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
+			  DSIPHY_CMN_RBUF_CTRL, DSIPHY_CMN_GLBL_CTRL,
+			  0x0, 0x10);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
+			  DSIPHY_CMN_VREG_CTRL, DSIPHY_CMN_TIMING_CTRL_0,
+			  0x59, cfg->timing.lane_v3[0]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
+			  DSIPHY_CMN_TIMING_CTRL_1, DSIPHY_CMN_TIMING_CTRL_2,
+			  cfg->timing.lane_v3[1], cfg->timing.lane_v3[2]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
+			  DSIPHY_CMN_TIMING_CTRL_3, DSIPHY_CMN_TIMING_CTRL_4,
+			  cfg->timing.lane_v3[3], cfg->timing.lane_v3[4]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
+			  DSIPHY_CMN_TIMING_CTRL_5, DSIPHY_CMN_TIMING_CTRL_6,
+			  cfg->timing.lane_v3[5], cfg->timing.lane_v3[6]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
+			  DSIPHY_CMN_TIMING_CTRL_7, DSIPHY_CMN_TIMING_CTRL_8,
+			  cfg->timing.lane_v3[7], cfg->timing.lane_v3[8]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
+			  DSIPHY_CMN_TIMING_CTRL_9, DSIPHY_CMN_TIMING_CTRL_10,
+			  cfg->timing.lane_v3[9], cfg->timing.lane_v3[10]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
+			  DSIPHY_CMN_TIMING_CTRL_11, DSIPHY_CMN_CTRL_0,
+			  cfg->timing.lane_v3[11], 0x7f);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+			  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
+			  0x1f, 0x40);
+		/*
+		 * Fill with dummy register writes since the controller will
+		 * blindly send these values to the DSI PHY.
+		 */
+		reg = DSI_DYN_REFRESH_PLL_CTRL11;
+		while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
+			DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
+				  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
+				  0x1f, 0x7f);
+			reg += 0x4;
+		}
+
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR, 0);
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR2, 0);
+	}
+
+	wmb(); /* make sure all registers are updated */
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay)
+{
+	if (!delay)
+		return;
+
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY,
+		    delay->pipe_delay);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2,
+		    delay->pipe_delay2);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY,
+		    delay->pll_delay);
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
+{
+	u32 reg;
+
+	/*
+	 * If no offset is specified, clear the dynamic refresh ctrl
+	 * register, which is the last step of the dynamic refresh
+	 * sequence.
+	 */
+	if (!offset) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg &= ~(BIT(0) | BIT(8));
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is cleared */
+		return;
+	}
+
+	if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(13);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SYNC_MODE)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(16);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(0);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(8);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is triggered */
+	}
+}
+
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size)
+{
+	int i;
+
+	if (!timings || !dst || !size)
+		return -EINVAL;
+
+	if (size != DSI_PHY_TIMING_V3_SIZE) {
+		pr_err("size mismatch\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		dst[i] = timings->lane_v3[i];
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
index fdfaa5d..44d0928 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
@@ -511,11 +511,14 @@
  * @mode:     Mode information for which timing has to be calculated.
  * @config:   DSI host configuration for this mode.
  * @timing:   Timing parameters for each lane which will be returned.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate the dsi
+ *		bit clk or use the existing bit clk (for the dynamic clk case).
  */
 int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-					    struct dsi_host_common_cfg *host,
-					   struct dsi_phy_per_lane_cfgs *timing)
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *host,
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk)
 {
 	/* constants */
 	u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
@@ -541,7 +544,7 @@
 	struct phy_timing_ops *ops = phy->ops.timing_ops;
 
 	memset(&desc, 0x0, sizeof(desc));
-	h_total = DSI_H_TOTAL(mode);
+	h_total = DSI_H_TOTAL_DSC(mode);
 	v_total = DSI_V_TOTAL(mode);
 
 	bpp = bits_per_pixel[host->dst_format];
@@ -558,7 +561,10 @@
 		num_of_lanes++;
 
 
-	x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+	if (use_mode_bit_clk)
+		x = mode->clk_rate_hz;
+	else
+		x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
 	y = rounddown(x, 1);
 
 	clk_params.bitclk_mbps = rounddown(DIV_ROUND_UP_ULL(y, 1000000), 1);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 20cae2e..a4d71f7 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -124,7 +124,8 @@
 	int conn_cnt = 0;
 
 	if (msm_is_mode_seamless(&crtc_state->mode) ||
-		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
+		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode) ||
+		msm_is_mode_seamless_dyn_clk(&crtc_state->adjusted_mode))
 		return true;
 
 	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
@@ -168,6 +169,10 @@
 			&connector->encoder->crtc->state->adjusted_mode))
 		return true;
 
+	if (msm_is_mode_seamless_dyn_clk(
+			 &connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
 	if (msm_is_mode_seamless_dms(
 			&connector->encoder->crtc->state->adjusted_mode))
 		return true;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0f565d3..c564a09 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -201,62 +201,46 @@
 	return val;
 }
 
-struct vblank_event {
-	struct list_head node;
+struct vblank_work {
+	struct kthread_work work;
 	int crtc_id;
 	bool enable;
+	struct msm_drm_private *priv;
 };
 
 static void vblank_ctrl_worker(struct kthread_work *work)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
-						struct msm_vblank_ctrl, work);
-	struct msm_drm_private *priv = container_of(vbl_ctrl,
-					struct msm_drm_private, vblank_ctrl);
+	struct vblank_work *cur_work = container_of(work,
+					struct vblank_work, work);
+	struct msm_drm_private *priv = cur_work->priv;
 	struct msm_kms *kms = priv->kms;
-	struct vblank_event *vbl_ev, *tmp;
-	unsigned long flags;
-	LIST_HEAD(tmp_head);
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		list_add_tail(&vbl_ev->node, &tmp_head);
-	}
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	if (cur_work->enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
 
-	list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
-		if (vbl_ev->enable)
-			kms->funcs->enable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
-		else
-			kms->funcs->disable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
-
-		kfree(vbl_ev);
-	}
+	kfree(cur_work);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
-	unsigned long flags;
+	struct vblank_work *cur_work;
 
-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
+	if (!priv || crtc_id >= priv->num_crtcs)
+		return -EINVAL;
+
+	cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
+	if (!cur_work)
 		return -ENOMEM;
 
-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	kthread_init_work(&cur_work->work, vblank_ctrl_worker);
+	cur_work->crtc_id = crtc_id;
+	cur_work->enable = enable;
+	cur_work->priv = priv;
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-			&vbl_ctrl->work);
+	kthread_queue_work(&priv->disp_thread[crtc_id].worker, &cur_work->work);
 
 	return 0;
 }
@@ -268,20 +252,8 @@
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev, *tmp;
 	int i;
 
-	/* We must cancel and cleanup any pending vblank enable/disable
-	 * work before drm_irq_uninstall() to avoid work re-enabling an
-	 * irq after uninstall has disabled it.
-	 */
-	kthread_flush_work(&vbl_ctrl->work);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		kfree(vbl_ev);
-	}
-
 	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->disp_thread[i].thread) {
@@ -522,9 +494,6 @@
 
 	INIT_LIST_HEAD(&priv->client_event_list);
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	ret = sde_power_resource_init(pdev, &priv->phandle);
 	if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 978aba2..83e100c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -198,12 +198,6 @@
 	CONNECTOR_PROP_COUNT
 };
 
-struct msm_vblank_ctrl {
-	struct kthread_work work;
-	struct list_head event_list;
-	spinlock_t lock;
-};
-
 #define MAX_H_TILES_PER_DISPLAY 2
 
 /**
@@ -615,8 +609,6 @@
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
-	struct msm_vblank_ctrl vblank_ctrl;
-
 	/* task holding struct_mutex.. currently only used in submit path
 	 * to detect and reject faults from copy_from_user() for submit
 	 * ioctl.
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e99ff9c..f5f6853 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -38,6 +38,8 @@
 #define MSM_MODE_FLAG_SEAMLESS_DMS			(1<<2)
 /* Request to switch the fps */
 #define MSM_MODE_FLAG_SEAMLESS_VRR			(1<<3)
+/* Request to switch the bit clk */
+#define MSM_MODE_FLAG_SEAMLESS_DYN_CLK			(1<<4)
 
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
@@ -175,6 +177,13 @@
 		: false;
 }
 
+static inline bool msm_is_mode_seamless_dyn_clk(
+					const struct drm_display_mode *mode)
+{
+	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYN_CLK)
+		: false;
+}
+
 static inline bool msm_needs_vblank_pre_modeset(
 		const struct drm_display_mode *mode)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 8680449b..378847d 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -1862,24 +1862,29 @@
 
 	node = container_of(ad_irq, struct sde_crtc_irq_info, irq);
 
+	/* deregister AD irq */
 	if (!en) {
 		spin_lock_irqsave(&node->state_lock, flags);
 		if (node->state == IRQ_ENABLED) {
+			node->state = IRQ_DISABLING;
+			spin_unlock_irqrestore(&node->state_lock, flags);
 			ret = sde_core_irq_disable(kms, &irq_idx, 1);
-			if (ret)
+			spin_lock_irqsave(&node->state_lock, flags);
+			if (ret) {
 				DRM_ERROR("disable irq %d error %d\n",
 					irq_idx, ret);
-			else
-				node->state = IRQ_NOINIT;
-		} else {
-			node->state = IRQ_NOINIT;
+				node->state = IRQ_ENABLED;
+			} else {
+				node->state = IRQ_DISABLED;
+			}
 		}
 		spin_unlock_irqrestore(&node->state_lock, flags);
+
 		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
-		ret = 0;
 		goto exit;
 	}
 
+	/* register AD irq */
 	ad_irq->arg = crtc;
 	ad_irq->func = sde_cp_ad_interrupt_cb;
 	ret = sde_core_irq_register_callback(kms, irq_idx, ad_irq);
@@ -1889,11 +1894,15 @@
 	}
 
 	spin_lock_irqsave(&node->state_lock, flags);
-	if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
+	if (node->state == IRQ_DISABLED) {
+		node->state = IRQ_ENABLING;
+		spin_unlock_irqrestore(&node->state_lock, flags);
 		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		spin_lock_irqsave(&node->state_lock, flags);
 		if (ret) {
 			DRM_ERROR("enable irq %d error %d\n", irq_idx, ret);
 			sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
+			node->state = IRQ_DISABLED;
 		} else {
 			node->state = IRQ_ENABLED;
 		}
@@ -2138,15 +2147,10 @@
 					irq_idx, ret);
 				node->state = IRQ_ENABLED;
 			} else {
-				node->state = IRQ_NOINIT;
+				node->state = IRQ_DISABLED;
 			}
-			spin_unlock_irqrestore(&node->state_lock, flags);
-		} else if (node->state == IRQ_DISABLED) {
-			node->state = IRQ_NOINIT;
-			spin_unlock_irqrestore(&node->state_lock, flags);
-		} else {
-			spin_unlock_irqrestore(&node->state_lock, flags);
 		}
+		spin_unlock_irqrestore(&node->state_lock, flags);
 
 		sde_core_irq_unregister_callback(kms, irq_idx, hist_irq);
 		goto exit;
@@ -2162,12 +2166,16 @@
 	}
 
 	spin_lock_irqsave(&node->state_lock, flags);
-	if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
+	if (node->state == IRQ_DISABLED) {
+		node->state = IRQ_ENABLING;
+		spin_unlock_irqrestore(&node->state_lock, flags);
 		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		spin_lock_irqsave(&node->state_lock, flags);
 		if (ret) {
 			DRM_ERROR("enable irq %d error %d\n", irq_idx, ret);
 			sde_core_irq_unregister_callback(kms,
 				irq_idx, hist_irq);
+			node->state = IRQ_DISABLED;
 		} else {
 			node->state = IRQ_ENABLED;
 		}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 9d7d1e0..85b9f7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -89,12 +89,19 @@
 	if (!bl_lvl && brightness)
 		bl_lvl = 1;
 
+	if (display->panel->bl_config.bl_update ==
+		BL_UPDATE_DELAY_UNTIL_FIRST_FRAME && !c_conn->allow_bl_update) {
+		c_conn->unset_bl_level = bl_lvl;
+		return 0;
+	}
+
 	if (c_conn->ops.set_backlight) {
 		event.type = DRM_EVENT_SYS_BACKLIGHT;
 		event.length = sizeof(u32);
 		msm_mode_object_event_notify(&c_conn->base.base,
 				c_conn->base.dev, &event, (u8 *)&brightness);
 		rc = c_conn->ops.set_backlight(c_conn->display, bl_lvl);
+		c_conn->unset_bl_level = 0;
 	}
 
 	return rc;
@@ -519,6 +526,15 @@
 
 	bl_config = &dsi_display->panel->bl_config;
 
+	if (dsi_display->panel->bl_config.bl_update ==
+		BL_UPDATE_DELAY_UNTIL_FIRST_FRAME && !c_conn->allow_bl_update) {
+		c_conn->unset_bl_level = bl_config->bl_level;
+		return 0;
+	}
+
+	if (c_conn->unset_bl_level)
+		bl_config->bl_level = c_conn->unset_bl_level;
+
 	if (c_conn->bl_scale > MAX_BL_SCALE_LEVEL)
 		bl_config->bl_scale = MAX_BL_SCALE_LEVEL;
 	else
@@ -533,6 +549,7 @@
 		bl_config->bl_scale, bl_config->bl_scale_ad,
 		bl_config->bl_level);
 	rc = c_conn->ops.set_backlight(dsi_display, bl_config->bl_level);
+	c_conn->unset_bl_level = 0;
 
 	return rc;
 }
@@ -572,8 +589,11 @@
 		}
 	}
 
-	/* Special handling for postproc properties */
-	if (c_conn->bl_scale_dirty) {
+	/*
+	 * Special handling for postproc properties and
+	 * for updating backlight if any unset backlight level is present
+	 */
+	if (c_conn->bl_scale_dirty || c_conn->unset_bl_level) {
 		_sde_connector_update_bl_scale(c_conn);
 		c_conn->bl_scale_dirty = false;
 	}
@@ -639,29 +659,44 @@
 	sde_connector_schedule_status_work(connector, false);
 
 	c_conn = to_sde_connector(connector);
-	if (c_conn->panel_dead) {
+	if (c_conn->bl_device) {
 		c_conn->bl_device->props.power = FB_BLANK_POWERDOWN;
 		c_conn->bl_device->props.state |= BL_CORE_FBBLANK;
 		backlight_update_status(c_conn->bl_device);
 	}
+
+	c_conn->allow_bl_update = false;
 }
 
 void sde_connector_helper_bridge_enable(struct drm_connector *connector)
 {
 	struct sde_connector *c_conn = NULL;
+	struct dsi_display *display;
 
 	if (!connector)
 		return;
 
 	c_conn = to_sde_connector(connector);
+	display = (struct dsi_display *) c_conn->display;
 
-	/* Special handling for ESD recovery case */
-	if (c_conn->panel_dead) {
+	/*
+	 * Special handling for some panels which need at least
+	 * one frame to be transferred to GRAM before enabling backlight.
+	 * So delay the backlight update for these panels until the
+	 * first frame commit is received from the HW.
+	 */
+	if (display->panel->bl_config.bl_update ==
+				BL_UPDATE_DELAY_UNTIL_FIRST_FRAME)
+		sde_encoder_wait_for_event(c_conn->encoder,
+				MSM_ENC_TX_COMPLETE);
+	c_conn->allow_bl_update = true;
+
+	if (c_conn->bl_device) {
 		c_conn->bl_device->props.power = FB_BLANK_UNBLANK;
 		c_conn->bl_device->props.state &= ~BL_CORE_FBBLANK;
 		backlight_update_status(c_conn->bl_device);
-		c_conn->panel_dead = false;
 	}
+	c_conn->panel_dead = false;
 }
 
 int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
@@ -1914,6 +1949,9 @@
 
 		sde_kms_info_add_keystr(info, "mode_name", mode->name);
 
+		sde_kms_info_add_keyint(info, "bit_clk_rate",
+					mode_info.clk_rate);
+
 		topology_idx = (int)sde_rm_get_topology_name(
 							mode_info.topology);
 		if (topology_idx < SDE_RM_TOPOLOGY_MAX) {
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 51dc92d..0ae6a91 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -329,7 +329,9 @@
  * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
  * @bl_scale: BL scale value for ABA feature
  * @bl_scale_ad: BL scale value for AD feature
- * last_cmd_tx_sts: status of the last command transfer
+ * @unset_bl_level: BL level that needs to be set later
+ * @allow_bl_update: Flag to indicate whether BL update is currently allowed
+ * @last_cmd_tx_sts: status of the last command transfer
  */
 struct sde_connector {
 	struct drm_connector base;
@@ -373,6 +375,8 @@
 	bool bl_scale_dirty;
 	u32 bl_scale;
 	u32 bl_scale_ad;
+	u32 unset_bl_level;
+	bool allow_bl_update;
 
 	bool last_cmd_tx_sts;
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index cb4e82d..e0094d7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -748,8 +748,9 @@
 	SDE_DEBUG("\n");
 
 	if ((msm_is_mode_seamless(adjusted_mode) ||
-			msm_is_mode_seamless_vrr(adjusted_mode)) &&
-		(!crtc->enabled)) {
+	     (msm_is_mode_seamless_vrr(adjusted_mode) ||
+	      msm_is_mode_seamless_dyn_clk(adjusted_mode))) &&
+	    (!crtc->enabled)) {
 		SDE_ERROR("crtc state prevents seamless transition\n");
 		return false;
 	}
@@ -6215,7 +6216,7 @@
 			INIT_LIST_HEAD(&node->list);
 			node->func = custom_events[i].func;
 			node->event = event;
-			node->state = IRQ_NOINIT;
+			node->state = IRQ_DISABLED;
 			spin_lock_init(&node->state_lock);
 			break;
 		}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 709a51f..4700a6c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -430,7 +430,7 @@
 };
 
 enum sde_crtc_irq_state {
-	IRQ_NOINIT,
+	IRQ_ENABLING,
 	IRQ_ENABLED,
 	IRQ_DISABLING,
 	IRQ_DISABLED,
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index a1986db..0f5e127 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1454,7 +1454,7 @@
 	struct msm_mode_info mode_info;
 	int i, rc = 0;
 
-	if (!sde_enc || !disp_info) {
+	if (!sde_enc || !sde_enc->cur_master || !disp_info) {
 		SDE_ERROR("invalid param sde_enc:%d or disp_info:%d\n",
 					sde_enc != NULL, disp_info != NULL);
 		return;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index a88f513..b7948a1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -63,6 +63,11 @@
 #define MAX_DWORDS_SZ (BIT(14) - 1)
 #define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
 
+static uint32_t reg_dma_register_count;
+static uint32_t reg_dma_intr_status_offset;
+static uint32_t reg_dma_intr_4_status_offset;
+static uint32_t reg_dma_intr_clear_offset;
+
 typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);
 
 static struct sde_hw_reg_dma *reg_dma;
@@ -106,9 +111,6 @@
 	[CTL_3][1] = BIT(24),
 };
 
-static int reg_dma_int_status_off;
-static int reg_dma_clear_status_off;
-
 static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
@@ -130,6 +132,7 @@
 		enum sde_reg_dma_last_cmd_mode mode);
 static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
 static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
+static void dump_regs_v1(void);
 
 static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
 	[HW_BLK_SELECT] = write_decode_sel,
@@ -471,7 +474,7 @@
 
 static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 {
-	u32 cmd1;
+	u32 cmd1, mask = 0, val = 0;
 	struct sde_hw_blk_reg_map hw;
 
 	memset(&hw, 0, sizeof(hw));
@@ -485,15 +488,25 @@
 
 	SET_UP_REG_DMA_REG(hw, reg_dma);
 	SDE_REG_WRITE(&hw, REG_DMA_OP_MODE_OFF, BIT(0));
-	SDE_REG_WRITE(&hw, reg_dma_clear_status_off,
-		ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select]);
+	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
+	if (val) {
+		DRM_DEBUG("LUT dma status %x\n", val);
+		mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);
+		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset + sizeof(u32) * 4,
+			mask);
+		SDE_EVT32(val);
+	}
+
 	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
 			cfg->dma_buf->iova);
 	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
 			cmd1);
-	if (cfg->last_command)
+	if (cfg->last_command) {
+		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
+		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
 		SDE_REG_WRITE(&cfg->ctl->hw, REG_DMA_CTL_TRIGGER_OFF,
 			queue_sel[cfg->queue_select]);
+	}
 
 	return 0;
 }
@@ -539,13 +552,17 @@
 	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
 	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
 	reg_dma->ops.last_command = last_cmd_v1;
+	reg_dma->ops.dump_regs = dump_regs_v1;
 
 	reg_dma_ctl_queue_off[CTL_0] = REG_DMA_CTL0_QUEUE_0_CMD0_OFF;
 	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
 		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
 			(sizeof(u32) * 4);
-	reg_dma_int_status_off = 0x90;
-	reg_dma_clear_status_off = 0xa0;
+
+	reg_dma_register_count = 60;
+	reg_dma_intr_status_offset = 0x90;
+	reg_dma_intr_4_status_offset = 0xa0;
+	reg_dma_intr_clear_offset = 0xb0;
 
 	return 0;
 }
@@ -859,7 +876,7 @@
 	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode);
 	if (mode == REG_DMA_WAIT4_COMP) {
 		rc = readl_poll_timeout(hw.base_off + hw.blk_off +
-			reg_dma_int_status_off, val,
+			reg_dma_intr_status_offset, val,
 			(val & ctl_trigger_done_mask[ctl->idx][q]),
 			10, 20000);
 		if (rc)
@@ -881,3 +898,18 @@
 		last_cmd_buf[i] = NULL;
 	}
 }
+
+static void dump_regs_v1(void)
+{
+	uint32_t i = 0;
+	u32 val;
+	struct sde_hw_blk_reg_map hw;
+
+	memset(&hw, 0, sizeof(hw));
+	SET_UP_REG_DMA_REG(hw, reg_dma);
+
+	for (i = 0; i < reg_dma_register_count; i++) {
+		val = SDE_REG_READ(&hw, i * sizeof(u32));
+		DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)), val);
+	}
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.c b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
index 1bef4b8..ca4caa0 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.c
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -69,11 +69,15 @@
 	return 0;
 }
 
+static void default_dump_reg(void)
+{
+}
+
 static struct sde_hw_reg_dma reg_dma = {
 	.ops = {default_check_support, default_setup_payload,
 		default_kick_off, default_reset, default_alloc_reg_dma_buf,
 		default_dealloc_reg_dma, default_buf_reset_reg_dma,
-		default_last_command},
+		default_last_command, default_dump_reg},
 };
 
 int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
@@ -118,7 +122,7 @@
 	.ops = {default_check_support, default_setup_payload,
 		default_kick_off, default_reset, default_alloc_reg_dma_buf,
 		default_dealloc_reg_dma, default_buf_reset_reg_dma,
-		default_last_command},
+		default_last_command, default_dump_reg},
 	};
 
 	if (!reg_dma.drm_dev || !reg_dma.caps)
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.h b/drivers/gpu/drm/msm/sde/sde_reg_dma.h
index 41a292a..f980a80 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.h
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -266,6 +266,7 @@
  * @dealloc_reg_dma: de-allocate reg dma buffer
  * @reset_reg_dma_buf: reset the buffer to init state
  * @last_command: notify control that last command is queued
+ * @dump_regs: dump reg dma registers
  */
 struct sde_hw_reg_dma_ops {
 	int (*check_support)(enum sde_reg_dma_features feature,
@@ -279,6 +280,7 @@
 	int (*reset_reg_dma_buf)(struct sde_reg_dma_buffer *buf);
 	int (*last_command)(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 			enum sde_reg_dma_last_cmd_mode mode);
+	void (*dump_regs)(void);
 };
 
 /**
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 56c288f..5bfae1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -271,12 +271,16 @@
 		nv_connector->edid = NULL;
 	}
 
-	/* Outputs are only polled while runtime active, so acquiring a
-	 * runtime PM ref here is unnecessary (and would deadlock upon
-	 * runtime suspend because it waits for polling to finish).
+	/* Outputs are only polled while runtime active, so resuming the
+	 * device here is unnecessary (and would deadlock upon runtime suspend
+	 * because it waits for polling to finish). We do, however, want to
+	 * prevent the autosuspend timer from elapsing during this operation
+	 * if possible.
 	 */
-	if (!drm_kms_helper_is_poll_worker()) {
-		ret = pm_runtime_get_sync(connector->dev->dev);
+	if (drm_kms_helper_is_poll_worker()) {
+		pm_runtime_get_noresume(dev->dev);
+	} else {
+		ret = pm_runtime_get_sync(dev->dev);
 		if (ret < 0 && ret != -EACCES)
 			return conn_status;
 	}
@@ -354,10 +358,8 @@
 
  out:
 
-	if (!drm_kms_helper_is_poll_worker()) {
-		pm_runtime_mark_last_busy(connector->dev->dev);
-		pm_runtime_put_autosuspend(connector->dev->dev);
-	}
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 
 	return conn_status;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6526a33..3ddd409 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -367,8 +367,6 @@
 	pm_runtime_get_sync(drm->dev->dev);
 
 	drm_helper_hpd_irq_event(drm->dev);
-	/* enable polling for external displays */
-	drm_kms_helper_poll_enable(drm->dev);
 
 	pm_runtime_mark_last_busy(drm->dev->dev);
 	pm_runtime_put_sync(drm->dev->dev);
@@ -391,15 +389,29 @@
 {
 	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
 	struct acpi_bus_event *info = data;
+	int ret;
 
 	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
 		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
-			/*
-			 * This may be the only indication we receive of a
-			 * connector hotplug on a runtime suspended GPU,
-			 * schedule hpd_work to check.
-			 */
-			schedule_work(&drm->hpd_work);
+			ret = pm_runtime_get(drm->dev->dev);
+			if (ret == 1 || ret == -EACCES) {
+				/* If the GPU is already awake, or in a state
+				 * where we can't wake it up, it can handle
+				 * its own hotplug events.
+				 */
+				pm_runtime_put_autosuspend(drm->dev->dev);
+			} else if (ret == 0) {
+				/* This may be the only indication we receive
+				 * of a connector hotplug on a runtime
+				 * suspended GPU, schedule hpd_work to check.
+				 */
+				NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+				schedule_work(&drm->hpd_work);
+				pm_runtime_put_noidle(drm->dev->dev);
+			} else {
+				NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+					ret);
+			}
 
 			/* acpi-video should not generate keypresses for this */
 			return NOTIFY_BAD;
@@ -422,6 +434,11 @@
 	if (ret)
 		return ret;
 
+	/* enable connector detection and polling for connectors without HPD
+	 * support
+	 */
+	drm_kms_helper_poll_enable(dev);
+
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 909f69a..505dca4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -601,7 +601,7 @@
 		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (unlikely(r->bo_index > req->nr_buffers)) {
+		if (unlikely(r->bo_index >= req->nr_buffers)) {
 			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
@@ -611,7 +611,7 @@
 		if (b->presumed.valid)
 			continue;
 
-		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
 			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 			ret = -EINVAL;
 			break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9b638bd..d370bf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -23,6 +23,10 @@
 #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
 #include "priv.h"
 
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
 static int
 nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
 {
@@ -95,6 +99,15 @@
 	unsigned long pgsize_bitmap;
 	int ret;
 
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+	if (dev->archdata.mapping) {
+		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+		arm_iommu_detach_device(dev);
+		arm_iommu_release_mapping(mapping);
+	}
+#endif
+
 	if (!tdev->func->iommu_bit)
 		return;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index a410c0d..6a1b81e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -161,7 +161,8 @@
 	}
 
 	/* load and execute some other ucode image (bios therm?) */
-	return pmu_load(init, 0x01, post, NULL, NULL);
+	pmu_load(init, 0x01, post, NULL, NULL);
+	return 0;
 }
 
 static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index a188a39..6ad827b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -823,7 +823,7 @@
 	int ret, i;
 
 	ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
-	if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+	if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
 		dev_err(ctx->dev, "read id failed\n");
 		ctx->error = -EIO;
 		return;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index f416f5c..c5e1aa5 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -850,7 +850,7 @@
 	return ret;
 }
 
-static int radeon_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -1010,7 +1010,7 @@
 	return ret;
 }
 
-static int radeon_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1154,7 +1154,7 @@
 	return 1;
 }
 
-static int radeon_tv_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
 	if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
@@ -1496,7 +1496,7 @@
 		radeon_connector->use_digital = true;
 }
 
-static int radeon_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -1798,7 +1798,7 @@
 	return ret;
 }
 
-static int radeon_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index aad2f4a..97828fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -283,7 +283,6 @@
 		remote = of_graph_get_remote_port_parent(ep);
 		if (!remote) {
 			DRM_DEBUG_DRIVER("Error retrieving the output node\n");
-			of_node_put(remote);
 			continue;
 		}
 
@@ -297,11 +296,13 @@
 
 			if (of_graph_parse_endpoint(ep, &endpoint)) {
 				DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+				of_node_put(remote);
 				continue;
 			}
 
 			if (!endpoint.id) {
 				DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+				of_node_put(remote);
 				continue;
 			}
 		}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 39d0fdc..6a7994a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -217,7 +217,7 @@
 
 		struct fb_deferred_io *fbdefio;
 
-		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
 		if (fbdefio) {
 			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 873f010..10e2c19 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -169,18 +169,13 @@
 	struct list_head *node;
 	struct urb_node *unode;
 	struct urb *urb;
-	int ret;
 	unsigned long flags;
 
 	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
 
 	/* keep waiting and freeing, until we've got 'em all */
 	while (count--) {
-
-		/* Getting interrupted means a leak, but ok at shutdown*/
-		ret = down_interruptible(&udl->urbs.limit_sem);
-		if (ret)
-			break;
+		down(&udl->urbs.limit_sem);
 
 		spin_lock_irqsave(&udl->urbs.lock, flags);
 
@@ -204,17 +199,22 @@
 static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
 {
 	struct udl_device *udl = dev->dev_private;
-	int i = 0;
 	struct urb *urb;
 	struct urb_node *unode;
 	char *buf;
+	size_t wanted_size = count * size;
 
 	spin_lock_init(&udl->urbs.lock);
 
+retry:
 	udl->urbs.size = size;
 	INIT_LIST_HEAD(&udl->urbs.list);
 
-	while (i < count) {
+	sema_init(&udl->urbs.limit_sem, 0);
+	udl->urbs.count = 0;
+	udl->urbs.available = 0;
+
+	while (udl->urbs.count * size < wanted_size) {
 		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
 		if (!unode)
 			break;
@@ -230,11 +230,16 @@
 		}
 		unode->urb = urb;
 
-		buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+		buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
 					 &urb->transfer_dma);
 		if (!buf) {
 			kfree(unode);
 			usb_free_urb(urb);
+			if (size > PAGE_SIZE) {
+				size /= 2;
+				udl_free_urb_list(dev);
+				goto retry;
+			}
 			break;
 		}
 
@@ -245,16 +250,14 @@
 
 		list_add_tail(&unode->entry, &udl->urbs.list);
 
-		i++;
+		up(&udl->urbs.limit_sem);
+		udl->urbs.count++;
+		udl->urbs.available++;
 	}
 
-	sema_init(&udl->urbs.limit_sem, i);
-	udl->urbs.count = i;
-	udl->urbs.available = i;
+	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
 
-	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
-
-	return i;
+	return udl->urbs.count;
 }
 
 struct urb *udl_get_urb(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 7505655..a2d8630 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -327,6 +327,9 @@
 	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
 						       vc4_state->crtc_h);
 
+	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+			       vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
 	if (num_planes > 1) {
 		vc4_state->is_yuv = true;
 
@@ -342,21 +345,17 @@
 			vc4_get_scaling_mode(vc4_state->src_h[1],
 					     vc4_state->crtc_h);
 
-		/* YUV conversion requires that scaling be enabled,
-		 * even on a plane that's otherwise 1:1.  Choose TPZ
-		 * for simplicity.
+		/* YUV conversion requires that horizontal scaling be enabled,
+		 * even on a plane that's otherwise 1:1. Looks like only PPF
+		 * works in that case, so let's pick that one.
 		 */
-		if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
-			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
-		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
-			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+		if (vc4_state->is_unity)
+			vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+	} else {
+		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
 	}
 
-	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
-			       vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
-			       vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
-			       vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
 	/* No configuring scaling on the cursor plane, since it gets
 	   non-vblank-synced updates, and scaling requires
 	   LBM changes which have to be vblank-synced.
@@ -611,7 +610,10 @@
 		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
 	}
 
-	if (!vc4_state->is_unity) {
+	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
 		/* LBM Base Address. */
 		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
 		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index d6e5ded..8774bf1 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -316,13 +316,17 @@
 /*
  * Fill a CSI bus config struct from mbus_config and mbus_framefmt.
  */
-static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
 				 struct v4l2_mbus_config *mbus_cfg,
 				 struct v4l2_mbus_framefmt *mbus_fmt)
 {
+	int ret;
+
 	memset(csicfg, 0, sizeof(*csicfg));
 
-	mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+	ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+	if (ret < 0)
+		return ret;
 
 	switch (mbus_cfg->type) {
 	case V4L2_MBUS_PARALLEL:
@@ -353,6 +357,8 @@
 		/* will never get here, keep compiler quiet */
 		break;
 	}
+
+	return 0;
 }
 
 int ipu_csi_init_interface(struct ipu_csi *csi,
@@ -362,8 +368,11 @@
 	struct ipu_csi_bus_config cfg;
 	unsigned long flags;
 	u32 width, height, data = 0;
+	int ret;
 
-	fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+	ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+	if (ret < 0)
+		return ret;
 
 	/* set default sensor frame width and height */
 	width = mbus_fmt->width;
@@ -567,11 +576,14 @@
 	struct ipu_csi_bus_config cfg;
 	unsigned long flags;
 	u32 temp;
+	int ret;
 
 	if (vc > 3)
 		return -EINVAL;
 
-	mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+	ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irqsave(&csi->lock, flags);
 
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index ef0d7f1..e3a664e 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -1070,5 +1070,16 @@
 #define PDC_GPU_TCS3_CMD0_DATA			0x215DB
 #define PDC_GPU_SEQ_MEM_0			0xA0000
 
+/* GPU CX_MISC registers */
+#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0	0x1
+#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1	0x2
+#define A6XX_LLC_NUM_GPU_SCIDS			5
+#define A6XX_GPU_LLC_SCID_NUM_BITS		5
+#define A6XX_GPU_LLC_SCID_MASK \
+	((1 << (A6XX_LLC_NUM_GPU_SCIDS * A6XX_GPU_LLC_SCID_NUM_BITS)) - 1)
+#define A6XX_GPUHTW_LLC_SCID_SHIFT		25
+#define A6XX_GPUHTW_LLC_SCID_MASK \
+	(((1 << A6XX_GPU_LLC_SCID_NUM_BITS) - 1) << A6XX_GPUHTW_LLC_SCID_SHIFT)
+
 #endif /* _A6XX_REG_H */
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7c219fa..ea2240c 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1262,6 +1262,22 @@
 		KGSL_DRV_WARN(device, "cx_dbgc ioremap failed\n");
 }
 
+static void adreno_cx_misc_probe(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct resource *res;
+
+	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
+					   "cx_misc");
+
+	if (res == NULL)
+		return;
+
+	adreno_dev->cx_misc_len = resource_size(res);
+	adreno_dev->cx_misc_virt = devm_ioremap(device->dev,
+					res->start, adreno_dev->cx_misc_len);
+}
+
 static void adreno_efuse_read_soc_hw_rev(struct adreno_device *adreno_dev)
 {
 	unsigned int val;
@@ -1382,6 +1398,9 @@
 	/* Probe for the optional CX_DBGC block */
 	adreno_cx_dbgc_probe(device);
 
+	/* Probe for the optional CX_MISC block */
+	adreno_cx_misc_probe(device);
+
 	/*
 	 * qcom,iommu-secure-id is used to identify MMUs that can handle secure
 	 * content but that is only part of the story - the GPU also has to be
@@ -1851,7 +1870,7 @@
 
 	status = kgsl_mmu_start(device);
 	if (status)
-		goto error_pwr_off;
+		goto error_boot_oob_clear;
 
 	_set_secvid(device);
 
@@ -2072,6 +2091,12 @@
 error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
 
+error_boot_oob_clear:
+	if (gpudev->oob_clear &&
+			ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
+
 error_pwr_off:
 	/* set the state back to original state */
 	kgsl_pwrctrl_change_state(device, state);
@@ -3296,6 +3321,54 @@
 	__raw_writel(value, adreno_dev->cx_dbgc_virt + cx_dbgc_offset);
 }
 
+void adreno_cx_misc_regread(struct adreno_device *adreno_dev,
+	unsigned int offsetwords, unsigned int *value)
+{
+	unsigned int cx_misc_offset;
+
+	cx_misc_offset = (offsetwords << 2);
+	if (!adreno_dev->cx_misc_virt ||
+		(cx_misc_offset >= adreno_dev->cx_misc_len))
+		return;
+
+	*value = __raw_readl(adreno_dev->cx_misc_virt + cx_misc_offset);
+
+	/*
+	 * ensure this read finishes before the next one.
+	 * i.e. act like normal readl()
+	 */
+	rmb();
+}
+
+void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev,
+	unsigned int offsetwords, unsigned int value)
+{
+	unsigned int cx_misc_offset;
+
+	cx_misc_offset = (offsetwords << 2);
+	if (!adreno_dev->cx_misc_virt ||
+		(cx_misc_offset >= adreno_dev->cx_misc_len))
+		return;
+
+	/*
+	 * ensure previous writes post before this one,
+	 * i.e. act like normal writel()
+	 */
+	wmb();
+	__raw_writel(value, adreno_dev->cx_misc_virt + cx_misc_offset);
+}
+
+void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev,
+		unsigned int offsetwords,
+		unsigned int mask, unsigned int bits)
+{
+	unsigned int val = 0;
+
+	adreno_cx_misc_regread(adreno_dev, offsetwords, &val);
+	val &= ~mask;
+	adreno_cx_misc_regwrite(adreno_dev, offsetwords, val | bits);
+}
+
 /**
  * adreno_waittimestamp - sleep while waiting for the specified timestamp
  * @device - pointer to a KGSL device structure
@@ -3772,7 +3845,6 @@
 	.device_private_create = adreno_device_private_create,
 	.device_private_destroy = adreno_device_private_destroy,
 	/* Optional functions */
-	.snapshot_gmu = adreno_snapshot_gmu,
 	.drawctxt_create = adreno_drawctxt_create,
 	.drawctxt_detach = adreno_drawctxt_detach,
 	.drawctxt_destroy = adreno_drawctxt_destroy,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 555856a..3b5dd88d 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -429,6 +429,8 @@
  * @chipid: Chip ID specific to the GPU
  * @gmem_base: Base physical address of GMEM
  * @gmem_size: GMEM size
+ * @cx_misc_len: Length of the CX MISC register block
+ * @cx_misc_virt: Pointer where the CX MISC block is mapped
  * @gpucore: Pointer to the adreno_gpu_core structure
  * @pfp_fw: Buffer which holds the pfp ucode
  * @pfp_fw_size: Size of pfp ucode buffer
@@ -509,6 +511,8 @@
 	unsigned long cx_dbgc_base;
 	unsigned int cx_dbgc_len;
 	void __iomem *cx_dbgc_virt;
+	unsigned int cx_misc_len;
+	void __iomem *cx_misc_virt;
 	const struct adreno_gpu_core *gpucore;
 	struct adreno_firmware fw[2];
 	size_t gpmu_cmds_size;
@@ -941,7 +945,6 @@
 	/* GPU specific function hooks */
 	void (*irq_trace)(struct adreno_device *, unsigned int status);
 	void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);
-	void (*snapshot_gmu)(struct adreno_device *, struct kgsl_snapshot *);
 	void (*platform_setup)(struct adreno_device *);
 	void (*init)(struct adreno_device *);
 	void (*remove)(struct adreno_device *);
@@ -1123,9 +1126,6 @@
 		struct kgsl_snapshot *snapshot,
 		struct kgsl_context *context);
 
-void adreno_snapshot_gmu(struct kgsl_device *device,
-		struct kgsl_snapshot *snapshot);
-
 int adreno_reset(struct kgsl_device *device, int fault);
 
 void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
@@ -1170,6 +1170,14 @@
 		unsigned int offsetwords, unsigned int *value);
 void adreno_cx_dbgc_regwrite(struct kgsl_device *device,
 		unsigned int offsetwords, unsigned int value);
+void adreno_cx_misc_regread(struct adreno_device *adreno_dev,
+		unsigned int offsetwords, unsigned int *value);
+void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev,
+		unsigned int offsetwords, unsigned int value);
+void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev,
+		unsigned int offsetwords,
+		unsigned int mask, unsigned int bits);
+
 
 #define ADRENO_TARGET(_name, _id) \
 static inline int adreno_is_##_name(struct adreno_device *adreno_dev) \
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 4f98912..e1f32e8 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -156,6 +156,12 @@
 	unsigned int speed_bin[3];
 	struct kgsl_device *device = &adreno_dev->dev;
 
+	if (of_get_property(device->pdev->dev.of_node,
+		"qcom,gpu-speed-bin-vectors", NULL)) {
+		adreno_efuse_speed_bin_array(adreno_dev);
+		return;
+	}
+
 	if (of_property_read_u32_array(device->pdev->dev.of_node,
 		"qcom,gpu-speed-bin", speed_bin, 3))
 		return;
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 517b813..b74d48c 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -32,17 +32,6 @@
 
 #define MIN_HBB		13
 
-#define A6XX_LLC_NUM_GPU_SCIDS		5
-#define A6XX_GPU_LLC_SCID_NUM_BITS	5
-#define A6XX_GPU_LLC_SCID_MASK \
-	((1 << (A6XX_LLC_NUM_GPU_SCIDS * A6XX_GPU_LLC_SCID_NUM_BITS)) - 1)
-#define A6XX_GPUHTW_LLC_SCID_SHIFT	25
-#define A6XX_GPUHTW_LLC_SCID_MASK \
-	(((1 << A6XX_GPU_LLC_SCID_NUM_BITS) - 1) << A6XX_GPUHTW_LLC_SCID_SHIFT)
-
-#define A6XX_GPU_CX_REG_BASE		0x509E000
-#define A6XX_GPU_CX_REG_SIZE		0x1000
-
 #define GPU_LIMIT_THRESHOLD_ENABLE	BIT(31)
 
 static int _load_gmu_firmware(struct kgsl_device *device);
@@ -2522,24 +2511,6 @@
 	}
 }
 
-/* GPU System Cache control registers */
-#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0   0x4
-#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1   0x8
-
-static inline void _reg_rmw(void __iomem *regaddr,
-	unsigned int mask, unsigned int bits)
-{
-	unsigned int val = 0;
-
-	val = __raw_readl(regaddr);
-	/* Make sure the above read completes before we proceed  */
-	rmb();
-	val &= ~mask;
-	__raw_writel(val | bits, regaddr);
-	/* Make sure the above write posts before we proceed*/
-	wmb();
-}
-
 /*
  * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
  * @adreno_dev: The adreno device pointer
@@ -2549,17 +2520,15 @@
 	uint32_t gpu_scid;
 	uint32_t gpu_cntl1_val = 0;
 	int i;
-	void __iomem *gpu_cx_reg;
 
 	gpu_scid = adreno_llc_get_scid(adreno_dev->gpu_llc_slice);
 	for (i = 0; i < A6XX_LLC_NUM_GPU_SCIDS; i++)
 		gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
 			| gpu_scid;
 
-	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
-	_reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
+	adreno_cx_misc_regrmw(adreno_dev,
+			A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
 			A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
-	iounmap(gpu_cx_reg);
 }
 
 /*
@@ -2569,15 +2538,13 @@
 static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
 {
 	uint32_t gpuhtw_scid;
-	void __iomem *gpu_cx_reg;
 
 	gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);
 
-	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
-	_reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
+	adreno_cx_misc_regrmw(adreno_dev,
+			A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
 			A6XX_GPUHTW_LLC_SCID_MASK,
 			gpuhtw_scid << A6XX_GPUHTW_LLC_SCID_SHIFT);
-	iounmap(gpu_cx_reg);
 }
 
 /*
@@ -2586,19 +2553,14 @@
  */
 static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
 {
-	void __iomem *gpu_cx_reg;
-
 	/*
 	 * 0x3: readnoallocoverrideen=0
 	 *      read-no-alloc=0 - Allocate lines on read miss
 	 *      writenoallocoverrideen=1
 	 *      write-no-alloc=1 - Do not allocate lines on write miss
 	 */
-	gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
-	__raw_writel(0x3, gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0);
-	/* Make sure the above write posts before we proceed*/
-	wmb();
-	iounmap(gpu_cx_reg);
+	adreno_cx_misc_regwrite(adreno_dev,
+			A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0, 0x3);
 }
 
 static const char *fault_block[8] = {
@@ -3909,7 +3871,6 @@
 	.reg_offsets = &a6xx_reg_offsets,
 	.start = a6xx_start,
 	.snapshot = a6xx_snapshot,
-	.snapshot_gmu = a6xx_snapshot_gmu,
 	.irq = &a6xx_irq,
 	.snapshot_data = &a6xx_snapshot_data,
 	.irq_trace = trace_kgsl_a5xx_irq_status,
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index bf1111c..3267213 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -129,8 +129,5 @@
 
 void a6xx_snapshot(struct adreno_device *adreno_dev,
 		struct kgsl_snapshot *snapshot);
-void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
-		struct kgsl_snapshot *snapshot);
-
 void a6xx_crashdump_init(struct adreno_device *adreno_dev);
 #endif
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 7376a38..d3f13d4 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1456,7 +1456,7 @@
  * This is where all of the A6XX GMU specific bits and pieces are grabbed
  * into the snapshot memory
  */
-void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
+static void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
 		struct kgsl_snapshot *snapshot)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1479,8 +1479,6 @@
 				a6xx_gmu_gx_registers,
 				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
 	}
-
-	a6xx_snapshot_debugbus(device, snapshot);
 }
 
 /* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
@@ -1571,6 +1569,15 @@
 	/* GMU TCM data dumped through AHB */
 	a6xx_snapshot_gmu(adreno_dev, snapshot);
 
+	/*
+	 * Dump debugbus data here to capture it for both
+	 * GMU and GPU snapshot. Debugbus data can be accessed
+	 * even if the gx headswitch or sptprac is off. If gx
+	 * headswitch is off, data for gx blocks will show as
+	 * 0x5c00bd00.
+	 */
+	a6xx_snapshot_debugbus(device, snapshot);
+
 	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
 
 	/* Return if the GX is off */
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index a634d98..e7e0aae 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1716,8 +1716,9 @@
 			ib2base, ib2sz, drawctxt->rb->id);
 
 		pr_fault(device, drawobj,
-			"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
-			drawobj->context->id, drawobj->timestamp, status,
+			"gpu fault ctx %d ctx_type %s ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+			drawobj->context->id, get_api_type_str(drawctxt->type),
+			drawobj->timestamp, status,
 			rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
 
 		if (rb != NULL)
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 6876796..a769915 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -472,6 +472,7 @@
 {
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
+	struct adreno_gpudev *gpudev;
 	struct adreno_context *drawctxt;
 	struct adreno_ringbuffer *rb;
 	int ret, count, i;
@@ -482,6 +483,7 @@
 
 	device = context->device;
 	adreno_dev = ADRENO_DEVICE(device);
+	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	drawctxt = ADRENO_CONTEXT(context);
 	rb = drawctxt->rb;
 
@@ -562,6 +564,9 @@
 
 	mutex_unlock(&device->mutex);
 
+	if (gpudev->preemption_context_destroy)
+		gpudev->preemption_context_destroy(context);
+
 	/* wake threads waiting to submit commands from this context */
 	wake_up_all(&drawctxt->waiting);
 	wake_up_all(&drawctxt->wq);
@@ -570,18 +575,10 @@
 void adreno_drawctxt_destroy(struct kgsl_context *context)
 {
 	struct adreno_context *drawctxt;
-	struct adreno_device *adreno_dev;
-	struct adreno_gpudev *gpudev;
 
 	if (context == NULL)
 		return;
 
-	adreno_dev = ADRENO_DEVICE(context->device);
-	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-
-	if (gpudev->preemption_context_destroy)
-		gpudev->preemption_context_destroy(context);
-
 	drawctxt = ADRENO_CONTEXT(context);
 	kfree(drawctxt);
 }
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index eef506f..4857c78 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -139,4 +139,16 @@
 void adreno_drawctxt_dump(struct kgsl_device *device,
 		struct kgsl_context *context);
 
+static struct adreno_context_type ctxt_type_table[] = {KGSL_CONTEXT_TYPES};
+
+static inline const char *get_api_type_str(unsigned int type)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ctxt_type_table); i++) {
+		if (ctxt_type_table[i].type == type)
+			return ctxt_type_table[i].str;
+	}
+	return "UNKNOWN";
+}
 #endif  /* __ADRENO_DRAWCTXT_H */
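The helper moved into this header is a plain linear lookup over the KGSL_CONTEXT_TYPES table, falling back to "UNKNOWN" for anything not listed. The standalone sketch below mirrors that idiom with a local copy of the table so it compiles on its own; the numeric values are placeholders, not the kernel's KGSL_CONTEXT_TYPE_* constants. Worth noting: because ctxt_type_table is a static array defined in a header, every translation unit that includes it gets its own copy.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct ctx_type {
	unsigned int type;
	const char *str;
};

/* Placeholder values standing in for the KGSL_CONTEXT_TYPE_* constants. */
static const struct ctx_type ctx_type_table[] = {
	{ 1, "GL" }, { 2, "CL" }, { 3, "C2D" }, { 4, "RS" }, { 5, "VK" },
};

static const char *ctx_type_str(unsigned int type)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ctx_type_table); i++)
		if (ctx_type_table[i].type == type)
			return ctx_type_table[i].str;
	return "UNKNOWN";
}

int main(void)
{
	printf("%s %s\n", ctx_type_str(5), ctx_type_str(42)); /* VK UNKNOWN */
	return 0;
}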
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index 2985f24..3b85129 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,19 +77,6 @@
 #define SIZE_PIPE_ENTRY(cnt) (50 + (cnt) * 62)
 #define SIZE_LOG_ENTRY(cnt) (6 + (cnt) * 5)
 
-static struct adreno_context_type ctxt_type_table[] = {KGSL_CONTEXT_TYPES};
-
-static const char *get_api_type_str(unsigned int type)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ctxt_type_table) - 1; i++) {
-		if (ctxt_type_table[i].type == type)
-			return ctxt_type_table[i].str;
-	}
-	return "UNKNOWN";
-}
-
 static inline uint _ib_start(struct adreno_device *adreno_dev,
 			 unsigned int *cmds)
 {
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 2293919..b93ee9c 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -946,24 +946,6 @@
 
 }
 
-/* adreno_snapshot_gmu - Snapshot the Adreno GMU state
- * @device - KGSL device to snapshot
- * @snapshot - Pointer to the snapshot instance
- * This is a hook function called by kgsl_snapshot to snapshot the
- * Adreno specific information for the GMU snapshot.  In turn, this function
- * calls the GMU specific snapshot function to get core specific information.
- */
-void adreno_snapshot_gmu(struct kgsl_device *device,
-		struct kgsl_snapshot *snapshot)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-
-	/* Add GMU specific sections */
-	if (gpudev->snapshot_gmu)
-		gpudev->snapshot_gmu(adreno_dev, snapshot);
-}
-
 /*
  * adreno_snapshot_cp_roq - Dump CP merciu data in snapshot
  * @device: Device being snapshotted
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 4a98a24..6ae38a3 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -97,7 +97,8 @@
 	{ KGSL_CONTEXT_TYPE_GL, "GL" }, \
 	{ KGSL_CONTEXT_TYPE_CL, "CL" }, \
 	{ KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
-	{ KGSL_CONTEXT_TYPE_RS, "RS" }
+	{ KGSL_CONTEXT_TYPE_RS, "RS" }, \
+	{ KGSL_CONTEXT_TYPE_VK, "VK" }
 
 #define KGSL_CONTEXT_ID(_context) \
 	((_context != NULL) ? (_context)->id : KGSL_MEMSTORE_GLOBAL)
@@ -152,8 +153,6 @@
 	unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
 	void (*snapshot)(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot, struct kgsl_context *context);
-	void (*snapshot_gmu)(struct kgsl_device *device,
-		struct kgsl_snapshot *snapshot);
 	irqreturn_t (*irq_handler)(struct kgsl_device *device);
 	int (*drain)(struct kgsl_device *device);
 	struct kgsl_device_private * (*device_private_create)(void);
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 6e31964..222c86a 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -60,6 +60,8 @@
 	unsigned int image_start;
 };
 
+#define GMU_CM3_CFG_NONMASKINTR_SHIFT    9
+
 struct gmu_iommu_context {
 	const char *name;
 	struct device *dev;
@@ -450,7 +452,7 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
-	int ret;
+	int ret = 0;
 
 	if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
 		perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;
@@ -462,23 +464,22 @@
 		(bw_idx == INVALID_DCVS_IDX))
 		return -EINVAL;
 
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
 		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev,
 			GMU_DCVS_NOHFI, perf_idx, bw_idx);
+	else if (test_bit(GMU_HFI_ON, &gmu->flags))
+		ret = hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
 
-		if (ret) {
-			dev_err_ratelimited(&gmu->pdev->dev,
-				"Failed to set GPU perf idx %d, bw idx %d\n",
-				perf_idx, bw_idx);
+	if (ret) {
+		dev_err_ratelimited(&gmu->pdev->dev,
+			"Failed to set GPU perf idx %d, bw idx %d\n",
+			perf_idx, bw_idx);
 
-			adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
-			adreno_dispatcher_schedule(device);
-		}
-
-		return ret;
+		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+		adreno_dispatcher_schedule(device);
 	}
 
-	return hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
+	return ret;
 }
 
 struct rpmh_arc_vals {
@@ -787,12 +788,30 @@
 	return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
 }
 
+static void send_nmi_to_gmu(struct adreno_device *adreno_dev)
+{
+	/* Mask so there's no interrupt caused by NMI */
+	adreno_write_gmureg(adreno_dev,
+			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
+
+	/* Make sure the interrupt is masked before causing it */
+	wmb();
+	adreno_write_gmureg(adreno_dev,
+		ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
+	adreno_write_gmureg(adreno_dev,
+		ADRENO_REG_GMU_CM3_CFG,
+		(1 << GMU_CM3_CFG_NONMASKINTR_SHIFT));
+
+	/* Make sure the NMI is invoked before we proceed */
+	wmb();
+}
+
 static irqreturn_t gmu_irq_handler(int irq, void *data)
 {
 	struct gmu_device *gmu = data;
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int status = 0;
+	unsigned int mask, status = 0;
 
 	adreno_read_gmureg(ADRENO_DEVICE(device),
 			ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
@@ -801,6 +820,20 @@
 
 	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
 	if (status & GMU_INT_WDOG_BITE) {
+		/* Temporarily mask the watchdog interrupt to prevent a storm */
+		adreno_read_gmureg(adreno_dev,
+				ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK, &mask);
+		adreno_write_gmureg(adreno_dev,
+				ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+				(mask | GMU_INT_WDOG_BITE));
+
+		send_nmi_to_gmu(adreno_dev);
+		/*
+		 * There is sufficient delay for the GMU to finish handling
+		 * the NMI before the snapshot is taken, because the fault
+		 * worker is scheduled below.
+		 */
+
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU watchdog expired interrupt received\n");
 		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
@@ -1350,19 +1383,8 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = &device->gmu;
 
-	/* Mask so there's no interrupt caused by NMI */
-	adreno_write_gmureg(adreno_dev,
-			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
-
-	/* Make sure the interrupt is masked before causing it */
-	wmb();
-	adreno_write_gmureg(adreno_dev,
-		ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
-	adreno_write_gmureg(adreno_dev,
-		ADRENO_REG_GMU_CM3_CFG, (1 << 9));
-
+	send_nmi_to_gmu(adreno_dev);
 	/* Wait for the NMI to be handled */
-	wmb();
 	udelay(100);
 	kgsl_device_snapshot(device, NULL, true);
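Both the watchdog path and the snapshot path above now funnel through send_nmi_to_gmu(), which follows a "configure, barrier, trigger, barrier" shape: mask host interrupts, make sure that write has landed, then poke the NMI bit and flush again before returning. A minimal sketch of that ordering pattern with generic MMIO stand-ins (the register indices, helper, and fences below are illustrative; the real code uses adreno_write_gmureg() and wmb()):

#include <stdint.h>

enum { REG_INTR_MASK, REG_NMI_CTRL, REG_CM3_CFG };

/* Hypothetical accessor for a memory-mapped register bank. */
static inline void mmio_write(volatile uint32_t *base, int reg, uint32_t val)
{
	base[reg] = val;
}

static void send_nmi(volatile uint32_t *base)
{
	mmio_write(base, REG_INTR_MASK, 0xFFFFFFFF);	/* mask everything first */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* mask must land before the trigger */
	mmio_write(base, REG_NMI_CTRL, 0);
	mmio_write(base, REG_CM3_CFG, 1u << 9);		/* non-maskable interrupt bit */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* trigger is posted before we return */
}

int main(void)
{
	volatile uint32_t fake_regs[3] = { 0 };

	send_nmi(fake_regs);
	return fake_regs[REG_CM3_CFG] == (1u << 9) ? 0 : 1;
}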
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 3539cda..4cf6250 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -836,11 +836,21 @@
 		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
 
 	if (!no_page_fault_log && __ratelimit(&_rs)) {
+		const char *api_str;
+
+		if (context != NULL) {
+			struct adreno_context *drawctxt =
+					ADRENO_CONTEXT(context);
+
+			api_str = get_api_type_str(drawctxt->type);
+		} else {
+			api_str = "UNKNOWN";
+		}
+
 		KGSL_MEM_CRIT(ctx->kgsldev,
 			"GPU PAGE FAULT: addr = %lX pid= %d\n", addr, ptname);
 		KGSL_MEM_CRIT(ctx->kgsldev,
-			"context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
-			ctx->name, ptbase, contextidr,
+			"context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
+			ctx->name, api_str, ptbase, contextidr,
 			write ? "write" : "read", fault_type);
 
 		if (gpudev->iommu_fault_block) {
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 2a149ac..b2679b3 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2760,6 +2760,12 @@
 		status = gmu_start(device);
 		break;
 	case KGSL_STATE_INIT:
+		/* Fail the power-up if the GMU is already in FAULT */
+		if (kgsl_gmu_isenabled(device) &&
+			test_bit(GMU_FAULT, &gmu->flags)) {
+			status = -EINVAL;
+			break;
+		}
 		status = kgsl_pwrctrl_enable(device);
 		break;
 	/* The following 3 cases shouldn't occur, but don't panic. */
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 0ed17d8..009cbbb 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -704,22 +704,20 @@
 	snapshot->size += sizeof(*header);
 
 	/* Build the Linux specific header */
-	/* We either want to only dump GMU, or we want to dump GPU and GMU */
-	if (gmu_fault) {
-		/* Dump only the GMU */
+	if (gmu_fault)
 		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
-				snapshot, snapshot_os_no_ctxt, NULL);
-
-		if (device->ftbl->snapshot_gmu)
-			device->ftbl->snapshot_gmu(device, snapshot);
-	} else {
-		/* Dump GPU and GMU */
+			snapshot, snapshot_os_no_ctxt, NULL);
+	else
 		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
-				snapshot, snapshot_os, NULL);
+			snapshot, snapshot_os, NULL);
 
-		if (device->ftbl->snapshot)
-			device->ftbl->snapshot(device, snapshot, context);
-	}
+	/*
+	 * Trigger both GPU and GMU snapshots. GPU specific code
+	 * will take care of whether to dump the full state or only
+	 * the GMU state based on the current GPU power state.
+	 */
+	if (device->ftbl->snapshot)
+		device->ftbl->snapshot(device, snapshot, context);
 
 	/*
 	 * The timestamp is the seconds since boot so it is easier to match to
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 8eed456..9deed4f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -966,6 +966,13 @@
 	Say Y here if you have a Alps touchpads over i2c-hid or usbhid
 	and want support for its special functionalities.
 
+config HID_QVR
+	tristate "QVR support"
+	depends on HID
+	---help---
+	Say 'Y' or 'M' if you want to connect an external device to
+	stream IMU data for QVR support.
+
 endmenu
 
 endif # HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 86b2b57..95c3235 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -78,6 +78,7 @@
 
 obj-$(CONFIG_HID_PLANTRONICS)	+= hid-plantronics.o
 obj-$(CONFIG_HID_PRIMAX)	+= hid-primax.o
+obj-$(CONFIG_HID_QVR)		+= hid-qvr.o hid-trace.o
 obj-$(CONFIG_HID_ROCCAT)	+= hid-roccat.o hid-roccat-common.o \
 	hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
 	hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index cb2e85c..a8b8058 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -333,7 +333,8 @@
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
-	if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+	if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+			usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
 		/* The fn key on Apple USB keyboards */
 		set_bit(EV_REP, hi->input->evbit);
 		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -479,6 +480,12 @@
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
 		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
 		.driver_data = APPLE_HAS_FN },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index aea6267..48856a0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2018,6 +2018,10 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+#if IS_ENABLED(CONFIG_HID_QVR)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QVR5, USB_DEVICE_ID_QVR5) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QVR32A, USB_DEVICE_ID_QVR32A) },
+#endif
 	{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
 #if IS_ENABLED(CONFIG_HID_ROCCAT)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
@@ -2060,6 +2064,10 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9347b37..a784464 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -83,6 +83,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD	0x3101
 
 #define USB_VENDOR_ID_APPLE		0x05ac
+#define BT_VENDOR_ID_APPLE		0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE	0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD	0x030e
@@ -152,6 +153,7 @@
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI   0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI   0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI	0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO	0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS	0x0292
@@ -549,6 +551,9 @@
 #define USB_VENDOR_ID_IRTOUCHSYSTEMS	0x6615
 #define USB_DEVICE_ID_IRTOUCH_INFRARED_USB	0x0070
 
+#define USB_VENDOR_ID_INNOMEDIA			0x1292
+#define USB_DEVICE_ID_INNEX_GENESIS_ATARI	0x4745
+
 #define USB_VENDOR_ID_ITE               0x048d
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA   0x8386
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2  0x8350
@@ -885,6 +890,7 @@
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000	0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD	0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION	0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9	0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
@@ -924,6 +930,8 @@
 #define USB_DEVICE_ID_SONY_PS3_BDREMOTE		0x0306
 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER	0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2	0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE	0x0ba0
 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER	0x03d5
 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER	0x042f
 #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER		0x0002
@@ -1125,4 +1133,9 @@
 #define USB_VENDOR_ID_UGTIZER			0x2179
 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610	0x0053
 
+#define USB_VENDOR_ID_QVR5	0x045e
+#define USB_VENDOR_ID_QVR32A	0x04b4
+#define USB_DEVICE_ID_QVR5	0x0659
+#define USB_DEVICE_ID_QVR32A	0x00c3
+
 #endif
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 1b0084d..28373da 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -955,6 +955,8 @@
 
 	ret = sysfs_create_group(&hdev->dev.kobj,
 			&ntrig_attribute_group);
+	if (ret)
+		hid_err(hdev, "cannot create sysfs group\n");
 
 	return 0;
 err_free:
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index febb21e..584b10d 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -2,7 +2,7 @@
  *  Plantronics USB HID Driver
  *
  *  Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
- *  Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com>
+ *  Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
  */
 
 /*
@@ -48,6 +48,10 @@
 	unsigned short mapped_key;
 	unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
 
+	/* special case for PTT products */
+	if (field->application == HID_GD_JOYSTICK)
+		goto defaulted;
+
 	/* handle volume up/down mapping */
 	/* non-standard types or multi-HID interfaces - plt_type is PID */
 	if (!(plt_type & HID_USAGE_PAGE)) {
diff --git a/drivers/hid/hid-qvr.c b/drivers/hid/hid-qvr.c
new file mode 100644
index 0000000..e619587
--- /dev/null
+++ b/drivers/hid/hid-qvr.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/msm_ion.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/hiddev.h>
+#include <linux/hid-debug.h>
+#include <linux/hidraw.h>
+#include <linux/device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/gpio.h>
+#include <linux/timekeeping.h>
+#include <linux/ion.h>
+#include "../soc/qcom/smp2p_private.h"
+#include "hid-ids.h"
+#include "hid-qvr.h"
+#include "hid-trace.h"
+
+static struct ion_handle *handle;
+static struct ion_client *client;
+static void *vaddr;
+static size_t vsize;
+static uint64_t ts_base;
+static uint64_t ts_offset;
+static int msg_size = 368;
+
+struct gpio_info {
+	int gpio_base_id;
+	int irq_base_id;
+};
+
+
+/* GPIO Inbound/Outbound callback info */
+struct gpio_inout {
+	struct gpio_info in;
+	struct gpio_info out;
+};
+
+static struct gpio_inout gpio_info[SMP2P_NUM_PROCS];
+static struct gpio_info *in_gpio_info_ptr;
+static struct gpio_info *out_gpio_info_ptr;
+
+static struct hid_driver qvr_external_sensor_driver;
+static int fd;
+
+
+struct ion_handle {
+	struct kref ref;
+	unsigned int user_ref_count;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	int id;
+};
+
+struct qvr_buf_index {
+	int most_recent_index;
+	uint8_t padding[60];
+};
+
+struct qvr_sensor_t {
+	uint64_t gts;
+	uint64_t ats;
+	uint64_t mts;
+	s32 gx;
+	s32 gy;
+	s32 gz;
+	s32 ax;
+	s32 ay;
+	s32 az;
+	s32 mx;
+	s32 my;
+	s32 mz;
+	uint8_t padding[4];
+};
+
+
+int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid)
+{
+	struct qvr_sensor_t *sensor_buf;
+	struct qvr_sensor_t *data;
+	static int buf_index;
+	struct external_imu_format imuData = { 0 };
+	struct qvr_buf_index *index_buf;
+
+	/*
+	 * The actual message size is 369 bytes; to keep it 8-byte
+	 * aligned we use a structure of 368 bytes and skip the first
+	 * byte, the 'report id' (which is always 1).
+	 */
+	memcpy((void *)&imuData, (void *)message + 1, msg_size);
+
+	if (!ts_base)
+		ts_base = ktime_to_ns(ktime_get_boottime());
+	if (!ts_offset)
+		ts_offset = imuData.gts0;
+	index_buf = (struct qvr_buf_index *)
+		((uintptr_t)vaddr + (vsize / 2) + (8 * sizeof(*sensor_buf)));
+	sensor_buf = (struct qvr_sensor_t *)((uintptr_t)vaddr + (vsize / 2));
+
+	data = (struct qvr_sensor_t *)&(sensor_buf[buf_index]);
+	if (ts_offset > imuData.gts0)
+		data->ats = ts_base + ((ts_offset - imuData.gts0) * 100);
+	else
+		data->ats = ts_base + ((imuData.gts0 - ts_offset) * 100);
+	if (imuData.mts0 == 0)
+		data->mts = 0;
+	else
+		data->mts = data->ats;
+	data->gts = data->ats;
+	data->ax = -imuData.ax0;
+	data->ay = imuData.ay0;
+	data->az = -imuData.az0;
+	data->gx = -imuData.gx0;
+	data->gy = imuData.gy0;
+	data->gz = -imuData.gz0;
+	data->mx = -imuData.mx0;
+	data->my = imuData.my0;
+	data->mz = -imuData.mz0;
+
+	trace_qvr_recv_sensor("gyro", data->gts, data->gx, data->gy, data->gz);
+	trace_qvr_recv_sensor("accel", data->ats, data->ax, data->ay, data->az);
+
+	pr_debug("%s: gts= %llu, gx= %d, gy=%d, gz=%d", __func__,
+		data->gts, data->gx, data->gy, data->gz);
+	pr_debug("%s: ats= %llu, ax= %d, ay=%d, az=%d", __func__,
+		data->ats, data->ax, data->ay, data->az);
+	pr_debug("%s: mts= %llu, mx= %d, my=%d, mz=%d", __func__,
+		data->mts, data->mx, data->my, data->mz);
+
+	index_buf->most_recent_index = buf_index;
+	buf_index = (buf_index == (8 - 1)) ? 0 : buf_index + 1;
+	return 0;
+}
+
+static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
+{
+	struct device_node *node = NULL;
+	int cnt = 0;
+	int id = 0;
+
+	node = of_find_compatible_node(NULL, NULL, node_name);
+	if (node) {
+		cnt = of_gpio_count(node);
+		if (cnt && gpio_info_ptr) {
+			id = of_get_gpio(node, 0);
+			if (id == -EPROBE_DEFER)
+				return id;
+			gpio_info_ptr->gpio_base_id = id;
+			gpio_info_ptr->irq_base_id = gpio_to_irq(id);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int kernel_map_gyro_buffer(int fd)
+{
+	handle = ion_import_dma_buf_fd(client, fd);
+	if (IS_ERR(handle)) {
+		pr_err("%s: ion_import_dma_buf_fd failed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ion_handle_get_size(client, handle, &vsize)) {
+		pr_err("%s: Could not get dma buf %d size\n", __func__, fd);
+		return -EINVAL;
+	}
+
+	vaddr = ion_map_kernel(client, handle);
+	if (IS_ERR_OR_NULL(vaddr)) {
+		ion_free(client, handle);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void kernel_unmap_gyro_buffer(void)
+{
+	if (!IS_ERR_OR_NULL(vaddr)) {
+		ion_unmap_kernel(client, handle);
+		ion_free(client, handle);
+		vaddr = NULL;
+	}
+}
+
+static ssize_t fd_show(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	char *buf)
+{
+	return snprintf(buf, 16, "%d\n", fd);
+}
+
+static ssize_t fd_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf, size_t count)
+{
+	int ret;
+
+	ret = kstrtoint(buf, 10, &fd);
+	if (ret < 0)
+		return ret;
+	if (fd == -1)
+		kernel_unmap_gyro_buffer();
+	else
+		kernel_map_gyro_buffer(fd);
+	ts_base = 0;
+	ts_offset = 0;
+
+	return count;
+}
+
+static ssize_t ts_base_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, 16, "%llu\n", ts_base);
+}
+
+static ssize_t ts_base_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf, size_t count)
+{
+	return 0;
+}
+
+static ssize_t ts_offset_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, 16, "%llu\n", ts_offset * 100);
+}
+
+static ssize_t ts_offset_store(struct kobject *kobj,
+	struct kobj_attribute *attr,
+	const char *buf, size_t count)
+{
+	return 0;
+}
+
+static struct kobj_attribute fd_attribute = __ATTR(fd, 0664,
+	fd_show,
+	fd_store);
+static struct kobj_attribute ts_base_attribute = __ATTR(ts_base, 0664,
+	ts_base_show,
+	ts_base_store);
+static struct kobj_attribute ts_offset_attribute = __ATTR(ts_offset, 0664,
+	ts_offset_show,
+	ts_offset_store);
+
+static struct attribute *attrs[] = {
+	&fd_attribute.attr,
+	&ts_base_attribute.attr,
+	&ts_offset_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static struct kobject *qvr_external_sensor_kobj;
+
+static int qvr_external_sensor_probe(struct hid_device *hdev,
+	const struct hid_device_id *id)
+{
+	int ret;
+	char *in_node_name = "qcom,smp2pgpio_client_qvrexternal_5_in";
+	char *out_node_name = "qcom,smp2pgpio_client_qvrexternal_5_out";
+	__u8 hid_buf[255] = { 0 };
+	size_t hid_count = 64;
+
+	ret = register_smp2p(in_node_name, in_gpio_info_ptr);
+	if (ret) {
+		pr_err("%s: register_smp2p failed", __func__);
+		goto err_free;
+	}
+	ret = register_smp2p(out_node_name, out_gpio_info_ptr);
+	if (ret) {
+		pr_err("%s: register_smp2p failed", __func__);
+		goto err_free;
+	}
+	ret = hid_open_report(hdev);
+	if (ret) {
+		pr_err("%s: hid_open_report failed", __func__);
+		goto err_free;
+	}
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret) {
+		pr_err("%s: hid_hw_start failed", __func__);
+		goto err_free;
+	}
+	hid_buf[0] = 2;
+	hid_buf[1] = 7;
+	ret = hid_hw_raw_request(hdev, hid_buf[0],
+		hid_buf,
+		hid_count,
+		HID_FEATURE_REPORT,
+		HID_REQ_SET_REPORT);
+	return 0;
+err_free:
+	return ret;
+
+}
+
+static int qvr_external_sensor_raw_event(struct hid_device *hid,
+	struct hid_report *report,
+	u8 *data, int size)
+{
+	int val;
+	int ret = -1;
+
+	if (vaddr != NULL && report->id == 0x1) {
+		ret = qvr_send_package_wrap(data/*hid_value*/, size, hid);
+		if (ret != 0) {
+			pr_err("%s: qvr_send_package_wrap failed", __func__);
+			return ret;
+		}
+		val = 1 ^ gpio_get_value(out_gpio_info_ptr->gpio_base_id + 0);
+		gpio_set_value(out_gpio_info_ptr->gpio_base_id + 0, val);
+		ret = -1;
+	}
+	return ret;
+}
+
+static void qvr_external_sensor_device_remove(struct hid_device *hdev)
+{
+	hid_hw_stop(hdev);
+}
+
+static struct hid_device_id qvr_external_sensor_table[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QVR5, USB_DEVICE_ID_QVR5) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_QVR32A, USB_DEVICE_ID_QVR32A) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, qvr_external_sensor_table);
+
+static struct hid_driver qvr_external_sensor_driver = {
+	.name = "qvr_external_sensor",
+	.id_table = qvr_external_sensor_table,
+	.probe = qvr_external_sensor_probe,
+	.raw_event = qvr_external_sensor_raw_event,
+	.remove = qvr_external_sensor_device_remove,
+};
+
+module_hid_driver(qvr_external_sensor_driver);
+
+static int __init qvr_external_sensor_init(void)
+{
+	const char *device_name = "aoe";
+	int ret = 0;
+
+	in_gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].in;
+	in_gpio_info_ptr->gpio_base_id = -1;
+	out_gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].out;
+	out_gpio_info_ptr->gpio_base_id = -1;
+
+	qvr_external_sensor_kobj =
+		kobject_create_and_add("qvr_external_sensor", kernel_kobj);
+
+	if (!qvr_external_sensor_kobj) {
+		pr_err("%s: kobject_create_and_add() fail\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_group(qvr_external_sensor_kobj, &attr_group);
+	if (ret) {
+		pr_err("%s: can't register sysfs\n", __func__);
+		return -ENOMEM;
+	}
+
+	client = msm_ion_client_create(device_name);
+	if (client == NULL) {
+		pr_err("msm_ion_client_create failed in %s", __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static void __exit qvr_external_sensor_exit(void)
+{
+	kobject_put(qvr_external_sensor_kobj);
+}
+
+module_init(qvr_external_sensor_init);
+module_exit(qvr_external_sensor_exit);
+MODULE_LICENSE("GPL v2");
+
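qvr_send_package_wrap() above anchors the sensor timestamps: the boottime (in ns) at the first sample becomes ts_base, that sample's device timestamp becomes ts_offset, and every later sample is placed at ts_base plus the tick delta scaled by 100 (which suggests 100 ns device ticks). Samples land in an 8-slot ring, and most_recent_index is published only after the slot is filled. A compact sketch of that arithmetic with made-up sample values:

#include <stdint.h>
#include <stdio.h>

#define RING_SLOTS 8

static uint64_t ts_base;	/* boottime (ns) when the first sample arrived */
static uint64_t ts_offset;	/* device timestamp of the first sample */

static uint64_t to_boottime_ns(uint64_t dev_ts, uint64_t now_ns)
{
	if (!ts_base)
		ts_base = now_ns;
	if (!ts_offset)
		ts_offset = dev_ts;
	/* 100 ns per device tick; tolerate the delta in either direction */
	return ts_offset > dev_ts ?
	       ts_base + (ts_offset - dev_ts) * 100 :
	       ts_base + (dev_ts - ts_offset) * 100;
}

int main(void)
{
	int idx = 0;
	uint64_t tick;

	for (tick = 1000; tick < 1004; tick++) {
		printf("slot %d ts %llu ns\n", idx,
		       (unsigned long long)to_boottime_ns(tick, 5000000000ull));
		idx = (idx + 1) % RING_SLOTS;	/* same wrap as buf_index */
	}
	return 0;
}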
diff --git a/drivers/hid/hid-qvr.h b/drivers/hid/hid-qvr.h
new file mode 100644
index 0000000..251b969
--- /dev/null
+++ b/drivers/hid/hid-qvr.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HID_QVR_H_FILE
+#define HID_QVR_H_FILE
+
+#define QVR_EXTERNAL_SENSOR_REPORT_ID 0x1
+
+struct external_imu_format {
+	s16 temp0;
+	s16 temp1;
+	s16 temp2;
+	s16 temp3;
+	u64 gts0;
+	u64 gts1;
+	u64 gts2;
+	u64 gts3;
+	s16 gx0;
+	s16 gx1;
+	s16 gx2;
+	s16 gx3;
+	s16 gx4;
+	s16 gx5;
+	s16 gx6;
+	s16 gx7;
+	s16 gx8;
+	s16 gx9;
+	s16 gx10;
+	s16 gx11;
+	s16 gx12;
+	s16 gx13;
+	s16 gx14;
+	s16 gx15;
+	s16 gx16;
+	s16 gx17;
+	s16 gx18;
+	s16 gx19;
+	s16 gx20;
+	s16 gx21;
+	s16 gx22;
+	s16 gx23;
+	s16 gx24;
+	s16 gx25;
+	s16 gx26;
+	s16 gx27;
+	s16 gx28;
+	s16 gx29;
+	s16 gx30;
+	s16 gx31;
+	s16 gy0;
+	s16 gy1;
+	s16 gy2;
+	s16 gy3;
+	s16 gy4;
+	s16 gy5;
+	s16 gy6;
+	s16 gy7;
+	s16 gy8;
+	s16 gy9;
+	s16 gy10;
+	s16 gy11;
+	s16 gy12;
+	s16 gy13;
+	s16 gy14;
+	s16 gy15;
+	s16 gy16;
+	s16 gy17;
+	s16 gy18;
+	s16 gy19;
+	s16 gy20;
+	s16 gy21;
+	s16 gy22;
+	s16 gy23;
+	s16 gy24;
+	s16 gy25;
+	s16 gy26;
+	s16 gy27;
+	s16 gy28;
+	s16 gy29;
+	s16 gy30;
+	s16 gy31;
+	s16 gz0;
+	s16 gz1;
+	s16 gz2;
+	s16 gz3;
+	s16 gz4;
+	s16 gz5;
+	s16 gz6;
+	s16 gz7;
+	s16 gz8;
+	s16 gz9;
+	s16 gz10;
+	s16 gz11;
+	s16 gz12;
+	s16 gz13;
+	s16 gz14;
+	s16 gz15;
+	s16 gz16;
+	s16 gz17;
+	s16 gz18;
+	s16 gz19;
+	s16 gz20;
+	s16 gz21;
+	s16 gz22;
+	s16 gz23;
+	s16 gz24;
+	s16 gz25;
+	s16 gz26;
+	s16 gz27;
+	s16 gz28;
+	s16 gz29;
+	s16 gz30;
+	s16 gz31;
+	u64 ats0;
+	u64 ats1;
+	u64 ats2;
+	u64 ats3;
+	s32 ax0;
+	s32 ax1;
+	s32 ax2;
+	s32 ax3;
+	s32 ay0;
+	s32 ay1;
+	s32 ay2;
+	s32 ay3;
+	s32 az0;
+	s32 az1;
+	s32 az2;
+	s32 az3;
+	u64 mts0;
+	u64 mts1;
+	u64 mts2;
+	u64 mts3;
+	s16 mx0;
+	s16 mx1;
+	s16 mx2;
+	s16 mx3;
+	s16 my0;
+	s16 my1;
+	s16 my2;
+	s16 my3;
+	s16 mz0;
+	s16 mz1;
+	s16 mz2;
+	s16 mz3; /* 368 bytes */
+};
+
+int qvr_send_package_wrap(u8 *message, int msize, struct hid_device *hid);
+void qvr_clear_def_parmeter(void);
+void qvr_init(struct hid_device *hdev);
+int qvr_input_init(void);
+void qvr_input_remove(void);
+
+#endif
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 39e6426..683861f 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 1b1dccd..eee58d1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2581,6 +2581,12 @@
 		.driver_data = DUALSHOCK4_CONTROLLER_USB },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
 		.driver_data = DUALSHOCK4_CONTROLLER_BT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+		.driver_data = DUALSHOCK4_CONTROLLER_USB },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+		.driver_data = DUALSHOCK4_CONTROLLER_BT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+		.driver_data = DUALSHOCK4_CONTROLLER_USB },
 	/* Nyko Core Controller for PS3 */
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
 		.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hid/hid-trace.c b/drivers/hid/hid-trace.c
new file mode 100644
index 0000000..b3afcb7
--- /dev/null
+++ b/drivers/hid/hid-trace.c
@@ -0,0 +1,17 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "hid-trace.h"
diff --git a/drivers/hid/hid-trace.h b/drivers/hid/hid-trace.h
new file mode 100644
index 0000000..4415055
--- /dev/null
+++ b/drivers/hid/hid-trace.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_HID_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _HID_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hid
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hid-trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(qvr_recv_sensor,
+	TP_PROTO(char *sensor, uint64_t ts, s32 x, s32 y, s32 z),
+	TP_ARGS(sensor, ts, x, y, z),
+	TP_STRUCT__entry(
+		__field(char *, sensor)
+		__field(uint64_t, ts)
+		__field(int, x)
+		__field(int, y)
+		__field(int, z)
+		),
+	TP_fast_assign(
+		__entry->sensor = sensor;
+		__entry->ts = ts;
+		__entry->x = x;
+		__entry->y = y;
+		__entry->z = z;
+		),
+	TP_printk(
+		"%s - ts=%llu x=%d y=%d z=%d",
+		__entry->sensor,
+		__entry->ts,
+		__entry->x,
+		__entry->y,
+		__entry->z
+		)
+	);
+
+#endif /* _HID_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 00bce00..ce2b800 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -1101,6 +1101,14 @@
 	pm_runtime_enable(&client->dev);
 	device_enable_async_suspend(&client->dev);
 
+	/* Make sure there is something at this address */
+	ret = i2c_smbus_read_byte(client);
+	if (ret < 0) {
+		dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
+		ret = -ENXIO;
+		goto err_pm;
+	}
+
 	ret = i2c_hid_fetch_hid_descriptor(ihid);
 	if (ret < 0)
 		goto err_pm;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1916f80..1ed4110 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -170,6 +170,9 @@
 	{ USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_QVR5, USB_DEVICE_ID_QVR5, HID_QUIRK_HIDINPUT_FORCE },
+	{ USB_VENDOR_ID_QVR32A, USB_DEVICE_ID_QVR32A, HID_QUIRK_HIDINPUT_FORCE },
+	{ USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
 
 	{ 0, 0 }
 };
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index db951c4..b1ad378 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2429,8 +2429,14 @@
 			if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
 				features->device_type |= WACOM_DEVICETYPE_PAD;
 
-			features->x_max = 4096;
-			features->y_max = 4096;
+			if (features->type == INTUOSHT2) {
+				features->x_max = features->x_max / 10;
+				features->y_max = features->y_max / 10;
+			} else {
+				features->x_max = 4096;
+				features->y_max = 4096;
+			}
 		}
 		else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
 			features->device_type |= WACOM_DEVICETYPE_PAD;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 3cefd1a..9c262d9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -274,14 +274,18 @@
 	return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
 {
-	u16 val;
+	int val1, val2;
 
-	val = i2c_smbus_read_byte_data(client, reg);
-	val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+	val1 = i2c_smbus_read_byte_data(client, reg);
+	if (val1 < 0)
+		return val1;
+	val2 = i2c_smbus_read_byte_data(client, reg + 1);
+	if (val2 < 0)
+		return val2;
 
-	return val;
+	return val1 | (val2 << 8);
 }
 
 static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
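The adt7475_read_word() change above widens the return type from u16 to int so a failed SMBus byte read (negative errno) can be told apart from valid data, and it still combines the two byte registers little-endian. A userspace-style sketch of the same pattern, with a fake byte-read helper standing in for i2c_smbus_read_byte_data():

#include <stdio.h>

/* Stand-in for i2c_smbus_read_byte_data(): 0..255 on success, negative errno on failure. */
static int read_byte(int reg)
{
	return reg == 0x2a ? -5 /* pretend -EIO */ : (reg & 0xff);
}

/* Combine two adjacent byte registers into a little-endian word, propagating errors. */
static int read_word(int reg)
{
	int lo, hi;

	lo = read_byte(reg);
	if (lo < 0)
		return lo;
	hi = read_byte(reg + 1);
	if (hi < 0)
		return hi;
	return lo | (hi << 8);
}

int main(void)
{
	printf("ok=0x%x err=%d\n", read_word(0x10), read_word(0x29));
	return 0;
}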
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index ac63e56..9ac6e16 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
  * Bi-directional Current/Power Monitor with I2C Interface
  * Datasheet: http://www.ti.com/product/ina230
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  * Thanks to Jan Volkering
  *
  * This program is free software; you can redistribute it and/or modify
@@ -328,6 +328,15 @@
 	return 0;
 }
 
+static ssize_t ina2xx_show_shunt(struct device *dev,
+			      struct device_attribute *da,
+			      char *buf)
+{
+	struct ina2xx_data *data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
 static ssize_t ina2xx_store_shunt(struct device *dev,
 				  struct device_attribute *da,
 				  const char *buf, size_t count)
@@ -402,7 +411,7 @@
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-			  ina2xx_show_value, ina2xx_store_shunt,
+			  ina2xx_show_shunt, ina2xx_store_shunt,
 			  INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index b900f76..c5653fe 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -1263,6 +1263,19 @@
 		qpnp_adc_map_voltage_temp(adcmap_batt_therm_qrd,
 			ARRAY_SIZE(adcmap_batt_therm_qrd),
 			batt_thm_voltage, &adc_chan_result->physical);
+	} else {
+		qpnp_adc_scale_with_calib_param(adc_code,
+			adc_properties, chan_properties, &batt_thm_voltage);
+
+		adc_chan_result->measurement = batt_thm_voltage;
+
+		return qpnp_adc_map_voltage_temp(
+				adcmap_batt_therm_qrd,
+				ARRAY_SIZE(adcmap_batt_therm_qrd),
+				batt_thm_voltage,
+				&adc_chan_result->physical);
+
 	}
 	return 0;
 }
@@ -2152,6 +2165,64 @@
 }
 EXPORT_SYMBOL(qpnp_adc_qrd_skut1_btm_scaler);
 
+int32_t qpnp_adc_qrd_215_btm_scaler(struct qpnp_vadc_chip *chip,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{
+	struct qpnp_vadc_linear_graph btm_param;
+	int64_t low_output = 0, high_output = 0;
+	int rc = 0;
+
+	if (param->adc_tm_hc) {
+		pr_debug("Update scaling for VADC_TM_HC\n");
+		return -EINVAL;
+	}
+
+	qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+	pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+				param->low_temp);
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_batt_therm_qrd,
+		ARRAY_SIZE(adcmap_batt_therm_qrd),
+		(param->low_temp),
+		&low_output);
+	if (rc) {
+		pr_debug("low_temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("low_output:%lld\n", low_output);
+	low_output *= btm_param.dy;
+	low_output = div64_s64(low_output, btm_param.adc_vref);
+	low_output += btm_param.adc_gnd;
+
+	rc = qpnp_adc_map_voltage_temp(
+		adcmap_batt_therm_qrd,
+		ARRAY_SIZE(adcmap_batt_therm_qrd),
+		(param->high_temp),
+		&high_output);
+	if (rc) {
+		pr_debug("high temp mapping failed with %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("high_output:%lld\n", high_output);
+	high_output *= btm_param.dy;
+	high_output = div64_s64(high_output, btm_param.adc_vref);
+	high_output += btm_param.adc_gnd;
+
+	/* btm low temperature corresponds to high voltage threshold */
+	*low_threshold = high_output;
+	/* btm high temperature corresponds to low voltage threshold */
+	*high_threshold = low_output;
+
+	pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+				*low_threshold);
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_qrd_215_btm_scaler);
+
 int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *chip,
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
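qvr_adc-style recap of qpnp_adc_qrd_215_btm_scaler() above: each temperature limit is mapped to a thermistor voltage through the adcmap_batt_therm_qrd table, then scaled through the ratiometric calibration (output = volt * dy / adc_vref + adc_gnd). Since the thermistor voltage falls as temperature rises, the low-temperature limit yields the high voltage threshold and vice versa, hence the swap at the end. A toy model of that pipeline (the linear temperature-to-voltage map and the calibration constants below are invented):

#include <stdint.h>
#include <stdio.h>

/* Invented ratiometric calibration constants, standing in for btm_param. */
struct calib {
	int64_t dy;
	int64_t adc_vref;
	int64_t adc_gnd;
};

/* Toy linear stand-in for the adcmap_batt_therm_qrd table lookup (uV per mdegC). */
static int64_t temp_to_uv(int64_t temp_mdegc)
{
	return 1800000 - temp_mdegc * 20;	/* voltage falls as temperature rises */
}

static int64_t uv_to_code(int64_t uv, const struct calib *c)
{
	return uv * c->dy / c->adc_vref + c->adc_gnd;
}

int main(void)
{
	struct calib c = { .dy = 3, .adc_vref = 1800000, .adc_gnd = 100 };
	int64_t from_low_temp = uv_to_code(temp_to_uv(0), &c);		/* cool limit */
	int64_t from_high_temp = uv_to_code(temp_to_uv(60000), &c);	/* warm limit */

	/* Low temperature corresponds to the high voltage threshold, so swap. */
	printf("high_threshold=%lld low_threshold=%lld\n",
	       (long long)from_low_temp, (long long)from_high_temp);
	return 0;
}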
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index 621e08f..066072a 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -166,14 +166,19 @@
 out:
 		spin_lock_irqsave(&drvdata->spinlock, flag);
 		drvdata->l2_off = true;
-		drvdata->state->cticontrol = cti_readl(drvdata, CTICONTROL);
-		drvdata->state->ctiappset = cti_readl(drvdata, CTIAPPSET);
-		drvdata->state->ctigate = cti_readl(drvdata, CTIGATE);
-		for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
-			drvdata->state->ctiinen[trig] =
-				cti_readl(drvdata, CTIINEN(trig));
-			drvdata->state->ctiouten[trig] =
-				cti_readl(drvdata, CTIOUTEN(trig));
+		if (drvdata->refcnt) {
+			drvdata->state->cticontrol =
+					cti_readl(drvdata, CTICONTROL);
+			drvdata->state->ctiappset =
+					cti_readl(drvdata, CTIAPPSET);
+			drvdata->state->ctigate =
+					cti_readl(drvdata, CTIGATE);
+			for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+				drvdata->state->ctiinen[trig] =
+					cti_readl(drvdata, CTIINEN(trig));
+				drvdata->state->ctiouten[trig] =
+					cti_readl(drvdata, CTIOUTEN(trig));
+			}
 		}
 		spin_unlock_irqrestore(&drvdata->spinlock, flag);
 	}
@@ -209,17 +214,22 @@
 		continue;
 out:
 		spin_lock_irqsave(&drvdata->spinlock, flag);
-		CTI_UNLOCK(drvdata);
-		cti_writel(drvdata, drvdata->state->ctiappset, CTIAPPSET);
-		cti_writel(drvdata, drvdata->state->ctigate, CTIGATE);
-		for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
-			cti_writel(drvdata, drvdata->state->ctiinen[trig],
-				   CTIINEN(trig));
-			cti_writel(drvdata, drvdata->state->ctiouten[trig],
-				   CTIOUTEN(trig));
+		if (drvdata->refcnt) {
+			CTI_UNLOCK(drvdata);
+			cti_writel(drvdata, drvdata->state->ctiappset,
+				CTIAPPSET);
+			cti_writel(drvdata, drvdata->state->ctigate,
+				CTIGATE);
+			for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+				cti_writel(drvdata,
+				drvdata->state->ctiinen[trig], CTIINEN(trig));
+				cti_writel(drvdata,
+				drvdata->state->ctiouten[trig], CTIOUTEN(trig));
+			}
+			cti_writel(drvdata, drvdata->state->cticontrol,
+				CTICONTROL);
+			CTI_LOCK(drvdata);
 		}
-		cti_writel(drvdata, drvdata->state->cticontrol, CTICONTROL);
-		CTI_LOCK(drvdata);
 		drvdata->l2_off = false;
 		spin_unlock_irqrestore(&drvdata->spinlock, flag);
 	}
@@ -380,8 +390,10 @@
 	 */
 	if (drvdata->refcnt == 0) {
 		ret = pm_runtime_get_sync(drvdata->dev);
-		if (ret)
+		if (ret < 0) {
+			pm_runtime_put(drvdata->dev);
 			goto err1;
+		}
 	}
 
 	spin_lock_irqsave(&drvdata->spinlock, flag);
@@ -464,8 +476,10 @@
 	 */
 	if (drvdata->refcnt == 0) {
 		ret = pm_runtime_get_sync(drvdata->dev);
-		if (ret)
+		if (ret < 0) {
+			pm_runtime_put(drvdata->dev);
 			goto err1;
+		}
 	}
 
 	spin_lock_irqsave(&drvdata->spinlock, flag);
@@ -1472,8 +1486,10 @@
 	}
 	if (drvdata->cti_save && !drvdata->cti_hwclk) {
 		ret = pm_runtime_get_sync(drvdata->dev);
-		if (ret)
+		if (ret < 0) {
+			pm_runtime_put(drvdata->dev);
 			return ret;
+		}
 	}
 
 	mutex_lock(&cti_lock);
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index ff579a7..7473c6e 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -47,8 +47,9 @@
 
 /** register definition **/
 /* FFSR - 0x300 */
-#define FFSR_FT_STOPPED		BIT(1)
+#define FFSR_FT_STOPPED_BIT	1
 /* FFCR - 0x304 */
+#define FFCR_FON_MAN_BIT	6
 #define FFCR_FON_MAN		BIT(6)
 #define FFCR_STOP_FI		BIT(12)
 
@@ -93,9 +94,9 @@
 	/* Generate manual flush */
 	writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
 	/* Wait for flush to complete */
-	coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+	coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
 	/* Wait for formatter to stop */
-	coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
+	coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
 
 	CS_LOCK(drvdata->base);
 }
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 21c74ec..2009a23 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -106,7 +106,7 @@
 	dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
 		dev_name(&parent->dev), dev_name(&csdev->dev));
 
-	return 0;
+	return -ENODEV;
 }
 
 static int coresight_find_link_outport(struct coresight_device *csdev,
@@ -124,7 +124,7 @@
 	dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
 		dev_name(&csdev->dev), dev_name(&child->dev));
 
-	return 0;
+	return -ENODEV;
 }
 
 static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
@@ -178,6 +178,9 @@
 	else
 		refport = 0;
 
+	if (refport < 0)
+		return refport;
+
 	if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
 		if (link_ops(csdev)->enable) {
 			ret = link_ops(csdev)->enable(csdev, inport, outport);
@@ -776,6 +779,10 @@
 		return ret;
 
 	if (val) {
+		if (csdev->enable)
+			return size;
+
 		ret = coresight_enable(csdev);
 		if (ret)
 			return ret;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 9e7ef5c..b2d8b63 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -234,12 +234,16 @@
 	/*
 	 * It's not always possible to have 1 to 2 ratio when d=7, so fall back
 	 * to minimal possible clkh in this case.
+	 *
+	 * Note:
+	 * CLKH is not allowed to be 0; in that case the I2C clock is not
+	 * generated at all.
 	 */
-	if (clk >= clkl + d) {
+	if (clk > clkl + d) {
 		clkh = clk - clkl - d;
 		clkl -= d;
 	} else {
-		clkh = 0;
+		clkh = 1;
 		clkl = clk - (d << 1);
 	}
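The davinci change above guards against programming CLKH = 0: the total divider is split into high and low counts, the fixed module delay d is subtracted from each, and when the split would leave no room for CLKH the code now falls back to 1 instead of 0 so SCL keeps toggling. A toy version of that split; the low/high target ratio and the sample numbers are illustrative, not taken from a real clock tree.

#include <stdio.h>

/*
 * "clk" is the total divider and "d" the fixed module delay. Aim for
 * roughly 1:2 high/low, but never program CLKH = 0, since that would
 * stop SCL generation entirely.
 */
static void split_divider(unsigned int clk, unsigned int d,
			  unsigned int *clkh, unsigned int *clkl)
{
	unsigned int low = clk / 3 * 2;

	if (clk > low + d) {
		*clkh = clk - low - d;
		*clkl = low - d;
	} else {
		*clkh = 1;			/* minimum legal high count */
		*clkl = clk - (d << 1);
	}
}

int main(void)
{
	unsigned int h, l;

	split_divider(24, 7, &h, &l);
	printf("clk=24: clkh=%u clkl=%u\n", h, l);
	split_divider(21, 7, &h, &l);
	printf("clk=21: clkh=%u clkl=%u\n", h, l);
	return 0;
}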
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index b32bf7e..26f1691 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -135,6 +135,7 @@
 
 #define SBREG_BAR		0x10
 #define SBREG_SMBCTRL		0xc6000c
+#define SBREG_SMBCTRL_DNV	0xcf000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS		0x08
@@ -1387,7 +1388,11 @@
 	spin_unlock(&p2sb_spinlock);
 
 	res = &tco_res[ICH_RES_MEM_OFF];
-	res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+	if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+		res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+	else
+		res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
 	res->end = res->start + 3;
 	res->flags = IORESOURCE_MEM;
 
@@ -1403,6 +1408,13 @@
 }
 
 #ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+				      acpi_physical_address address)
+{
+	return address >= priv->smba &&
+	       address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
 static acpi_status
 i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
 		     u64 *value, void *handler_context, void *region_context)
@@ -1418,7 +1430,7 @@
 	 */
 	mutex_lock(&priv->acpi_lock);
 
-	if (!priv->acpi_reserved) {
+	if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
 		priv->acpi_reserved = true;
 
 		dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 47fc1f1..c418830 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -376,6 +376,7 @@
 		goto err_desc;
 	}
 
+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -619,7 +620,6 @@
 	 * The first byte must be transmitted by the CPU.
 	 */
 	imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -665,9 +665,6 @@
 	struct imx_i2c_dma *dma = i2c_imx->dma;
 	struct device *dev = &i2c_imx->adapter.dev;
 
-	temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
-	temp |= I2CR_DMAEN;
-	imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
 
 	dma->chan_using = dma->chan_rx;
 	dma->dma_transfer_dir = DMA_DEV_TO_MEM;
@@ -678,7 +675,6 @@
 	if (result)
 		return result;
 
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -781,6 +777,7 @@
 	int i, result;
 	unsigned int temp;
 	int block_data = msgs->flags & I2C_M_RECV_LEN;
+	int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
 
 	dev_dbg(&i2c_imx->adapter.dev,
 		"<%s> write slave address: addr=0x%x\n",
@@ -807,12 +804,14 @@
 	 */
 	if ((msgs->len - 1) || block_data)
 		temp &= ~I2CR_TXAK;
+	if (use_dma)
+		temp |= I2CR_DMAEN;
 	imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
 	imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
 
 	dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
 
-	if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
+	if (use_dma)
 		return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
 
 	/* read data */
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7aea288..b51adff 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -589,7 +589,7 @@
 
 	/* unmap the data buffer */
 	if (dma_size != 0)
-		dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
+		dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
 
 	if (unlikely(!time_left)) {
 		dev_err(dev, "completion wait timed out\n");
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 99e7c97..631169b 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -2321,6 +2321,12 @@
 	struct i2c_msm_ctrl      *ctrl = i2c_get_adapdata(adap);
 	struct i2c_msm_xfer      *xfer = &ctrl->xfer;
 
+	if (num < 1) {
+		dev_err(ctrl->dev,
+			"invalid number of msgs (%d) received\n", num);
+		return -EINVAL;
+	}
+
 	if (IS_ERR_OR_NULL(msgs)) {
 		dev_err(ctrl->dev, " error on msgs Accessing invalid  pointer location\n");
 		return PTR_ERR(msgs);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index abe1798..ec37ff0 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -121,6 +121,7 @@
 	struct msm_gpi_dma_async_tx_cb_param tx_cb;
 	struct msm_gpi_dma_async_tx_cb_param rx_cb;
 	enum i2c_se_mode se_mode;
+	bool autosuspend_disable;
 };
 
 struct geni_i2c_err_log {
@@ -641,6 +642,7 @@
 {
 	struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
 	int i, ret = 0, timeout = 0;
+	int ref = 0;
 
 	gi2c->err = 0;
 	gi2c->cur = &msgs[0];
@@ -654,6 +656,12 @@
 		pm_runtime_set_suspended(gi2c->dev);
 		return ret;
 	}
+	ref = atomic_read(&gi2c->dev->power.usage_count);
+	if (ref <= 0) {
+		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+			"resume usage count mismatch:%d\n", ref);
+	}
+
 	if (gi2c->se_mode == GSI_ONLY) {
 		ret = geni_i2c_gsi_xfer(adap, msgs, num);
 		goto geni_i2c_txn_ret;
@@ -759,8 +767,17 @@
 	if (ret == 0)
 		ret = num;
 
-	pm_runtime_mark_last_busy(gi2c->dev);
-	pm_runtime_put_autosuspend(gi2c->dev);
+	if (gi2c->autosuspend_disable) {
+		pm_runtime_put_sync(gi2c->dev);
+		ref = atomic_read(&gi2c->dev->power.usage_count);
+		if (ref < 0)
+			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+				"suspend usage count mismatch:%d\n", ref);
+	} else {
+		pm_runtime_mark_last_busy(gi2c->dev);
+		pm_runtime_put_autosuspend(gi2c->dev);
+	}
+
 	gi2c->cur = NULL;
 	gi2c->err = 0;
 	dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
@@ -874,6 +891,9 @@
 		gi2c->i2c_rsc.clk_freq_out = KHz(400);
 	}
 
+	gi2c->autosuspend_disable = of_property_read_bool(pdev->dev.of_node,
+									"qcom,disable-autosuspend");
+
 	gi2c->irq = platform_get_irq(pdev, 0);
 	if (gi2c->irq < 0) {
 		dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
@@ -905,8 +925,11 @@
 	strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
 
 	pm_runtime_set_suspended(gi2c->dev);
-	pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
-	pm_runtime_use_autosuspend(gi2c->dev);
+	if (!gi2c->autosuspend_disable) {
+		pm_runtime_set_autosuspend_delay(gi2c->dev,
+							I2C_AUTO_SUSPEND_DELAY);
+		pm_runtime_use_autosuspend(gi2c->dev);
+	}
 	pm_runtime_enable(gi2c->dev);
 	i2c_add_adapter(&gi2c->adap);
 
@@ -994,6 +1017,9 @@
 				    "i2c fifo/se-dma mode. fifo depth:%d\n",
 				    gi2c_tx_depth);
 		}
+		if (gi2c->autosuspend_disable)
+			GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+					    "i2c in autosuspend disable mode\n");
 	}
 	if (gi2c->se_mode == FIFO_SE_DMA)
 		enable_irq(gi2c->irq);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4af9bba..586e557 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -547,6 +547,14 @@
 {
 	u32 cnfg;
 
+	/*
+	 * NACK interrupt is generated before the I2C controller generates
+	 * the STOP condition on the bus. So wait for 2 clock periods
+	 * before disabling the controller so that the STOP condition has
+	 * been delivered properly.
+	 */
+	udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
 	cnfg = i2c_readl(i2c_dev, I2C_CNFG);
 	if (cnfg & I2C_CNFG_PACKET_MODE_EN)
 		i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -708,15 +716,6 @@
 	if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
 		return 0;
 
-	/*
-	 * NACK interrupt is generated before the I2C controller generates
-	 * the STOP condition on the bus. So wait for 2 clock periods
-	 * before resetting the controller so that the STOP condition has
-	 * been delivered properly.
-	 */
-	if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
-		udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
 	tegra_i2c_init(i2c_dev);
 	if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
 		if (msg->flags & I2C_M_IGNORE_NAK)
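The tegra change above moves the two-SCL-period wait ahead of clearing packet mode, so the STOP condition is always delivered before the controller is reconfigured. The delay is simply two bus clock periods rounded up to whole microseconds; a quick check of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Two SCL periods expressed in microseconds, as used for the NACK delay. */
static unsigned int two_periods_us(unsigned int bus_clk_hz)
{
	return DIV_ROUND_UP(2 * 1000000u, bus_clk_hz);
}

int main(void)
{
	printf("100kHz -> %u us, 400kHz -> %u us, 1MHz -> %u us\n",
	       two_periods_us(100000), two_periods_us(400000),
	       two_periods_us(1000000));	/* 20, 5, 2 */
	return 0;
}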
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index db9105e..0da4991 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -400,11 +400,8 @@
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
 		if (ret)
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 56e92af..fdfcee92 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -247,11 +247,8 @@
 		return ret;
 
 	for (msg = msgs; msg < emsg; msg++) {
-		/* If next message is read, skip the stop condition */
-		bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-		/* but, force it if I2C_M_STOP is set */
-		if (msg->flags & I2C_M_STOP)
-			stop = true;
+		/* Emit STOP if it is the last message or I2C_M_STOP is set. */
+		bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
 		ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
 		if (ret)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 66bce3b..b72cf2f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -538,6 +538,7 @@
 {
 	u8 rx_watermark;
 	struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+	unsigned long flags;
 
 	/* Clear and enable Rx full interrupt. */
 	xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -553,6 +554,7 @@
 		rx_watermark = IIC_RX_FIFO_DEPTH;
 	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
 
+	local_irq_save(flags);
 	if (!(msg->flags & I2C_M_NOSTART))
 		/* write the address */
 		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -563,6 +565,8 @@
 
 	xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
 		msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+	local_irq_restore(flags);
+
 	if (i2c->nmsgs == 1)
 		/* very last, enable bus not busy as well */
 		xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 99eba52..1642b55 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -508,7 +508,7 @@
 		return ret;
 
 	if (!state)
-		return 0;
+		return len;
 
 	mutex_lock(&indio_dev->mlock);
 	switch ((u32)this_attr->address) {
@@ -642,7 +642,7 @@
 		code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
 			AD9523_CLK_DIST_DIV_REV(ret);
 		*val = code / 1000000;
-		*val2 = (code % 1000000) * 10;
+		*val2 = code % 1000000;
 		return IIO_VAL_INT_PLUS_MICRO;
 	default:
 		return -EINVAL;
diff --git a/drivers/iio/imu/inv_mpu/Kconfig b/drivers/iio/imu/inv_mpu/Kconfig
new file mode 100644
index 0000000..7505454
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/Kconfig
@@ -0,0 +1,63 @@
+#
+# inv-mpu-iio driver for Invensense MPU devices
+#
+
+config INV_MPU_IIO
+	tristate
+	select IIO_BUFFER
+	select IIO_KFIFO_BUF
+	select IIO_TRIGGER
+	select CRC32
+
+choice
+	prompt "Chip name"
+	depends on INV_MPU_IIO
+
+config INV_MPU_IIO_ICM20648
+	bool "ICM20648/ICM20948"
+	help
+	  Select this if you are using an ICM20648/ICM20948 chip.
+
+config INV_MPU_IIO_ICM20608D
+	bool "ICM20608D/ICM20609/ICM20689"
+	help
+	  Select this if you are using an ICM20608D/ICM20609/ICM20689 chip.
+
+config INV_MPU_IIO_ICM20602
+	bool "ICM20602"
+	help
+	  Select this if you are using an ICM20602 chip.
+
+config INV_MPU_IIO_ICM20690
+	bool "ICM20690"
+	help
+	  Select this if you are using an ICM20690 chip.
+
+config INV_MPU_IIO_IAM20680
+	bool "IAM20680"
+	help
+	  Select this if you are using an IAM20680 chip.
+
+endchoice
+
+config INV_MPU_IIO_I2C
+	tristate "Invensense ICM20xxx devices (I2C)"
+	depends on I2C && !INV_MPU6050_IIO
+	select INV_MPU_IIO
+	default n
+	help
+	  This driver supports Invensense ICM20xxx devices over I2C.
+	  This driver can be built as a module. The module will be called
+	  inv-mpu-iio-i2c.
+
+config INV_MPU_IIO_SPI
+	tristate "Invensense ICM20xxx devices (SPI)"
+	depends on SPI_MASTER && !INV_MPU6050_IIO
+	select INV_MPU_IIO
+	default n
+	help
+	  This driver supports Invensense ICM20xxx devices over SPI.
+	  This driver can be built as a module. The module will be called
+	  inv-mpu-iio-spi.
+
+source "drivers/iio/imu/inv_mpu/inv_test/Kconfig"
diff --git a/drivers/iio/imu/inv_mpu/Makefile b/drivers/iio/imu/inv_mpu/Makefile
new file mode 100644
index 0000000..dfc4c25
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/Makefile
@@ -0,0 +1,61 @@
+#
+# Makefile for Invensense inv-mpu-iio device.
+#
+
+obj-$(CONFIG_INV_MPU_IIO) += inv-mpu-iio.o
+
+inv-mpu-iio-objs += inv_mpu_common.o
+inv-mpu-iio-objs += inv_mpu_ring.o
+inv-mpu-iio-objs += inv_mpu_timestamp.o
+inv-mpu-iio-objs += inv_mpu_dts.o
+
+# chip support
+ifeq ($(CONFIG_INV_MPU_IIO_ICM20648), y)
+inv-mpu-iio-objs += icm20648/inv_mpu_init.o
+inv-mpu-iio-objs += icm20648/inv_mpu_core.o
+inv-mpu-iio-objs += icm20648/inv_mpu_parsing.o
+inv-mpu-iio-objs += icm20648/inv_mpu_setup.o
+inv-mpu-iio-objs += icm20648/inv_mpu_dmp_fifo.o
+inv-mpu-iio-objs += icm20648/inv_slave_compass.o
+inv-mpu-iio-objs += icm20648/inv_slave_pressure.o
+inv-mpu-iio-objs += icm20648/inv_slave_als.o
+inv-mpu-iio-objs += icm20648/inv_mpu_load_dmp.o
+inv-mpu-iio-objs += icm20648/inv_mpu_selftest.o
+inv-mpu-iio-objs += dmp_support/inv_mpu_misc.o
+else ifeq ($(CONFIG_INV_MPU_IIO_ICM20690), y)
+inv-mpu-iio-objs += icm20690/inv_mpu_init_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_core_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_parsing_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_setup_20690.o
+inv-mpu-iio-objs += icm20690/inv_mpu_selftest_20690.o
+inv-mpu-iio-objs += icm20690/inv_slave_compass.o
+else ifeq ($(CONFIG_INV_MPU_IIO_ICM20602), y)
+inv-mpu-iio-objs += icm20602/inv_mpu_init_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_core_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_parsing_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_setup_20602.o
+inv-mpu-iio-objs += icm20602/inv_mpu_selftest_20602.o
+else ifeq ($(CONFIG_INV_MPU_IIO_ICM20608D), y)
+inv-mpu-iio-objs += icm20608d/inv_mpu_init_20608.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_core_20608.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_parsing_20608.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_setup_20608D.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_dmp_fifo.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_load_dmp.o
+inv-mpu-iio-objs += icm20608d/inv_mpu_selftest_20608.o
+inv-mpu-iio-objs += dmp_support/inv_mpu_misc.o
+else ifeq ($(CONFIG_INV_MPU_IIO_IAM20680), y)
+inv-mpu-iio-objs += iam20680/inv_mpu_init_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_core_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_parsing_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_setup_20680.o
+inv-mpu-iio-objs += iam20680/inv_mpu_selftest_20680.o
+endif
+
+# Bus support
+obj-$(CONFIG_INV_MPU_IIO_I2C) += inv-mpu-iio-i2c.o
+inv-mpu-iio-i2c-objs := inv_mpu_i2c.o
+obj-$(CONFIG_INV_MPU_IIO_SPI) += inv-mpu-iio-spi.o
+inv-mpu-iio-spi-objs := inv_mpu_spi.o
+
+obj-y += inv_test/
diff --git a/drivers/iio/imu/inv_mpu/README b/drivers/iio/imu/inv_mpu/README
new file mode 100644
index 0000000..47ff502
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/README
@@ -0,0 +1,117 @@
+Kernel driver inv-mpu-iio
+Author: InvenSense, Inc.
+
+
+Table of Contents
+=================
+- Description
+- Integrating the Driver in the Linux Kernel
+- Dts file
+- Communicating with the Driver in Userspace
+
+
+Description
+===========
+This document describes how to install the Invensense device driver into a
+Linux kernel. The supported chips are listed in Kconfig, and the user selects
+an appropriate one from, e.g., menuconfig.
+
+
+Integrating the Driver in the Linux Kernel
+==========================================
+Please add the files as follows (kernel 3.10):
+- Copy mpu.h to <kernel_root>/include/linux/iio/imu/
+- Copy inv_mpu folder under <kernel_root>/drivers/iio/imu/
+
+In order to see the driver in menuconfig when building the kernel, please
+make modifications as shown below:
+
+    add "source "drivers/iio/imu/inv_mpu/Kconfig""
+      in <kernel_root>/drivers/iio/imu/Kconfig
+
+    add "obj-y += inv_mpu/"
+      in <kernel_root>/drivers/iio/imu/Makefile
+
+
+
+Dts file
+========
+In order to recognize the Invensense device on the I2C/SPI bus, the dts (or
+dtsi) file must be modified.
+
+Example)
+ICM20648 + AK09911/BMP280/APDS9930 on AUX I2C
+
+    i2c@f9968000 {
+        /* Invensense */
+        mpu6515_acc@68 {
+            compatible = "inven,icm20648";
+            reg = <0x68>;
+            interrupt-parent = <&msmgpio>;
+            interrupts = <73 0x2>;
+            inven,vdd_ana-supply = <&pm8941_l17>;
+            inven,vcc_i2c-supply = <&pm8941_lvs1>;
+            inven,gpio_int1 = <&msmgpio 73 0x00>;
+            fs_range = <0x00>;
+            /* mount matrix */
+            axis_map_x = <1>;
+            axis_map_y = <0>;
+            axis_map_z = <2>;
+            negate_x = <0>;
+            negate_y = <0>;
+            negate_z = <1>;
+            poll_interval = <200>;
+            min_interval = <5>;
+            inven,secondary_reg = <0x0c>;
+            /* If no compass sensor,
+             * replace "compass" with "none"
+             */
+            inven,secondary_type = "compass";
+            inven,secondary_name = "ak09911";
+            inven,secondary_axis_map_x = <1>;
+            inven,secondary_axis_map_y = <0>;
+            inven,secondary_axis_map_z = <2>;
+            inven,secondary_negate_x = <1>;
+            inven,secondary_negate_y = <1>;
+            inven,secondary_negate_z = <1>;
+            /* If no pressure sensor,
+             * replace "pressure" with "none"
+             */
+            inven,aux_type = "pressure";
+            inven,aux_name = "bmp280";
+            inven,aux_reg = <0x76>;
+            /* If no ALS sensor
+             * replace "als" with "none"
+             */
+            inven,read_only_slave_type = "als";
+            inven,read_only_slave_name = "apds9930";
+            inven,read_only_slave_reg = <0x39>;
+        };
+    };
+
+
+Communicating with the Driver in Userspace
+==========================================
+The driver generates several files in sysfs upon installation.
+These files are used to communicate with the driver. The files can be found at:
+
+(I2C) /sys/devices/*.i2c/i2c-*/*-*/iio:device*
+(SPI) /sys/devices/*.spi/spi_master/spi*/spi*.*/iio:device*
+
+Group and owner for all entries should be updated to system/system at
+boot time so that userspace can access them properly.
+
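+Example)
+A minimal userspace sketch (not part of the driver) that enables the
+accelerometer and sets its rate through the sysfs entries created by this
+driver (in_accel_rate, in_accel_enable). The device index (iio:device0),
+the /sys/bus/iio symlink and the "mpu" attribute group directory are
+assumptions; adjust the path to match the target system.
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    /* Write a decimal string to one sysfs attribute. */
+    static int write_attr(const char *path, const char *val)
+    {
+        FILE *f = fopen(path, "w");
+
+        if (!f) {
+            perror(path);
+            return -1;
+        }
+        fputs(val, f);
+        return fclose(f);
+    }
+
+    int main(void)
+    {
+        const char *base = "/sys/bus/iio/devices/iio:device0/mpu";
+        char path[256];
+
+        /* request 50 Hz, then turn the accel sensor on */
+        snprintf(path, sizeof(path), "%s/in_accel_rate", base);
+        if (write_attr(path, "50"))
+            return EXIT_FAILURE;
+        snprintf(path, sizeof(path), "%s/in_accel_enable", base);
+        if (write_attr(path, "1"))
+            return EXIT_FAILURE;
+        return EXIT_SUCCESS;
+    }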
+
+License
+=======
+Copyright (C) 2018 InvenSense, Inc.
+
+This software is licensed under the terms of the GNU General Public
+License version 2, as published by the Free Software Foundation, and
+may be copied, distributed, and modified under those terms.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_core_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_core_20680.c
new file mode 100644
index 0000000..b429f57
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_core_20680.c
@@ -0,0 +1,1072 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+
+#include "../inv_mpu_iio.h"
+
+static const struct inv_hw_s hw_info[INV_NUM_PARTS] = {
+	{128, "ICM20608D"},
+	{128, "ICM20690"},
+	{128, "ICM20602"},
+	{128, "IAM20680"},
+};
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static char debug_reg_addr = 0x6;
+#endif
+
+const char sensor_l_info[][30] = {
+	"SENSOR_L_ACCEL",
+	"SENSOR_L_GYRO",
+	"SENSOR_L_MAG",
+	"SENSOR_L_ALS",
+	"SENSOR_L_SIXQ",
+	"SENSOR_L_THREEQ",
+	"SENSOR_L_NINEQ",
+	"SENSOR_L_PEDQ",
+	"SENSOR_L_GEOMAG",
+	"SENSOR_L_PRESSURE",
+	"SENSOR_L_GYRO_CAL",
+	"SENSOR_L_MAG_CAL",
+	"SENSOR_L_EIS_GYRO",
+	"SENSOR_L_ACCEL_WAKE",
+	"SENSOR_L_GYRO_WAKE",
+	"SENSOR_L_MAG_WAKE",
+	"SENSOR_L_ALS_WAKE",
+	"SENSOR_L_SIXQ_WAKE",
+	"SENSOR_L_NINEQ_WAKE",
+	"SENSOR_L_PEDQ_WAKE",
+	"SENSOR_L_GEOMAG_WAKE",
+	"SENSOR_L_PRESSURE_WAKE",
+	"SENSOR_L_GYRO_CAL_WAKE",
+	"SENSOR_L_MAG_CAL_WAKE",
+	"SENSOR_L_NUM_MAX",
+};
+
+static int inv_set_accel_bias_reg(struct inv_mpu_state *st,
+			int accel_bias, int axis)
+{
+	int accel_reg_bias;
+	u8 addr;
+	u8 d[2];
+	int result = 0;
+
+	switch (axis) {
+	case 0:
+		/* X */
+		addr = REG_XA_OFFS_H;
+		break;
+	case 1:
+		/* Y */
+		addr = REG_YA_OFFS_H;
+		break;
+	case 2:
+		/* Z */
+		addr = REG_ZA_OFFS_H;
+		break;
+	default:
+		result = -EINVAL;
+		goto accel_bias_set_err;
+	}
+
+	result = inv_plat_read(st, addr, 2, d);
+	if (result)
+		goto accel_bias_set_err;
+	accel_reg_bias = ((int)d[0] << 8) | d[1];
+
+	/* accel_bias is in 2g full-scale units scaled by 1<<16.
+	 * Dividing by 8 converts it to the 16g scale of the offset
+	 * register, dividing by 1<<16 removes the fixed-point scaling,
+	 * and clearing bit0 of the delta preserves bit0 of the register. */
+	accel_reg_bias -= ((accel_bias / 8 / 65536) & ~1);
+
+	d[0] = (accel_reg_bias >> 8) & 0xff;
+	d[1] = (accel_reg_bias) & 0xff;
+	result = inv_plat_single_write(st, addr, d[0]);
+	if (result)
+		goto accel_bias_set_err;
+	result = inv_plat_single_write(st, addr + 1, d[1]);
+	if (result)
+		goto accel_bias_set_err;
+
+accel_bias_set_err:
+	return result;
+}
+
+static int inv_set_gyro_bias_reg(struct inv_mpu_state *st,
+			const int gyro_bias, int axis)
+{
+	int gyro_reg_bias;
+	u8 addr;
+	u8 d[2];
+	int result = 0;
+
+	switch (axis) {
+	case 0:
+		/* X */
+		addr = REG_XG_OFFS_USR_H;
+		break;
+	case 1:
+		/* Y */
+		addr = REG_YG_OFFS_USR_H;
+		break;
+	case 2:
+		/* Z */
+		addr = REG_ZG_OFFS_USR_H;
+		break;
+	default:
+		result = -EINVAL;
+		goto gyro_bias_set_err;
+	}
+
+	/* gyro_bias is in 2000dps full-scale units scaled by 1<<16.
+	 * Multiplying by 2 converts it to the 1000dps scale of the offset
+	 * register; the sign is inverted so the offset cancels the bias. */
+	gyro_reg_bias = (-gyro_bias * 2 / 65536);
+
+	d[0] = (gyro_reg_bias >> 8) & 0xff;
+	d[1] = (gyro_reg_bias) & 0xff;
+	result = inv_plat_single_write(st, addr, d[0]);
+	if (result)
+		goto gyro_bias_set_err;
+	result = inv_plat_single_write(st, addr + 1, d[1]);
+	if (result)
+		goto gyro_bias_set_err;
+
+gyro_bias_set_err:
+	return result;
+}
+
+static int _bias_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int result, data;
+
+	result = inv_switch_power_in_lp(st, true);
+	if (result)
+		return result;
+
+	result = kstrtoint(buf, 10, &data);
+	if (result)
+		goto bias_store_fail;
+	switch (this_attr->address) {
+	case ATTR_ACCEL_X_OFFSET:
+		result = inv_set_accel_bias_reg(st, data, 0);
+		if (result)
+			goto bias_store_fail;
+		st->input_accel_bias[0] = data;
+		break;
+	case ATTR_ACCEL_Y_OFFSET:
+		result = inv_set_accel_bias_reg(st, data, 1);
+		if (result)
+			goto bias_store_fail;
+		st->input_accel_bias[1] = data;
+		break;
+	case ATTR_ACCEL_Z_OFFSET:
+		result = inv_set_accel_bias_reg(st, data, 2);
+		if (result)
+			goto bias_store_fail;
+		st->input_accel_bias[2] = data;
+		break;
+	case ATTR_GYRO_X_OFFSET:
+		result = inv_set_gyro_bias_reg(st, data, 0);
+		if (result)
+			goto bias_store_fail;
+		st->input_gyro_bias[0] = data;
+		break;
+	case ATTR_GYRO_Y_OFFSET:
+		result = inv_set_gyro_bias_reg(st, data, 1);
+		if (result)
+			goto bias_store_fail;
+		st->input_gyro_bias[1] = data;
+		break;
+	case ATTR_GYRO_Z_OFFSET:
+		result = inv_set_gyro_bias_reg(st, data, 2);
+		if (result)
+			goto bias_store_fail;
+		st->input_gyro_bias[2] = data;
+		break;
+	default:
+		break;
+	}
+
+bias_store_fail:
+	if (result)
+		return result;
+	result = inv_switch_power_in_lp(st, false);
+	if (result)
+		return result;
+
+	return count;
+}
+
+static ssize_t inv_bias_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	int result;
+
+	mutex_lock(&indio_dev->mlock);
+	result = _bias_store(dev, attr, buf, count);
+	mutex_unlock(&indio_dev->mlock);
+
+	return result;
+}
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static ssize_t inv_debug_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int result, data;
+
+	result = kstrtoint(buf, 10, &data);
+	if (result)
+		return result;
+	switch (this_attr->address) {
+	case ATTR_DMP_LP_EN_OFF:
+		st->chip_config.lp_en_mode_off = !!data;
+		inv_switch_power_in_lp(st, !!data);
+		break;
+	case ATTR_DMP_CLK_SEL:
+		st->chip_config.clk_sel = !!data;
+		inv_switch_power_in_lp(st, !!data);
+		break;
+	case ATTR_DEBUG_REG_ADDR:
+		debug_reg_addr = data;
+		break;
+	case ATTR_DEBUG_REG_WRITE:
+		inv_plat_single_write(st, debug_reg_addr, data);
+		break;
+	}
+	return count;
+}
+#endif
+
+static int _misc_attr_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int result, data;
+
+	result = inv_switch_power_in_lp(st, true);
+	if (result)
+		return result;
+	result = kstrtoint(buf, 10, &data);
+	if (result)
+		return result;
+	switch (this_attr->address) {
+	case ATTR_GYRO_SCALE:
+		if (data > 3)
+			return -EINVAL;
+		st->chip_config.fsr = data;
+		result = inv_set_gyro_sf(st);
+		return result;
+	case ATTR_ACCEL_SCALE:
+		if (data > 3)
+			return -EINVAL;
+		st->chip_config.accel_fs = data;
+		result = inv_set_accel_sf(st);
+		return result;
+	default:
+		return -EINVAL;
+	}
+	st->trigger_state = MISC_TRIGGER;
+	result = set_inv_enable(indio_dev);
+
+	return result;
+}
+
+/*
+ * inv_misc_attr_store() - store misc attributes under the device mutex.
+ */
+static ssize_t inv_misc_attr_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	int result;
+
+	mutex_lock(&indio_dev->mlock);
+	result = _misc_attr_store(dev, attr, buf, count);
+	mutex_unlock(&indio_dev->mlock);
+	if (result)
+		return result;
+
+	return count;
+}
+
+static ssize_t inv_sensor_rate_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	return snprintf(buf, MAX_WR_SZ, "%d\n",
+					st->sensor_l[this_attr->address].rate);
+}
+
+static ssize_t inv_sensor_rate_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int data, rate, ind;
+	int result;
+
+	result = kstrtoint(buf, 10, &data);
+	if (result)
+		return -EINVAL;
+	if (data <= 0) {
+		pr_err("sensor_rate_store: invalid data=%d\n", data);
+		return -EINVAL;
+	}
+	ind = this_attr->address;
+	rate = inv_rate_convert(st, ind, data);
+
+	pr_debug("sensor [%s] requested rate %d input [%d]\n",
+						sensor_l_info[ind], rate, data);
+
+	if (rate == st->sensor_l[ind].rate)
+		return count;
+	mutex_lock(&indio_dev->mlock);
+	st->sensor_l[ind].rate = rate;
+	st->trigger_state = DATA_TRIGGER;
+	inv_check_sensor_on(st);
+	result = set_inv_enable(indio_dev);
+	pr_debug("%s rate %d div %d\n", sensor_l_info[ind],
+				st->sensor_l[ind].rate, st->sensor_l[ind].div);
+	mutex_unlock(&indio_dev->mlock);
+
+	return count;
+}
+
+static ssize_t inv_sensor_on_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	return snprintf(buf, MAX_WR_SZ, "%d\n", st->sensor_l[this_attr->address].on);
+}
+
+static ssize_t inv_sensor_on_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int data, on, ind;
+	int result;
+
+	result = kstrtoint(buf, 10, &data);
+	if (result)
+		return -EINVAL;
+	if (data < 0) {
+		pr_err("sensor_on_store: invalid data=%d\n", data);
+		return -EINVAL;
+	}
+	ind = this_attr->address;
+	on = !!data;
+
+	pr_debug("sensor [%s] requested %s, input [%d]\n",
+			sensor_l_info[ind], (on == 1) ? "On" : "Off", data);
+
+	if (on == st->sensor_l[ind].on) {
+		pr_debug("sensor [%s] is already %s, input [%d]\n",
+			sensor_l_info[ind], (on == 1) ? "On" : "Off", data);
+		return count;
+	}
+
+	mutex_lock(&indio_dev->mlock);
+	st->sensor_l[ind].on = on;
+	st->trigger_state = RATE_TRIGGER;
+	inv_check_sensor_on(st);
+	result = set_inv_enable(indio_dev);
+	mutex_unlock(&indio_dev->mlock);
+	if (result)
+		return result;
+
+	pr_debug("Sensor [%s] is %s by sysfs\n",
+				sensor_l_info[ind], (on == 1) ? "On" : "Off");
+	return count;
+}
+
+static int inv_check_l_step(struct inv_mpu_state *st)
+{
+	if (st->step_counter_l_on || st->step_counter_wake_l_on)
+		st->ped.on = true;
+	else
+		st->ped.on = false;
+
+	return 0;
+}
+
+static int _basic_attr_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int data;
+	int result;
+	u32 power_on_data;
+
+	result = kstrtoint(buf, 10, &data);
+	if (result || (data < 0))
+		return -EINVAL;
+
+	switch (this_attr->address) {
+	case ATTR_DMP_PED_ON:
+		if ((!!data) == st->ped.on)
+			return count;
+		st->ped.on = !!data;
+		break;
+	case ATTR_DMP_TILT_ENABLE:
+		if ((!!data) == st->chip_config.tilt_enable)
+			return count;
+		st->chip_config.tilt_enable = !!data;
+		pr_info("Tilt %s\n",
+			st->chip_config.tilt_enable ==
+			1 ? "Enabled" : "Disabled");
+		break;
+	case ATTR_DMP_PICK_UP_ENABLE:
+		if ((!!data) == st->chip_config.pick_up_enable) {
+			pr_info("Pick_up enable already %s\n",
+				st->chip_config.pick_up_enable ==
+				1 ? "Enabled" : "Disabled");
+			return count;
+		}
+		st->chip_config.pick_up_enable = !!data;
+		pr_info("Pick up %s\n",
+			st->chip_config.pick_up_enable ==
+			1 ? "Enable" : "Disable");
+		break;
+	case ATTR_IN_POWER_ON:
+		{
+			u8 p0[2];
+			u8 p1[2];
+
+			power_on_data = (u32)data;
+			p0[0] = (power_on_data & 0xff);
+			p0[1] = ((power_on_data >> 8) & 0xff);
+			p1[0] = ((power_on_data >> 16) & 0xff);
+			p1[1] = ((power_on_data >> 24) & 0xff);
+
+			if (st->bus_type == BUS_SPI) {
+				struct spi_transfer power_on;
+				struct spi_message msg;
+
+				memset(&power_on, 0, sizeof(struct spi_transfer));
+
+				power_on.bits_per_word = 8;
+				power_on.len = 2;
+
+				power_on.tx_buf = p0;
+				power_on.rx_buf = p1;
+				spi_message_init(&msg);
+				spi_message_add_tail(&power_on, &msg);
+				spi_sync(to_spi_device(st->dev), &msg);
+
+			} else if (st->bus_type == BUS_I2C) {
+				struct i2c_msg msgs[2];
+
+				p0[0] &= 0x7f;
+
+				msgs[0].addr = st->i2c_addr;
+				msgs[0].flags = 0;	/* write */
+				msgs[0].buf = &p0[0];
+				msgs[0].len = 1;
+
+				msgs[1].addr = st->i2c_addr;
+				msgs[1].flags = I2C_M_RD;
+				msgs[1].buf = &p1[1];
+				msgs[1].len = 1;
+
+				result = i2c_transfer(st->sl_handle, msgs, 2);
+				if (result < 2)
+					return -EIO;
+			}
+			st->power_on_data = ((p0[0] << 24) | (p0[1] << 16) |
+							(p1[0] << 8) | p1[1]);
+			return count;
+		}
+	case ATTR_DMP_EIS_ENABLE:
+		if ((!!data) == st->chip_config.eis_enable)
+			return count;
+		st->chip_config.eis_enable = !!data;
+		pr_info("Eis %s\n",
+			st->chip_config.eis_enable == 1 ? "Enable" : "Disable");
+		break;
+	case ATTR_DMP_STEP_DETECTOR_ON:
+		st->step_detector_l_on = !!data;
+		break;
+	case ATTR_DMP_STEP_DETECTOR_WAKE_ON:
+		st->step_detector_wake_l_on = !!data;
+		break;
+	case ATTR_DMP_STEP_COUNTER_ON:
+		st->step_counter_l_on = !!data;
+		break;
+	case ATTR_DMP_STEP_COUNTER_WAKE_ON:
+		st->step_counter_wake_l_on = !!data;
+		break;
+	case ATTR_DMP_BATCHMODE_TIMEOUT:
+		if (data == st->batch.timeout)
+			return count;
+		st->batch.timeout = data;
+		break;
+	default:
+		return -EINVAL;
+	};
+	inv_check_l_step(st);
+	inv_check_sensor_on(st);
+
+	st->trigger_state = EVENT_TRIGGER;
+	result = set_inv_enable(indio_dev);
+	if (result)
+		return result;
+
+	return count;
+}
+
+/*
+ * inv_basic_attr_store() - store basic attributes under the device mutex.
+ */
+static ssize_t inv_basic_attr_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	int result;
+
+	mutex_lock(&indio_dev->mlock);
+	result = _basic_attr_store(dev, attr, buf, count);
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return result;
+}
+
+/*
+ * inv_attr_show()
+ */
+static ssize_t inv_attr_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s8 *m;
+
+	switch (this_attr->address) {
+	case ATTR_GYRO_SCALE:
+		{
+			const s16 gyro_scale[] = { 250, 500, 1000, 2000 };
+
+			return snprintf(buf, MAX_WR_SZ, "%d\n",
+				gyro_scale[st->chip_config.fsr]);
+		}
+	case ATTR_ACCEL_SCALE:
+		{
+			const s16 accel_scale[] = { 2, 4, 8, 16 };
+			return snprintf(buf, MAX_WR_SZ, "%d\n",
+				accel_scale[st->chip_config.accel_fs]);
+		}
+	case ATTR_GYRO_ENABLE:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->chip_config.gyro_enable);
+	case ATTR_ACCEL_ENABLE:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->chip_config.accel_enable);
+	case ATTR_IN_POWER_ON:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->power_on_data);
+	case ATTR_DMP_BATCHMODE_TIMEOUT:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->batch.timeout);
+	case ATTR_DMP_PED_ON:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->ped.on);
+	case ATTR_DMP_TILT_ENABLE:
+		return snprintf(buf, MAX_WR_SZ, "%d\n",
+			st->chip_config.tilt_enable);
+	case ATTR_DMP_PICK_UP_ENABLE:
+		return snprintf(buf, MAX_WR_SZ, "%d\n",
+			st->chip_config.pick_up_enable);
+	case ATTR_DMP_EIS_ENABLE:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->chip_config.eis_enable);
+	case ATTR_DMP_LP_EN_OFF:
+		return snprintf(buf, MAX_WR_SZ, "%d\n",
+			st->chip_config.lp_en_mode_off);
+	case ATTR_DMP_STEP_COUNTER_ON:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_counter_l_on);
+	case ATTR_DMP_STEP_COUNTER_WAKE_ON:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_counter_wake_l_on);
+	case ATTR_DMP_STEP_DETECTOR_ON:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_detector_l_on);
+	case ATTR_DMP_STEP_DETECTOR_WAKE_ON:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->step_detector_wake_l_on);
+	case ATTR_GYRO_MATRIX:
+		m = st->plat_data.orientation;
+		return snprintf(buf, MAX_WR_SZ, "%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+			m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7],
+			m[8]);
+	case ATTR_ACCEL_MATRIX:
+		m = st->plat_data.orientation;
+		return snprintf(buf, MAX_WR_SZ, "%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+			m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7],
+			m[8]);
+	case ATTR_GYRO_SF:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_sf);
+	case ATTR_ANGLVEL_X_ST_CALIBBIAS:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_st_bias[0]);
+	case ATTR_ANGLVEL_Y_ST_CALIBBIAS:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_st_bias[1]);
+	case ATTR_ANGLVEL_Z_ST_CALIBBIAS:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->gyro_st_bias[2]);
+	case ATTR_ACCEL_X_ST_CALIBBIAS:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->accel_st_bias[0]);
+	case ATTR_ACCEL_Y_ST_CALIBBIAS:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->accel_st_bias[1]);
+	case ATTR_ACCEL_Z_ST_CALIBBIAS:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->accel_st_bias[2]);
+	case ATTR_GYRO_X_OFFSET:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_gyro_bias[0]);
+	case ATTR_GYRO_Y_OFFSET:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_gyro_bias[1]);
+	case ATTR_GYRO_Z_OFFSET:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_gyro_bias[2]);
+	case ATTR_ACCEL_X_OFFSET:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_accel_bias[0]);
+	case ATTR_ACCEL_Y_OFFSET:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_accel_bias[1]);
+	case ATTR_ACCEL_Z_OFFSET:
+		return snprintf(buf, MAX_WR_SZ, "%d\n", st->input_accel_bias[2]);
+	default:
+		return -EPERM;
+	}
+}
+
+static ssize_t inv_self_test(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	int res;
+
+	mutex_lock(&indio_dev->mlock);
+	res = inv_hw_self_test(st);
+	set_inv_enable(indio_dev);
+	mutex_unlock(&indio_dev->mlock);
+
+	return snprintf(buf, MAX_WR_SZ, "%d\n", res);
+}
+
+
+/*
+ *  inv_temperature_show() - Read temperature data directly from registers.
+ */
+static ssize_t inv_temperature_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+	u8 data[2];
+	s32 temp;
+	int res;
+
+	mutex_lock(&indio_dev->mlock);
+	res = inv_plat_read(st, REG_RAW_TEMP, 2, data);
+	if (res) {
+		mutex_unlock(&indio_dev->mlock);
+		return res;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
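+	/* convert raw counts to 1/100 degC; TEMP_SENSITIVITY and
+	 * TEMP_OFFSET are both scaled by 100 (see the register header) */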
+	temp = (s32)be16_to_cpup((__be16 *)(data)) * 10000;
+	temp = temp / TEMP_SENSITIVITY + TEMP_OFFSET;
+
+	return snprintf(buf, MAX_WR_SZ, "%d %lld\n", temp, get_time_ns());
+}
+
+/*
+ *  inv_reg_dump_show() - Register dump for testing.
+ */
+static ssize_t inv_reg_dump_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ii;
+	char data;
+	int bytes_printed = 0;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	bytes_printed += snprintf(buf + bytes_printed, MAX_WR_SZ, "bank 0\n");
+
+	for (ii = 0; ii < 0x7F; ii++) {
+		/* don't read fifo r/w register */
+		if ((ii == REG_MEM_R_W) || (ii == REG_FIFO_R_W))
+			data = 0;
+		else
+			inv_plat_read(st, ii, 1, &data);
+		bytes_printed += snprintf(buf + bytes_printed, MAX_WR_SZ,
+				"%#2x: %#2x\n", ii, data);
+	}
+	set_inv_enable(indio_dev);
+	mutex_unlock(&indio_dev->mlock);
+
+	return bytes_printed;
+}
+
+static ssize_t inv_flush_batch_store(struct device *dev,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	int result, data;
+
+	result = kstrtoint(buf, 10, &data);
+	if (result)
+		return result;
+
+	mutex_lock(&indio_dev->mlock);
+	result = inv_flush_batch_data(indio_dev, data);
+	mutex_unlock(&indio_dev->mlock);
+
+	return count;
+}
+
+static const struct iio_chan_spec inv_mpu_channels[] = {
+	IIO_CHAN_SOFT_TIMESTAMP(INV_MPU_SCAN_TIMESTAMP),
+};
+
+/* special run time sysfs entry, read only */
+static DEVICE_ATTR(debug_reg_dump, S_IRUGO | S_IWUSR, inv_reg_dump_show, NULL);
+static DEVICE_ATTR(out_temperature, S_IRUGO | S_IWUSR,
+			inv_temperature_show, NULL);
+static DEVICE_ATTR(misc_self_test, S_IRUGO | S_IWUSR, inv_self_test, NULL);
+
+static IIO_DEVICE_ATTR(info_anglvel_matrix, S_IRUGO, inv_attr_show, NULL,
+			ATTR_GYRO_MATRIX);
+static IIO_DEVICE_ATTR(info_accel_matrix, S_IRUGO, inv_attr_show, NULL,
+			ATTR_ACCEL_MATRIX);
+
+static IIO_DEVICE_ATTR(info_gyro_sf, S_IRUGO, inv_attr_show, NULL,
+			ATTR_GYRO_SF);
+/* write only sysfs */
+static DEVICE_ATTR(misc_flush_batch, S_IWUSR, NULL, inv_flush_batch_store);
+
+/* sensor on/off sysfs control */
+static IIO_DEVICE_ATTR(in_accel_enable, S_IRUGO | S_IWUSR,
+			inv_sensor_on_show, inv_sensor_on_store, SENSOR_L_ACCEL);
+static IIO_DEVICE_ATTR(in_anglvel_enable, S_IRUGO | S_IWUSR,
+			inv_sensor_on_show, inv_sensor_on_store, SENSOR_L_GYRO);
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(in_eis_enable, S_IRUGO | S_IWUSR,
+			inv_sensor_on_show, inv_sensor_on_store,
+			SENSOR_L_EIS_GYRO);
+#endif
+static IIO_DEVICE_ATTR(in_accel_wake_enable, S_IRUGO | S_IWUSR,
+			inv_sensor_on_show, inv_sensor_on_store,
+			SENSOR_L_ACCEL_WAKE);
+static IIO_DEVICE_ATTR(in_anglvel_wake_enable, S_IRUGO | S_IWUSR,
+			inv_sensor_on_show, inv_sensor_on_store,
+			SENSOR_L_GYRO_WAKE);
+
+/* sensor rate sysfs control */
+static IIO_DEVICE_ATTR(in_accel_rate, S_IRUGO | S_IWUSR,
+			inv_sensor_rate_show, inv_sensor_rate_store,
+			SENSOR_L_ACCEL);
+static IIO_DEVICE_ATTR(in_anglvel_rate, S_IRUGO | S_IWUSR, inv_sensor_rate_show,
+			inv_sensor_rate_store, SENSOR_L_GYRO);
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(in_eis_rate, S_IRUGO | S_IWUSR,
+			inv_sensor_rate_show, inv_sensor_rate_store,
+			SENSOR_L_EIS_GYRO);
+#endif
+static IIO_DEVICE_ATTR(in_accel_wake_rate, S_IRUGO | S_IWUSR,
+			inv_sensor_rate_show, inv_sensor_rate_store,
+			SENSOR_L_ACCEL_WAKE);
+static IIO_DEVICE_ATTR(in_anglvel_wake_rate, S_IRUGO | S_IWUSR,
+			inv_sensor_rate_show, inv_sensor_rate_store,
+			SENSOR_L_GYRO_WAKE);
+
+static IIO_DEVICE_ATTR(misc_batchmode_timeout, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_DMP_BATCHMODE_TIMEOUT);
+
+/* engine scale */
+static IIO_DEVICE_ATTR(in_accel_scale, S_IRUGO | S_IWUSR, inv_attr_show,
+			inv_misc_attr_store, ATTR_ACCEL_SCALE);
+static IIO_DEVICE_ATTR(in_anglvel_scale, S_IRUGO | S_IWUSR, inv_attr_show,
+			inv_misc_attr_store, ATTR_GYRO_SCALE);
+
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(debug_lp_en_off, S_IRUGO | S_IWUSR, inv_attr_show,
+			inv_debug_store, ATTR_DMP_LP_EN_OFF);
+static IIO_DEVICE_ATTR(debug_clock_sel, S_IRUGO | S_IWUSR, inv_attr_show,
+			inv_debug_store, ATTR_DMP_CLK_SEL);
+static IIO_DEVICE_ATTR(debug_reg_write, S_IRUGO | S_IWUSR, inv_attr_show,
+			inv_debug_store, ATTR_DEBUG_REG_WRITE);
+static IIO_DEVICE_ATTR(debug_reg_write_addr, S_IRUGO | S_IWUSR, inv_attr_show,
+			inv_debug_store, ATTR_DEBUG_REG_ADDR);
+#endif
+
+static IIO_DEVICE_ATTR(in_accel_x_st_calibbias, S_IRUGO | S_IWUSR,
+			inv_attr_show, NULL, ATTR_ACCEL_X_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_accel_y_st_calibbias, S_IRUGO | S_IWUSR,
+			inv_attr_show, NULL, ATTR_ACCEL_Y_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_accel_z_st_calibbias, S_IRUGO | S_IWUSR,
+			inv_attr_show, NULL, ATTR_ACCEL_Z_ST_CALIBBIAS);
+
+static IIO_DEVICE_ATTR(in_anglvel_x_st_calibbias, S_IRUGO | S_IWUSR,
+			inv_attr_show, NULL, ATTR_ANGLVEL_X_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_anglvel_y_st_calibbias, S_IRUGO | S_IWUSR,
+			inv_attr_show, NULL, ATTR_ANGLVEL_Y_ST_CALIBBIAS);
+static IIO_DEVICE_ATTR(in_anglvel_z_st_calibbias, S_IRUGO | S_IWUSR,
+			inv_attr_show, NULL, ATTR_ANGLVEL_Z_ST_CALIBBIAS);
+
+static IIO_DEVICE_ATTR(in_accel_x_offset, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_bias_store, ATTR_ACCEL_X_OFFSET);
+static IIO_DEVICE_ATTR(in_accel_y_offset, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_bias_store, ATTR_ACCEL_Y_OFFSET);
+static IIO_DEVICE_ATTR(in_accel_z_offset, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_bias_store, ATTR_ACCEL_Z_OFFSET);
+
+static IIO_DEVICE_ATTR(in_anglvel_x_offset, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_bias_store, ATTR_GYRO_X_OFFSET);
+static IIO_DEVICE_ATTR(in_anglvel_y_offset, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_bias_store, ATTR_GYRO_Y_OFFSET);
+static IIO_DEVICE_ATTR(in_anglvel_z_offset, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_bias_store, ATTR_GYRO_Z_OFFSET);
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static IIO_DEVICE_ATTR(in_step_detector_enable, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_DMP_STEP_DETECTOR_ON);
+static IIO_DEVICE_ATTR(in_step_detector_wake_enable, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_DMP_STEP_DETECTOR_WAKE_ON);
+static IIO_DEVICE_ATTR(in_step_counter_enable, S_IRUGO | S_IWUSR, inv_attr_show,
+			inv_basic_attr_store, ATTR_DMP_STEP_COUNTER_ON);
+static IIO_DEVICE_ATTR(in_step_counter_wake_enable, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_DMP_STEP_COUNTER_WAKE_ON);
+
+static IIO_DEVICE_ATTR(event_tilt_enable, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_DMP_TILT_ENABLE);
+
+static IIO_DEVICE_ATTR(event_eis_enable, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_DMP_EIS_ENABLE);
+
+static IIO_DEVICE_ATTR(event_pick_up_enable, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_DMP_PICK_UP_ENABLE);
+
+static IIO_DEVICE_ATTR(in_power_on, S_IRUGO | S_IWUSR,
+			inv_attr_show, inv_basic_attr_store,
+			ATTR_IN_POWER_ON);
+#endif
+
+static const struct attribute *inv_raw_attributes[] = {
+	&dev_attr_debug_reg_dump.attr,
+	&dev_attr_out_temperature.attr,
+	&dev_attr_misc_flush_batch.attr,
+	&dev_attr_misc_self_test.attr,
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+	&iio_dev_attr_in_power_on.dev_attr.attr,
+#endif
+	&iio_dev_attr_in_accel_enable.dev_attr.attr,
+	&iio_dev_attr_in_accel_wake_enable.dev_attr.attr,
+	&iio_dev_attr_info_accel_matrix.dev_attr.attr,
+	&iio_dev_attr_in_accel_scale.dev_attr.attr,
+	&iio_dev_attr_misc_batchmode_timeout.dev_attr.attr,
+	&iio_dev_attr_in_accel_rate.dev_attr.attr,
+	&iio_dev_attr_in_accel_wake_rate.dev_attr.attr,
+};
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static const struct attribute *inv_debug_attributes[] = {
+	&iio_dev_attr_debug_lp_en_off.dev_attr.attr,
+	&iio_dev_attr_debug_clock_sel.dev_attr.attr,
+	&iio_dev_attr_debug_reg_write.dev_attr.attr,
+	&iio_dev_attr_debug_reg_write_addr.dev_attr.attr,
+};
+#endif
+
+static const struct attribute *inv_gyro_attributes[] = {
+	&iio_dev_attr_info_anglvel_matrix.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_enable.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_rate.dev_attr.attr,
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+	&iio_dev_attr_in_eis_enable.dev_attr.attr,
+#endif
+	&iio_dev_attr_in_anglvel_wake_enable.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_scale.dev_attr.attr,
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+	&iio_dev_attr_in_eis_rate.dev_attr.attr,
+#endif
+	&iio_dev_attr_in_anglvel_wake_rate.dev_attr.attr,
+	&iio_dev_attr_info_gyro_sf.dev_attr.attr,
+};
+
+static const struct attribute *inv_bias_attributes[] = {
+	&iio_dev_attr_in_accel_x_st_calibbias.dev_attr.attr,
+	&iio_dev_attr_in_accel_y_st_calibbias.dev_attr.attr,
+	&iio_dev_attr_in_accel_z_st_calibbias.dev_attr.attr,
+	&iio_dev_attr_in_accel_x_offset.dev_attr.attr,
+	&iio_dev_attr_in_accel_y_offset.dev_attr.attr,
+	&iio_dev_attr_in_accel_z_offset.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_x_st_calibbias.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_y_st_calibbias.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_z_st_calibbias.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_x_offset.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_y_offset.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_z_offset.dev_attr.attr,
+};
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+static const struct attribute *inv_pedometer_attributes[] = {
+	&iio_dev_attr_event_tilt_enable.dev_attr.attr,
+	&iio_dev_attr_event_eis_enable.dev_attr.attr,
+	&iio_dev_attr_event_pick_up_enable.dev_attr.attr,
+	&iio_dev_attr_in_step_counter_enable.dev_attr.attr,
+	&iio_dev_attr_in_step_counter_wake_enable.dev_attr.attr,
+	&iio_dev_attr_in_step_detector_enable.dev_attr.attr,
+	&iio_dev_attr_in_step_detector_wake_enable.dev_attr.attr,
+};
+#endif
+
+static struct attribute *inv_attributes[ARRAY_SIZE(inv_raw_attributes) +
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+					ARRAY_SIZE(inv_debug_attributes) +
+#endif
+					ARRAY_SIZE(inv_gyro_attributes) +
+					ARRAY_SIZE(inv_bias_attributes) +
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+					ARRAY_SIZE(inv_pedometer_attributes) +
+#endif
+					1];
+
+static const struct attribute_group inv_attribute_group = {
+	.name = "mpu",
+	.attrs = inv_attributes
+};
+
+static const struct iio_info mpu_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &inv_attribute_group,
+};
+
+/*
+ *  inv_check_chip_type() - check and setup chip type.
+ */
+int inv_check_chip_type(struct iio_dev *indio_dev, const char *name)
+{
+	int result;
+	int t_ind;
+	struct inv_chip_config_s *conf;
+	struct mpu_platform_data *plat;
+	struct inv_mpu_state *st;
+
+	st = iio_priv(indio_dev);
+	conf = &st->chip_config;
+	plat = &st->plat_data;
+
+	if (!strcmp(name, "iam20680"))
+		st->chip_type = IAM20680;
+	else
+		return -EPERM;
+	st->chip_config.has_gyro = 1;
+
+	st->hw = &hw_info[st->chip_type];
+	result = inv_mpu_initialize(st);
+	if (result)
+		return result;
+
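+	/* concatenate the per-feature attribute lists into inv_attributes
+	 * and NULL-terminate it for the attribute group below */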
+	t_ind = 0;
+	memcpy(&inv_attributes[t_ind], inv_raw_attributes,
+				sizeof(inv_raw_attributes));
+	t_ind += ARRAY_SIZE(inv_raw_attributes);
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+	memcpy(&inv_attributes[t_ind], inv_pedometer_attributes,
+				sizeof(inv_pedometer_attributes));
+	t_ind += ARRAY_SIZE(inv_pedometer_attributes);
+#endif
+
+	memcpy(&inv_attributes[t_ind], inv_gyro_attributes,
+				sizeof(inv_gyro_attributes));
+	t_ind += ARRAY_SIZE(inv_gyro_attributes);
+
+	memcpy(&inv_attributes[t_ind], inv_bias_attributes,
+				sizeof(inv_bias_attributes));
+	t_ind += ARRAY_SIZE(inv_bias_attributes);
+
+#ifndef SUPPORT_ONLY_BASIC_FEATURES
+	memcpy(&inv_attributes[t_ind], inv_debug_attributes,
+				sizeof(inv_debug_attributes));
+	t_ind += ARRAY_SIZE(inv_debug_attributes);
+#endif
+
+	inv_attributes[t_ind] = NULL;
+
+	indio_dev->channels = inv_mpu_channels;
+	indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+
+	indio_dev->info = &mpu_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->currentmode = INDIO_DIRECT_MODE;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(inv_check_chip_type);
+
+int inv_create_dmp_sysfs(struct iio_dev *ind)
+{
+	// dummy
+	return 0;
+}
+EXPORT_SYMBOL_GPL(inv_create_dmp_sysfs);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device ICM20xxx driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_iio_reg_20680.h b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_iio_reg_20680.h
new file mode 100644
index 0000000..3f8ce71
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_iio_reg_20680.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _INV_MPU_IIO_REG_20680_H_
+#define _INV_MPU_IIO_REG_20680_H_
+
+/* Define SUPPORT_ONLY_BASIC_FEATURES when the HAL does not support the
+ * algorithm library for calibration and sensor fusion, so that unused
+ * sysfs entries are not exposed */
+#define SUPPORT_ONLY_BASIC_FEATURES
+
+/* Uncomment to read data registers for sensor data instead of FIFO */
+//#define SENSOR_DATA_FROM_REGISTERS
+
+/* Uncomment to enable timer based batching */
+#define TIMER_BASED_BATCHING
+
+/* Timer based batching is available only when sensor data is read from the FIFO */
+#if defined(SENSOR_DATA_FROM_REGISTERS)
+#undef TIMER_BASED_BATCHING
+#endif
+
+/*register and associated bit definition*/
+#define REG_XA_OFFS_H		0x77
+#define REG_YA_OFFS_H		0x7A
+#define REG_ZA_OFFS_H           0x7D
+#define REG_XG_OFFS_USR_H        0x13
+#define REG_YG_OFFS_USR_H        0x15
+#define REG_ZG_OFFS_USR_H        0x17
+#define REG_SAMPLE_RATE_DIV     0x19
+
+#define REG_CONFIG              0x1A
+#define EXT_SYNC_SET                      8
+
+#define REG_GYRO_CONFIG		0x1B
+#define BITS_SELF_TEST_EN		0xE0
+#define SHIFT_GYRO_FS_SEL		0x03
+
+#define REG_ACCEL_CONFIG	0x1C
+#define SHIFT_ACCEL_FS			0x03
+
+#define REG_LP_MODE_CTRL	0x1E
+#define BIT_GYRO_CYCLE_EN               0x80
+
+#define REG_ACCEL_WOM_THR	0x1F
+#define REG_ACCEL_WOM_X_THR	0x20
+#define REG_ACCEL_WOM_Y_THR	0x21
+#define REG_ACCEL_WOM_Z_THR	0x22
+
+#define REG_ACCEL_MOT_THR       0x1F
+#define REG_ACCEL_MOT_DUR       0x20
+
+#define REG_ACCEL_CONFIG_2  0x1D
+#define BIT_ACCEL_FCHOCIE_B              0x08
+
+#define REG_FIFO_EN			0x23
+#define BITS_GYRO_FIFO_EN	0x70
+#define BIT_ACCEL_FIFO_EN	0x08
+
+#define REG_FSYNC_INT		0x36
+#define BIT_FSYNC_INT                   0x80
+
+#define REG_INT_PIN_CFG		0x37
+
+#define REG_INT_ENABLE		0x38
+#define BIT_WOM_X_INT_EN		0x80
+#define BIT_WOM_Y_INT_EN		0x40
+#define BIT_WOM_Z_INT_EN		0x20
+#define BIT_WOM_ALL_INT_EN		0xE0
+#define BIT_FSYNC_INT_EN		0x8
+#define BIT_DATA_RDY_EN		        0x1
+
+#define REG_INT_STATUS          0x3A
+#define BIT_WOM_X_INT                  0x80
+#define BIT_WOM_Y_INT                  0x40
+#define BIT_WOM_Z_INT                  0x20
+
+#define REG_RAW_ACCEL           0x3B
+#define REG_RAW_TEMP            0x41
+#define REG_RAW_GYRO            0x43
+#define REG_EXT_SENS_DATA_00    0x49
+#define REG_EXT_SENS_DATA_08    0x51
+#define REG_EXT_SENS_DATA_09    0x52
+
+#define REG_ACCEL_INTEL_CTRL 0x69
+#define BIT_ACCEL_INTEL_EN              0x80
+#define BIT_ACCEL_INTEL_MODE            0x40
+
+#define REG_USER_CTRL			0x6A
+#define BIT_COND_RST				0x01
+#define BIT_FIFO_RST				0x04
+#define BIT_FIFO_EN				0x40
+
+#define REG_PWR_MGMT_1			0x6B
+#define BIT_H_RESET				0x80
+#define BIT_SLEEP					0x40
+#define BIT_LP_EN                       	0x20
+#define BIT_CLK_PLL				0x01
+#define BIT_CLK_MASK				0x07
+
+#define REG_PWR_MGMT_2			0x6C
+#define BIT_PWR_ACCEL_STBY		0x38
+#define BIT_PWR_GYRO_STBY		0x07
+#define BIT_PWR_ALL_OFF			0x3F
+#define BIT_FIFO_LP_EN			0x80
+
+#define REG_MEM_BANK_SEL	0x6D
+#define REG_MEM_START_ADDR	0x6E
+#define REG_MEM_R_W		0x6F
+
+#define REG_FIFO_COUNT_H        0x72
+#define REG_FIFO_R_W            0x74
+#define REG_WHO_AM_I              0x75
+
+#define REG_6500_XG_ST_DATA     0x50
+#define REG_6500_XA_ST_DATA     0xD
+#define REG_6500_XA_OFFS_H      0x77
+#define REG_6500_YA_OFFS_H      0x7A
+#define REG_6500_ZA_OFFS_H      0x7D
+#define REG_6500_ACCEL_CONFIG2  0x1D
+#define BIT_ACCEL_FCHOCIE_B              0x08
+#define BIT_FIFO_SIZE_1K                 0x40
+
+#define REG_LP_MODE_CFG		0x1E
+
+#define REG_6500_LP_ACCEL_ODR   0x1E
+#define REG_6500_ACCEL_WOM_THR  0x1F
+
+/* data output control reg 2 */
+#define ACCEL_ACCURACY_SET  0x4000
+#define GYRO_ACCURACY_SET   0x2000
+#define CPASS_ACCURACY_SET  0x1000
+
+/* data definitions */
+#define ACCEL_COVARIANCE 0
+#define BYTES_PER_SENSOR         6
+#define BYTES_FOR_TEMP           2
+#define FIFO_COUNT_BYTE          2
+#define HARDWARE_FIFO_SIZE       512
+#define FIFO_SIZE                (HARDWARE_FIFO_SIZE * 7 / 10)
+#define POWER_UP_TIME            100
+#define REG_UP_TIME_USEC         100
+#define LEFT_OVER_BYTES          128
+#define IIO_BUFFER_BYTES         8
+#define BASE_SAMPLE_RATE         1000
+#define DRY_RUN_TIME             50
+#define INV_IAM20680_GYRO_START_TIME 35
+#define INV_IAM20680_ACCEL_START_TIME 30
+#define MODE_1K_INIT_SAMPLE      5
+#define FIRST_SAMPLE_BUF_MS      30
+
+#ifdef BIAS_CONFIDENCE_HIGH
+#define DEFAULT_ACCURACY         3
+#else
+#define DEFAULT_ACCURACY         1
+#endif
+
+/* temperature */
+#define TEMP_SENSITIVITY        32680   // 326.8 LSB/degC * 100
+#define TEMP_OFFSET             2500    // 25 degC * 100
+
+/* enum for sensor */
+enum INV_SENSORS {
+	SENSOR_ACCEL = 0,
+	SENSOR_TEMP,
+	SENSOR_GYRO,
+	SENSOR_COMPASS,
+	SENSOR_NUM_MAX,
+	SENSOR_INVALID,
+};
+
+enum inv_filter_e {
+	INV_FILTER_256HZ_NOLPF2 = 0,
+	INV_FILTER_188HZ,
+	INV_FILTER_98HZ,
+	INV_FILTER_42HZ,
+	INV_FILTER_20HZ,
+	INV_FILTER_10HZ,
+	INV_FILTER_5HZ,
+	INV_FILTER_2100HZ_NOLPF,
+	NUM_FILTER
+};
+
+#define MPU_DEFAULT_DMP_FREQ     200
+#define PEDOMETER_FREQ           (MPU_DEFAULT_DMP_FREQ >> 2)
+#define SENSOR_FUSION_MIN_RATE   100
+#define GESTURE_ACCEL_RATE       50
+#define ESI_GYRO_RATE            1000
+#define MAX_FIFO_PACKET_READ     6
+#define MAX_BATCH_FIFO_SIZE      FIFO_SIZE
+
+#define MIN_MST_ODR_CONFIG       4
+#define MAX_MST_ODR_CONFIG       5
+/* initial rate is important. For non-DMP mode, it is set to 4 (1000/256) */
+#define MPU_INIT_SENSOR_RATE     4
+#define MAX_MST_NON_COMPASS_ODR_CONFIG 7
+#define THREE_AXES               3
+#define NINE_ELEM                (THREE_AXES * THREE_AXES)
+#define MPU_TEMP_SHIFT           16
+
+#define DMP_DIVIDER              (BASE_SAMPLE_RATE / MPU_DEFAULT_DMP_FREQ)
+#define DEFAULT_BATCH_RATE       400
+#define DEFAULT_BATCH_TIME    (MSEC_PER_SEC / DEFAULT_BATCH_RATE)
+
+#define TEMPERATURE_SCALE  3340827L
+#define TEMPERATURE_OFFSET 1376256L
+#define SECONDARY_INIT_WAIT 100
+#define MPU_SOFT_REV_ADDR               0x86
+#define MPU_SOFT_REV_MASK               0xf
+#define SW_REV_LP_EN_MODE               4
+
+/* data limit definitions */
+#define MIN_FIFO_RATE            4
+#define MAX_FIFO_RATE            MPU_DEFAULT_DMP_FREQ
+
+#define MAX_MPU_MEM              8192
+#define MAX_PRS_RATE             281
+
+enum inv_devices {
+	ICM20608D,
+	ICM20690,
+	ICM20602,
+	IAM20680,
+	INV_NUM_PARTS,
+};
+#endif
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_init_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_init_20680.c
new file mode 100644
index 0000000..58bd8d0
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_init_20680.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+#include "../inv_mpu_iio.h"
+
+static int inv_calc_gyro_sf(s8 pll)
+{
+	int a, r;
+	int value, t;
+
+	t = 102870L + 81L * pll;
+	a = (1L << 30) / t;
+	r = (1L << 30) - a * t;
+	value = a * 797 * DMP_DIVIDER;
+	value += (s64) ((a * 1011387LL * DMP_DIVIDER) >> 20);
+	value += r * 797L * DMP_DIVIDER / t;
+	value += (s32) ((s64) ((r * 1011387LL * DMP_DIVIDER) >> 20)) / t;
+	value <<= 1;
+
+	return value;
+}
+
+static int inv_read_timebase(struct inv_mpu_state *st)
+{
+
+	inv_plat_single_write(st, REG_CONFIG, 3);
+
+	st->eng_info[ENGINE_ACCEL].base_time = NSEC_PER_SEC;
+	st->eng_info[ENGINE_ACCEL].base_time_1k = NSEC_PER_SEC;
+	/* Taylor expansion to calculate base time unit */
+	st->eng_info[ENGINE_GYRO].base_time = NSEC_PER_SEC;
+	st->eng_info[ENGINE_GYRO].base_time_1k = NSEC_PER_SEC;
+	st->eng_info[ENGINE_I2C].base_time = NSEC_PER_SEC;
+	st->eng_info[ENGINE_I2C].base_time_1k = NSEC_PER_SEC;
+
+	st->eng_info[ENGINE_ACCEL].orig_rate = BASE_SAMPLE_RATE;
+	st->eng_info[ENGINE_GYRO].orig_rate = BASE_SAMPLE_RATE;
+	st->eng_info[ENGINE_I2C].orig_rate = BASE_SAMPLE_RATE;
+
+	st->gyro_sf = inv_calc_gyro_sf(0);
+
+	return 0;
+}
+
+int inv_set_gyro_sf(struct inv_mpu_state *st)
+{
+	int result;
+
+	result = inv_plat_single_write(st, REG_GYRO_CONFIG,
+				   st->chip_config.fsr << SHIFT_GYRO_FS_SEL);
+
+	return result;
+}
+
+int inv_set_accel_sf(struct inv_mpu_state *st)
+{
+	int result;
+
+	result = inv_plat_single_write(st, REG_ACCEL_CONFIG,
+				st->chip_config.accel_fs << SHIFT_ACCEL_FS);
+	return result;
+}
+
+// dummy for 20680
+int inv_set_accel_intel(struct inv_mpu_state *st)
+{
+	return 0;
+}
+
+static void inv_init_sensor_struct(struct inv_mpu_state *st)
+{
+	int i;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++)
+		st->sensor[i].rate = MPU_INIT_SENSOR_RATE;
+
+	st->sensor[SENSOR_ACCEL].sample_size = BYTES_PER_SENSOR;
+	st->sensor[SENSOR_TEMP].sample_size = BYTES_FOR_TEMP;
+	st->sensor[SENSOR_GYRO].sample_size = BYTES_PER_SENSOR;
+
+	st->sensor_l[SENSOR_L_SIXQ].base = SENSOR_GYRO;
+	st->sensor_l[SENSOR_L_PEDQ].base = SENSOR_GYRO;
+
+	st->sensor_l[SENSOR_L_SIXQ_WAKE].base = SENSOR_GYRO;
+	st->sensor_l[SENSOR_L_PEDQ_WAKE].base = SENSOR_GYRO;
+
+	st->sensor[SENSOR_ACCEL].a_en = true;
+	st->sensor[SENSOR_GYRO].a_en = false;
+
+	st->sensor[SENSOR_ACCEL].g_en = false;
+	st->sensor[SENSOR_GYRO].g_en = true;
+
+	st->sensor[SENSOR_ACCEL].c_en = false;
+	st->sensor[SENSOR_GYRO].c_en = false;
+
+	st->sensor[SENSOR_ACCEL].p_en = false;
+	st->sensor[SENSOR_GYRO].p_en = false;
+
+	st->sensor[SENSOR_ACCEL].engine_base = ENGINE_ACCEL;
+	st->sensor[SENSOR_GYRO].engine_base = ENGINE_GYRO;
+
+	st->sensor_l[SENSOR_L_ACCEL].base = SENSOR_ACCEL;
+	st->sensor_l[SENSOR_L_GESTURE_ACCEL].base = SENSOR_ACCEL;
+	st->sensor_l[SENSOR_L_GYRO].base = SENSOR_GYRO;
+	st->sensor_l[SENSOR_L_GYRO_CAL].base = SENSOR_GYRO;
+	st->sensor_l[SENSOR_L_EIS_GYRO].base = SENSOR_GYRO;
+
+	st->sensor_l[SENSOR_L_ACCEL_WAKE].base = SENSOR_ACCEL;
+	st->sensor_l[SENSOR_L_GYRO_WAKE].base = SENSOR_GYRO;
+
+	st->sensor_l[SENSOR_L_GYRO_CAL_WAKE].base = SENSOR_GYRO;
+
+	st->sensor_l[SENSOR_L_ACCEL].header = ACCEL_HDR;
+	st->sensor_l[SENSOR_L_GESTURE_ACCEL].header = ACCEL_HDR;
+	st->sensor_l[SENSOR_L_GYRO].header = GYRO_HDR;
+	st->sensor_l[SENSOR_L_GYRO_CAL].header = GYRO_CALIB_HDR;
+
+	st->sensor_l[SENSOR_L_EIS_GYRO].header = EIS_GYRO_HDR;
+	st->sensor_l[SENSOR_L_SIXQ].header = SIXQUAT_HDR;
+	st->sensor_l[SENSOR_L_THREEQ].header = LPQ_HDR;
+	st->sensor_l[SENSOR_L_NINEQ].header = NINEQUAT_HDR;
+	st->sensor_l[SENSOR_L_PEDQ].header = PEDQUAT_HDR;
+
+	st->sensor_l[SENSOR_L_ACCEL_WAKE].header = ACCEL_WAKE_HDR;
+	st->sensor_l[SENSOR_L_GYRO_WAKE].header = GYRO_WAKE_HDR;
+	st->sensor_l[SENSOR_L_GYRO_CAL_WAKE].header = GYRO_CALIB_WAKE_HDR;
+	st->sensor_l[SENSOR_L_MAG_WAKE].header = COMPASS_WAKE_HDR;
+	st->sensor_l[SENSOR_L_MAG_CAL_WAKE].header = COMPASS_CALIB_WAKE_HDR;
+	st->sensor_l[SENSOR_L_SIXQ_WAKE].header = SIXQUAT_WAKE_HDR;
+	st->sensor_l[SENSOR_L_NINEQ_WAKE].header = NINEQUAT_WAKE_HDR;
+	st->sensor_l[SENSOR_L_PEDQ_WAKE].header = PEDQUAT_WAKE_HDR;
+
+	st->sensor_l[SENSOR_L_ACCEL].wake_on = false;
+	st->sensor_l[SENSOR_L_GYRO].wake_on = false;
+	st->sensor_l[SENSOR_L_GYRO_CAL].wake_on = false;
+	st->sensor_l[SENSOR_L_MAG].wake_on = false;
+	st->sensor_l[SENSOR_L_MAG_CAL].wake_on = false;
+	st->sensor_l[SENSOR_L_EIS_GYRO].wake_on = false;
+	st->sensor_l[SENSOR_L_SIXQ].wake_on = false;
+	st->sensor_l[SENSOR_L_NINEQ].wake_on = false;
+	st->sensor_l[SENSOR_L_PEDQ].wake_on = false;
+
+	st->sensor_l[SENSOR_L_ACCEL_WAKE].wake_on = true;
+	st->sensor_l[SENSOR_L_GYRO_WAKE].wake_on = true;
+	st->sensor_l[SENSOR_L_GYRO_CAL_WAKE].wake_on = true;
+	st->sensor_l[SENSOR_L_MAG_WAKE].wake_on = true;
+	st->sensor_l[SENSOR_L_SIXQ_WAKE].wake_on = true;
+	st->sensor_l[SENSOR_L_NINEQ_WAKE].wake_on = true;
+	st->sensor_l[SENSOR_L_PEDQ_WAKE].wake_on = true;
+}
+
+static int inv_init_config(struct inv_mpu_state *st)
+{
+	int res, i;
+
+	st->batch.overflow_on = 0;
+	st->chip_config.fsr = MPU_INIT_GYRO_SCALE;
+	st->chip_config.accel_fs = MPU_INIT_ACCEL_SCALE;
+	st->ped.int_thresh = MPU_INIT_PED_INT_THRESH;
+	st->ped.step_thresh = MPU_INIT_PED_STEP_THRESH;
+	st->chip_config.low_power_gyro_on = 1;
+	st->eis.count_precision = NSEC_PER_MSEC;
+	st->firmware = 0;
+	st->fifo_count_mode = BYTE_MODE;
+#ifdef TIMER_BASED_BATCHING
+	st->batch_timeout = 0;
+	st->is_batch_timer_running = false;
+#endif
+
+	st->eng_info[ENGINE_GYRO].base_time = NSEC_PER_SEC;
+	st->eng_info[ENGINE_ACCEL].base_time = NSEC_PER_SEC;
+
+	inv_init_sensor_struct(st);
+	res = inv_read_timebase(st);
+	if (res)
+		return res;
+
+	res = inv_set_gyro_sf(st);
+	if (res)
+		return res;
+	res = inv_set_accel_sf(st);
+	if (res)
+		return res;
+	res =  inv_set_accel_intel(st);
+	if (res)
+		return res;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++)
+		st->sensor[i].ts = 0;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++)
+		st->sensor[i].previous_ts = 0;
+
+	return res;
+}
+
+int inv_mpu_initialize(struct inv_mpu_state *st)
+{
+	u8 v;
+	int result;
+	struct inv_chip_config_s *conf;
+	struct mpu_platform_data *plat;
+
+	conf = &st->chip_config;
+	plat = &st->plat_data;
+
+	/* verify whoami */
+	result = inv_plat_read(st, REG_WHO_AM_I, 1, &v);
+	if (result)
+		return result;
+	pr_info("whoami= %x\n", v);
+	if (v == 0x00 || v == 0xff)
+		return -ENODEV;
+
+	/* reset to make sure previous state is cleared */
+	result = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_H_RESET);
+	if (result)
+		return result;
+	usleep_range(REG_UP_TIME_USEC, REG_UP_TIME_USEC);
+	msleep(100);
+	/* toggle power state */
+	result = inv_set_power(st, false);
+	if (result)
+		return result;
+	result = inv_set_power(st, true);
+	if (result)
+		return result;
+
+	result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis);
+	if (result)
+		return result;
+	result = inv_init_config(st);
+	if (result)
+		return result;
+
+	result = mem_r(MPU_SOFT_REV_ADDR, 1, &v);
+	pr_info("sw_rev=%x, res=%d\n", v, result);
+	if (result)
+		return result;
+	st->chip_config.lp_en_mode_off = 0;
+
+	pr_info("%s: Mask %X, v = %X, lp mode = %d\n", __func__,
+		MPU_SOFT_REV_MASK, v, st->chip_config.lp_en_mode_off);
+	result = inv_set_power(st, false);
+
+	pr_info("%s: initialize result is %d....\n", __func__, result);
+	return 0;
+}
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_parsing_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_parsing_20680.c
new file mode 100644
index 0000000..0f17b6d
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_parsing_20680.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2017-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/math64.h>
+
+#include "../inv_mpu_iio.h"
+
+static char iden[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+
+static int inv_process_gyro(struct inv_mpu_state *st, u8 *d, u64 t)
+{
+	s16 raw[3];
+	s32 calib[3];
+	int i;
+#define BIAS_UNIT 2859
+
+	for (i = 0; i < 3; i++)
+		raw[i] = be16_to_cpup((__be16 *) (d + i * 2));
+
+	for (i = 0; i < 3; i++)
+		calib[i] = (raw[i] << 15);
+
+
+	inv_push_gyro_data(st, raw, calib, t);
+
+	return 0;
+}
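+
+/*
+ * Example for inv_process_gyro() above: a raw axis reading of 131 LSB is
+ * scaled to calib = 131 << 15 = 4292608.  The shift only moves the raw
+ * value into a higher-resolution fixed-point format; how these units are
+ * interpreted is up to inv_push_gyro_data(), which is defined elsewhere.
+ */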
+
+static int inv_check_fsync(struct inv_mpu_state *st, u8 fsync_status)
+{
+	u8 data[1];
+
+	if (!st->chip_config.eis_enable)
+		return 0;
+	inv_plat_read(st, REG_FSYNC_INT, 1, data);
+	if (data[0] & BIT_FSYNC_INT) {
+		pr_debug("fsync\n");
+		st->eis.eis_triggered = true;
+		st->eis.fsync_delay = 1;
+		st->eis.prev_state = 1;
+		st->eis.frame_count++;
+		st->eis.eis_frame = true;
+	}
+	st->header_count--;
+
+	return 0;
+}
+
+static int inv_push_sensor(struct inv_mpu_state *st, int ind, u64 t, u8 *d)
+{
+#ifdef ACCEL_BIAS_TEST
+	s16 acc[3], avg[3];
+#endif
+
+	switch (ind) {
+	case SENSOR_ACCEL:
+		inv_convert_and_push_8bytes(st, ind, d, t, iden);
+#ifdef ACCEL_BIAS_TEST
+		acc[0] = be16_to_cpup((__be16 *) (d));
+		acc[1] = be16_to_cpup((__be16 *) (d + 2));
+		acc[2] = be16_to_cpup((__be16 *) (d + 4));
+		if (inv_get_3axis_average(acc, avg, 0)) {
+			pr_debug("accel 200 samples average = %5d, %5d, %5d\n",
+				 avg[0], avg[1], avg[2]);
+		}
+#endif
+		break;
+	case SENSOR_TEMP:
+		inv_check_fsync(st, d[1]);
+		break;
+	case SENSOR_GYRO:
+		inv_process_gyro(st, d, t);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int inv_push_20680_data(struct inv_mpu_state *st, u8 *d)
+{
+	u8 *dptr;
+	int i;
+
+	dptr = d;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on) {
+			inv_get_dmp_ts(st, i);
+			if (st->sensor[i].send && (!st->ts_algo.first_sample)) {
+				st->sensor[i].sample_calib++;
+				inv_push_sensor(st, i, st->sensor[i].ts, dptr);
+			}
+			dptr += st->sensor[i].sample_size;
+		}
+	}
+	if (st->ts_algo.first_sample)
+		st->ts_algo.first_sample--;
+	st->header_count--;
+
+	return 0;
+}
+
+static int inv_process_20680_data(struct inv_mpu_state *st)
+{
+	int total_bytes, tmp, res, fifo_count, pk_size, i;
+	u8 *dptr, *d;
+	u8 data[14];
+	bool done_flag;
+	u8 v;
+#ifdef SENSOR_DATA_FROM_REGISTERS
+	u8 reg;
+	int len;
+#endif
+
+	if (st->gesture_only_on && (!st->batch.timeout)) {
+		res = inv_plat_read(st, REG_INT_STATUS, 1, data);
+		if (res)
+			return res;
+		pr_debug("gesture cnt=%d, status=%x\n",
+						st->gesture_int_count, data[0]);
+		if (data[0] & (BIT_WOM_ALL_INT_EN)) {
+			if (!st->gesture_int_count) {
+				inv_switch_power_in_lp(st, true);
+				res = inv_plat_single_write(st, REG_INT_ENABLE,
+					BIT_WOM_ALL_INT_EN | BIT_DATA_RDY_EN);
+				if (res)
+					return res;
+				v = 0;
+				if (st->chip_config.gyro_enable)
+					v |= BITS_GYRO_FIFO_EN;
+
+				if (st->chip_config.accel_enable)
+					v |= BIT_ACCEL_FIFO_EN;
+				res = inv_plat_single_write(st, REG_FIFO_EN, v);
+				if (res)
+					return res;
+				/* First wake-up from WOM; we don't need
+				 * the data in the FIFO.
+				 */
+				res = inv_reset_fifo(st, true);
+				if (res)
+					return res;
+				res = inv_switch_power_in_lp(st, false);
+				st->gesture_int_count = WOM_DELAY_THRESHOLD;
+
+				return res;
+			}
+			st->gesture_int_count = WOM_DELAY_THRESHOLD;
+		} else {
+			if (!st->gesture_int_count) {
+				inv_switch_power_in_lp(st, true);
+				res = inv_plat_single_write(st, REG_FIFO_EN, 0);
+				res = inv_plat_single_write(st, REG_INT_ENABLE,
+					BIT_WOM_ALL_INT_EN);
+				inv_switch_power_in_lp(st, false);
+
+				return res;
+			}
+			st->gesture_int_count--;
+		}
+	}
+
+	fifo_count = inv_get_last_run_time_non_dmp_record_mode(st);
+	pr_debug("fifo_count= %d\n", fifo_count);
+	if (!fifo_count) {
+		pr_debug("REG_FIFO_COUNT_H size is 0\n");
+		return 0;
+	}
+	pk_size = st->batch.pk_size;
+	if (!pk_size)
+		return -EINVAL;
+
+	if (fifo_count >= (HARDWARE_FIFO_SIZE / st->batch.pk_size)) {
+		pr_warn("fifo overflow pkt count=%d pkt sz=%d\n", fifo_count, st->batch.pk_size);
+		return -EOVERFLOW;
+	}
+
+	fifo_count *= st->batch.pk_size;
+	st->fifo_count = fifo_count;
+	d = st->fifo_data_store;
+	dptr = d;
+	total_bytes = fifo_count;
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+	len = 0;
+	if (st->sensor[SENSOR_GYRO].on) {
+		reg = REG_RAW_GYRO;
+		len += BYTES_PER_SENSOR;
+		if (st->sensor[SENSOR_ACCEL].on && !st->sensor[SENSOR_TEMP].on)
+			len += BYTES_FOR_TEMP;
+	}
+	if (st->sensor[SENSOR_TEMP].on) {
+		reg = REG_RAW_TEMP;
+		len += BYTES_FOR_TEMP;
+	}
+	if (st->sensor[SENSOR_ACCEL].on) {
+		reg = REG_RAW_ACCEL;
+		len += BYTES_PER_SENSOR;
+	}
+
+	if (len == 0) {
+		pr_debug("No sensor is enabled\n");
+		return 0;
+	}
+
+	/* read data registers */
+	res = inv_plat_read(st, reg, len, data);
+	if (res < 0) {
+		pr_err("failed to read data registers\n");
+		return res;
+	}
+
+	/* copy sensor data to buffer as FIFO data format */
+	tmp = 0;
+	if (st->sensor[SENSOR_ACCEL].on) {
+		for (i = 0; i < BYTES_PER_SENSOR; i++)
+			dptr[i] = data[tmp + i];
+		dptr += BYTES_PER_SENSOR;
+		tmp += BYTES_PER_SENSOR;
+	}
+
+	if (st->sensor[SENSOR_TEMP].on) {
+		for (i = 0; i < BYTES_FOR_TEMP; i++)
+			dptr[i] = data[tmp + i];
+		dptr += BYTES_FOR_TEMP;
+		tmp += BYTES_FOR_TEMP;
+	}
+
+	if (st->sensor[SENSOR_GYRO].on) {
+		if (st->sensor[SENSOR_ACCEL].on && !st->sensor[SENSOR_TEMP].on)
+			tmp += BYTES_FOR_TEMP;
+		for (i = 0; i < BYTES_PER_SENSOR; i++)
+			dptr[i] = data[tmp + i];
+	}
+#else
+	while (total_bytes > 0) {
+		if (total_bytes < pk_size * MAX_FIFO_PACKET_READ)
+			tmp = total_bytes;
+		else
+			tmp = pk_size * MAX_FIFO_PACKET_READ;
+		res = inv_plat_read(st, REG_FIFO_R_W, tmp, dptr);
+		if (res < 0) {
+			pr_err("failed to read REG_FIFO_R_W\n");
+			return res;
+		}
+		pr_debug("inside: %x, %x, %x, %x, %x, %x, %x, %x\n", dptr[0], dptr[1], dptr[2],
+						dptr[3], dptr[4], dptr[5], dptr[6], dptr[7]);
+		pr_debug("insid2: %x, %x, %x, %x, %x, %x, %x, %x\n", dptr[8], dptr[9], dptr[10],
+						dptr[11], dptr[12], dptr[13], dptr[14], dptr[15]);
+
+		dptr += tmp;
+		total_bytes -= tmp;
+	}
+#endif /* SENSOR_DATA_FROM_REGISTERS */
+	dptr = d;
+	pr_debug("dd: %x, %x, %x, %x, %x, %x, %x, %x\n", d[0], d[1], d[2],
+						d[3], d[4], d[5], d[6], d[7]);
+	pr_debug("dd2: %x, %x, %x, %x, %x, %x, %x, %x\n", d[8], d[9], d[10],
+					d[11], d[12], d[13], d[14], d[15]);
+	total_bytes = fifo_count;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on) {
+			st->sensor[i].count =  total_bytes / pk_size;
+		}
+	}
+	st->header_count = 0;
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on)
+			st->header_count = max(st->header_count,
+							st->sensor[i].count);
+	}
+
+	st->ts_algo.calib_counter++;
+	inv_bound_timestamp(st);
+
+	dptr = d;
+	done_flag = false;
+
+	while (!done_flag) {
+		pr_debug("total%d, pk=%d\n", total_bytes, pk_size);
+		if (total_bytes >= pk_size) {
+			res = inv_push_20680_data(st, dptr);
+			if (res)
+				return res;
+			total_bytes -= pk_size;
+			dptr += pk_size;
+		} else {
+			done_flag = true;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *  _inv_read_fifo() - Transfer data from FIFO to ring buffer.
+ */
+static void _inv_read_fifo(struct inv_mpu_state *st)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	int result;
+
+	result = wait_event_interruptible_timeout(st->wait_queue,
+					st->resume_state, msecs_to_jiffies(300));
+	if (result <= 0)
+		return;
+	mutex_lock(&indio_dev->mlock);
+#ifdef TIMER_BASED_BATCHING
+	if (st->batch_timeout) {
+		if (inv_plat_single_write(st, REG_INT_ENABLE, st->int_en))
+			pr_err("REG_INT_ENABLE write error\n");
+	}
+#endif
+	st->wake_sensor_received = false;
+	result = inv_process_20680_data(st);
+	if (result)
+		goto err_reset_fifo;
+	mutex_unlock(&indio_dev->mlock);
+
+	if (st->wake_sensor_received)
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock_timeout(&st->wake_lock, msecs_to_jiffies(200));
+#else
+		__pm_wakeup_event(&st->wake_lock, 200); /* 200 msecs */
+#endif
+	return;
+
+err_reset_fifo:
+	if ((!st->chip_config.gyro_enable) &&
+		(!st->chip_config.accel_enable) &&
+		(!st->chip_config.slave_enable) &&
+		(!st->chip_config.pressure_enable)) {
+		inv_switch_power_in_lp(st, false);
+		mutex_unlock(&indio_dev->mlock);
+
+		return;
+	}
+
+	pr_err("error, resetting FIFO\n");
+	inv_switch_power_in_lp(st, true);
+	inv_reset_fifo(st, true);
+	inv_switch_power_in_lp(st, false);
+	mutex_unlock(&indio_dev->mlock);
+
+	return;
+}
+
+irqreturn_t inv_read_fifo(int irq, void *dev_id)
+{
+	struct inv_mpu_state *st = (struct inv_mpu_state *)dev_id;
+
+	_inv_read_fifo(st);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef TIMER_BASED_BATCHING
+void inv_batch_work(struct work_struct *work)
+{
+	struct inv_mpu_state *st =
+		container_of(work, struct inv_mpu_state, batch_work);
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+
+	mutex_lock(&indio_dev->mlock);
+	if (inv_plat_single_write(st, REG_INT_ENABLE, st->int_en | BIT_DATA_RDY_EN))
+		pr_err("REG_INT_ENABLE write error\n");
+	mutex_unlock(&indio_dev->mlock);
+
+	return;
+}
+#endif
+
+int inv_flush_batch_data(struct iio_dev *indio_dev, int data)
+{
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+#ifndef SENSOR_DATA_FROM_REGISTERS
+	if (st->chip_config.gyro_enable ||
+		st->chip_config.accel_enable ||
+		st->chip_config.slave_enable ||
+		st->chip_config.pressure_enable) {
+		st->wake_sensor_received = 0;
+		inv_process_20680_data(st);
+		if (st->wake_sensor_received)
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_lock_timeout(&st->wake_lock, msecs_to_jiffies(200));
+#else
+			__pm_wakeup_event(&st->wake_lock, 200); /* 200 msecs */
+#endif
+		inv_switch_power_in_lp(st, false);
+	}
+#endif /* SENSOR_DATA_FROM_REGISTERS */
+	inv_push_marker_to_buffer(st, END_MARKER, data);
+
+	return 0;
+}
+
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_selftest_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_selftest_20680.c
new file mode 100644
index 0000000..7a90b4d
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_selftest_20680.c
@@ -0,0 +1,752 @@
+/*
+* Copyright (C) 2017-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include "../inv_mpu_iio.h"
+
+/* register settings */
+#define DEF_SELFTEST_GYRO_SENS          (32768 / 250)
+/* wait time before collecting data */
+#define MAX_PACKETS                     20
+#define SELFTEST_WAIT_TIME              (MAX_PACKETS * 10)
+#define DEF_ST_STABLE_TIME              20
+#define DEF_GYRO_SCALE                  131
+#define DEF_ST_PRECISION                1000
+#define DEF_ST_ACCEL_FS_MG              2000UL
+#define DEF_ST_SCALE                    32768
+#define DEF_ST_TRY_TIMES                2
+#define DEF_ST_ACCEL_RESULT_SHIFT       1
+#define DEF_ST_SAMPLES                  200
+
+#define DEF_ACCEL_ST_SHIFT_DELTA_MIN    500
+#define DEF_ACCEL_ST_SHIFT_DELTA_MAX    1500
+#define DEF_GYRO_CT_SHIFT_DELTA         500
+
+#define SENSOR_UP_TIME	30
+#define REG_UP_TIME		2
+
+#define DEF_ST_ACCEL_FS_MG         2000UL
+#define DEF_ACCEL_ST_SHIFT_DELTA   500
+#define ACCEL_ST_AL_MIN ((DEF_ACCEL_ST_AL_MIN * DEF_ST_SCALE \
+				 / DEF_ST_ACCEL_FS_MG) * DEF_ST_PRECISION)
+#define ACCEL_ST_AL_MAX ((DEF_ACCEL_ST_AL_MAX * DEF_ST_SCALE \
+				 / DEF_ST_ACCEL_FS_MG) * DEF_ST_PRECISION)
+
+#define THREE_AXIS               3
+#define DEF_ST_MPU6500_ACCEL_LPF        2
+#define DEF_SELFTEST_SAMPLE_RATE        0  /* 1000Hz */
+#define DEF_SELFTEST_SAMPLE_RATE_LP     3  /*  250Hz */
+#define DEF_SELFTEST_SAMPLE_RATE_ACC_LP 10 /*  250Hz LPOSC_CLKSEL */
+#define INV_MPU_SAMPLE_RATE_CHANGE_STABLE 50
+#define DEF_SELFTEST_6500_ACCEL_FS      (0 << 3)
+#define DEF_SELFTEST_GYRO_FS            (0 << 3)
+#define DEF_ST_6500_STABLE_TIME         20
+#define BIT_ACCEL_OUT           0x08
+#define BITS_GYRO_OUT           0x70
+#define THREE_AXIS               3
+#define DEF_GYRO_WAIT_TIME              10
+#define DEF_GYRO_WAIT_TIME_LP           50
+
+/* Gyro Offset Max Value (dps) */
+#define DEF_GYRO_OFFSET_MAX             20
+/* Gyro Self Test Absolute Limits ST_AL (dps) */
+#define DEF_GYRO_ST_AL                  60
+/* Accel Self Test Absolute Limits ST_AL (mg) */
+#define DEF_ACCEL_ST_AL_MIN             225
+#define DEF_ACCEL_ST_AL_MAX             675
+
+struct recover_regs {
+	u8 int_enable;		/* REG_INT_ENABLE */
+	u8 fifo_en;			/* REG_FIFO_EN */
+	u8 user_ctrl;		/* REG_USER_CTRL */
+	u8 config;			/* REG_CONFIG */
+	u8 gyro_config;		/* REG_GYRO_CONFIG */
+	u8 accel_config;	/* REG_ACCEL_CONFIG */
+	u8 accel_config_2;	/* REG_ACCEL_CONFIG_2 */
+	u8 smplrt_div;		/* REG_SAMPLE_RATE_DIV */
+	u8 lp_mode;			/* REG_LP_MODE_CTRL */
+	u8 pwr_mgmt_1;		/* REG_PWR_MGMT_1 */
+	u8 pwr_mgmt_2;		/* REG_PWR_MGMT_2 */
+};
+
+static struct recover_regs saved_regs;
+
+static const u16 mpu_st_tb[256] = {
+	2620, 2646, 2672, 2699, 2726, 2753, 2781, 2808,
+	2837, 2865, 2894, 2923, 2952, 2981, 3011, 3041,
+	3072, 3102, 3133, 3165, 3196, 3228, 3261, 3293,
+	3326, 3359, 3393, 3427, 3461, 3496, 3531, 3566,
+	3602, 3638, 3674, 3711, 3748, 3786, 3823, 3862,
+	3900, 3939, 3979, 4019, 4059, 4099, 4140, 4182,
+	4224, 4266, 4308, 4352, 4395, 4439, 4483, 4528,
+	4574, 4619, 4665, 4712, 4759, 4807, 4855, 4903,
+	4953, 5002, 5052, 5103, 5154, 5205, 5257, 5310,
+	5363, 5417, 5471, 5525, 5581, 5636, 5693, 5750,
+	5807, 5865, 5924, 5983, 6043, 6104, 6165, 6226,
+	6289, 6351, 6415, 6479, 6544, 6609, 6675, 6742,
+	6810, 6878, 6946, 7016, 7086, 7157, 7229, 7301,
+	7374, 7448, 7522, 7597, 7673, 7750, 7828, 7906,
+	7985, 8065, 8145, 8227, 8309, 8392, 8476, 8561,
+	8647, 8733, 8820, 8909, 8998, 9088, 9178, 9270,
+	9363, 9457, 9551, 9647, 9743, 9841, 9939, 10038,
+	10139, 10240, 10343, 10446, 10550, 10656, 10763, 10870,
+	10979, 11089, 11200, 11312, 11425, 11539, 11654, 11771,
+	11889, 12008, 12128, 12249, 12371, 12495, 12620, 12746,
+	12874, 13002, 13132, 13264, 13396, 13530, 13666, 13802,
+	13940, 14080, 14221, 14363, 14506, 14652, 14798, 14946,
+	15096, 15247, 15399, 15553, 15709, 15866, 16024, 16184,
+	16346, 16510, 16675, 16842, 17010, 17180, 17352, 17526,
+	17701, 17878, 18057, 18237, 18420, 18604, 18790, 18978,
+	19167, 19359, 19553, 19748, 19946, 20145, 20347, 20550,
+	20756, 20963, 21173, 21385, 21598, 21814, 22033, 22253,
+	22475, 22700, 22927, 23156, 23388, 23622, 23858, 24097,
+	24338, 24581, 24827, 25075, 25326, 25579, 25835, 26093,
+	26354, 26618, 26884, 27153, 27424, 27699, 27976, 28255,
+	28538, 28823, 29112, 29403, 29697, 29994, 30294, 30597,
+	30903, 31212, 31524, 31839, 32157, 32479, 32804
+};
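+
+/*
+ * Example lookup: an OTP code of 1 maps to mpu_st_tb[0] = 2620 and a code
+ * of 2 to mpu_st_tb[1] = 2646.  The table values are consistent with the
+ * usual InvenSense factory self-test curve 2620 * 1.01^(code - 1), rounded
+ * to integer LSB; a code of 0 is treated below as having no OTP value
+ * (otp_value_zero).
+ */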
+
+static void inv_show_saved_setting(struct inv_mpu_state *st)
+{
+	pr_debug(" REG_INT_ENABLE      : 0x%02X\n", saved_regs.int_enable);
+	pr_debug(" REG_FIFO_EN         : 0x%02X\n", saved_regs.fifo_en);
+	pr_debug(" REG_USER_CTRL       : 0x%02X\n", saved_regs.user_ctrl);
+	pr_debug(" REG_CONFIG          : 0x%02X\n", saved_regs.config);
+	pr_debug(" REG_GYRO_CONFIG     : 0x%02X\n", saved_regs.gyro_config);
+	pr_debug(" REG_ACCEL_CONFIG    : 0x%02X\n", saved_regs.accel_config);
+	pr_debug(" REG_ACCEL_CONFIG_2  : 0x%02X\n", saved_regs.accel_config_2);
+	pr_debug(" REG_SAMPLE_RATE_DIV : 0x%02X\n", saved_regs.smplrt_div);
+	pr_debug(" REG_LP_MODE_CTRL    : 0x%02X\n", saved_regs.lp_mode);
+	pr_debug(" REG_PWR_MGMT_1      : 0x%02X\n", saved_regs.pwr_mgmt_1);
+	pr_debug(" REG_PWR_MGMT_2      : 0x%02X\n", saved_regs.pwr_mgmt_2);
+}
+
+static int inv_save_setting(struct inv_mpu_state *st)
+{
+	int result;
+
+	result = inv_plat_read(st, REG_PWR_MGMT_1, 1,
+			&saved_regs.pwr_mgmt_1);
+	if (result)
+		return result;
+
+	/* wake up */
+	result = inv_plat_single_write(st, REG_PWR_MGMT_1,
+			(saved_regs.pwr_mgmt_1 & ~BIT_SLEEP));
+	if (result)
+		return result;
+
+	result = inv_plat_read(st, REG_INT_ENABLE, 1,
+			&saved_regs.int_enable);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_FIFO_EN, 1,
+			&saved_regs.fifo_en);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_USER_CTRL, 1,
+			&saved_regs.user_ctrl);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_CONFIG, 1,
+			&saved_regs.config);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_GYRO_CONFIG, 1,
+			&saved_regs.gyro_config);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_ACCEL_CONFIG, 1,
+			&saved_regs.accel_config);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_ACCEL_CONFIG_2, 1,
+			&saved_regs.accel_config_2);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_SAMPLE_RATE_DIV, 1,
+			&saved_regs.smplrt_div);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_LP_MODE_CTRL, 1,
+			&saved_regs.lp_mode);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_PWR_MGMT_2, 1,
+			&saved_regs.pwr_mgmt_2);
+	if (result)
+		return result;
+
+	inv_show_saved_setting(st);
+
+	return result;
+}
+
+static int inv_recover_setting(struct inv_mpu_state *st)
+{
+	int result;
+	/* Stop sensors */
+	result = inv_plat_single_write(st, REG_PWR_MGMT_2,
+			BIT_PWR_ACCEL_STBY | BIT_PWR_GYRO_STBY);
+	if (result)
+		return result;
+
+	/* Restore sensor configurations */
+	result = inv_plat_single_write(st, REG_INT_ENABLE,
+			saved_regs.int_enable);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_FIFO_EN,
+			saved_regs.fifo_en);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_USER_CTRL,
+			saved_regs.user_ctrl);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_CONFIG,
+			saved_regs.config);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_GYRO_CONFIG,
+			saved_regs.gyro_config);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_ACCEL_CONFIG,
+			saved_regs.accel_config);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_ACCEL_CONFIG_2,
+			saved_regs.accel_config_2);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV,
+			saved_regs.smplrt_div);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_LP_MODE_CTRL,
+			saved_regs.lp_mode);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, REG_PWR_MGMT_1,
+			saved_regs.pwr_mgmt_1);
+	if (result)
+		return result;
+
+	result = inv_plat_single_write(st, REG_PWR_MGMT_2,
+			saved_regs.pwr_mgmt_2);
+	if (result)
+		return result;
+
+	return result;
+}
+
+int inv_switch_engine(struct inv_mpu_state *st, bool en, u32 mask)
+{
+	u8 data, mgmt_1;
+	int result;
+
+	if (BIT_PWR_GYRO_STBY == mask) {
+		result = inv_plat_read(st, REG_PWR_MGMT_1, 1, &mgmt_1);
+		if (result)
+			return result;
+		mgmt_1 &= ~BIT_CLK_MASK;
+	}
+
+	if ((BIT_PWR_GYRO_STBY == mask) && (!en)) {
+		result = inv_plat_single_write(st, REG_PWR_MGMT_1, mgmt_1);
+		if (result)
+			return result;
+	}
+
+	result = inv_plat_read(st, REG_PWR_MGMT_2, 1, &data);
+	if (result)
+		return result;
+	if (en)
+		data &= (~mask);
+	else
+		data |= mask;
+	data |= BIT_FIFO_LP_EN;
+	result = inv_plat_single_write(st, REG_PWR_MGMT_2, data);
+	if (result)
+		return result;
+
+	if ((BIT_PWR_GYRO_STBY == mask) && en) {
+		/* only gyro on needs sensor up time */
+		msleep(SENSOR_UP_TIME);
+		/* after gyro is on & stable, switch internal clock to PLL */
+		mgmt_1 |= BIT_CLK_PLL;
+		result = inv_plat_single_write(st, REG_PWR_MGMT_1, mgmt_1);
+		if (result)
+			return result;
+	}
+	if ((BIT_PWR_ACCEL_STBY == mask) && en)
+		msleep(REG_UP_TIME);
+
+	return 0;
+}
+
+int inv_set_offset_reg(struct inv_mpu_state *st, int reg, int val)
+{
+	int result;
+	u8 d;
+
+	d = ((val >> 8) & 0xff);
+	result = inv_plat_single_write(st, reg, d);
+	if (result)
+		return result;
+
+	d = (val & 0xff);
+	result = inv_plat_single_write(st, reg + 1, d);
+
+	return result;
+}
+
+/**
+ * inv_check_gyro_self_test() - check gyro self-test results.
+ *                              Returns zero on success; a non-zero return
+ *                              value indicates a self-test failure.
+ * @st: main data structure.
+ * @reg_avg: average value of the normal test.
+ * @st_avg:  average value of the self-test.
+ */
+int inv_check_gyro_self_test(struct inv_mpu_state *st,
+						int *reg_avg, int *st_avg) {
+	u8 regs[3];
+	int ret_val, result;
+	int otp_value_zero = 0;
+	int st_shift_prod[3], st_shift_cust[3], i;
+
+	ret_val = 0;
+	result = inv_plat_read(st, REG_6500_XG_ST_DATA, 3, regs);
+	if (result)
+		return result;
+	pr_debug("%s self_test gyro shift_code - %02x %02x %02x\n",
+		st->hw->name, regs[0], regs[1], regs[2]);
+
+	for (i = 0; i < 3; i++) {
+		if (regs[i] != 0) {
+			st_shift_prod[i] = mpu_st_tb[regs[i] - 1];
+		} else {
+			st_shift_prod[i] = 0;
+			otp_value_zero = 1;
+		}
+	}
+	pr_debug("%s self_test gyro st_shift_prod - %+d %+d %+d\n",
+		st->hw->name, st_shift_prod[0], st_shift_prod[1],
+		st_shift_prod[2]);
+
+	for (i = 0; i < 3; i++) {
+		st_shift_cust[i] = st_avg[i] - reg_avg[i];
+		if (!otp_value_zero) {
+			/* Self Test Pass/Fail Criteria A */
+			if (st_shift_cust[i] < DEF_GYRO_CT_SHIFT_DELTA
+						* st_shift_prod[i])
+					ret_val = 1;
+		} else {
+			/* Self Test Pass/Fail Criteria B */
+			if (st_shift_cust[i] < DEF_GYRO_ST_AL *
+						DEF_SELFTEST_GYRO_SENS *
+						DEF_ST_PRECISION)
+				ret_val = 1;
+		}
+	}
+	pr_debug("%s self_test gyro st_shift_cust - %+d %+d %+d\n",
+		st->hw->name, st_shift_cust[0], st_shift_cust[1],
+		st_shift_cust[2]);
+
+	if (ret_val == 0) {
+		/* Self Test Pass/Fail Criteria C */
+		for (i = 0; i < 3; i++)
+			if (abs(reg_avg[i]) > DEF_GYRO_OFFSET_MAX *
+						DEF_SELFTEST_GYRO_SENS *
+						DEF_ST_PRECISION)
+				ret_val = 1;
+	}
+
+	return ret_val;
+}
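+
+/*
+ * Worked numbers for the gyro checks above (all bias averages carry a
+ * factor of DEF_ST_PRECISION = 1000): with st_shift_prod = 2620 LSB,
+ * criterion A requires st_shift_cust >= 500 * 2620 = 1310000, i.e. a
+ * self-test response of at least 50% of the OTP value.  With no OTP value,
+ * criterion B requires at least 60 dps * 131 LSB/dps * 1000 = 7860000.
+ * Criterion C fails the test when |reg_avg| exceeds 20 dps * 131 * 1000.
+ */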
+
+/**
+ * inv_check_accel_self_test() - check 6500 accel self-test results.
+ *                               Returns zero on success; a non-zero return
+ *                               value indicates a self-test failure.
+ * @st: main data structure.
+ * @reg_avg: average value of the normal test.
+ * @st_avg:  average value of the self-test.
+ */
+int inv_check_accel_self_test(struct inv_mpu_state *st,
+						int *reg_avg, int *st_avg) {
+	int ret_val, result;
+	int st_shift_prod[3], st_shift_cust[3], st_shift_ratio[3], i;
+	u8 regs[3];
+	int otp_value_zero = 0;
+
+	ret_val = 0;
+	result = inv_plat_read(st, REG_6500_XA_ST_DATA, 3, regs);
+	if (result)
+		return result;
+	pr_debug("%s self_test accel shift_code - %02x %02x %02x\n",
+		st->hw->name, regs[0], regs[1], regs[2]);
+
+	for (i = 0; i < 3; i++) {
+		if (regs[i] != 0) {
+			st_shift_prod[i] = mpu_st_tb[regs[i] - 1];
+		} else {
+			st_shift_prod[i] = 0;
+			otp_value_zero = 1;
+		}
+	}
+	pr_debug("%s self_test accel st_shift_prod - %+d %+d %+d\n",
+		st->hw->name, st_shift_prod[0], st_shift_prod[1],
+		st_shift_prod[2]);
+
+	if (!otp_value_zero) {
+		/* Self Test Pass/Fail Criteria A */
+		for (i = 0; i < 3; i++) {
+			st_shift_cust[i] = st_avg[i] - reg_avg[i];
+			st_shift_ratio[i] = abs(st_shift_cust[i] /
+					st_shift_prod[i] - DEF_ST_PRECISION);
+			if (st_shift_ratio[i] > DEF_ACCEL_ST_SHIFT_DELTA)
+				ret_val = 1;
+		}
+	} else {
+		/* Self Test Pass/Fail Criteria B */
+		for (i = 0; i < 3; i++) {
+			st_shift_cust[i] = abs(st_avg[i] - reg_avg[i]);
+			if (st_shift_cust[i] < ACCEL_ST_AL_MIN ||
+					st_shift_cust[i] > ACCEL_ST_AL_MAX)
+				ret_val = 1;
+		}
+	}
+	pr_debug("%s self_test accel st_shift_cust - %+d %+d %+d\n",
+		st->hw->name, st_shift_cust[0], st_shift_cust[1],
+		st_shift_cust[2]);
+
+	return ret_val;
+}
+
+/*
+ *  inv_do_test() - collect the averaged samples for self-test
+ */
+int inv_do_test(struct inv_mpu_state *st, int self_test_flag,
+		int *gyro_result, int *accel_result, int lp_mode)
+{
+	int result, i, j, packet_size;
+	u8 data[BYTES_PER_SENSOR * 2], d, dd;
+	int fifo_count, packet_count, ind, s;
+
+	packet_size = BYTES_PER_SENSOR * 2;
+
+	/* disable interrupt */
+	result = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+	if (result)
+		return result;
+	/* disable the sensor output to FIFO */
+	result = inv_plat_single_write(st, REG_FIFO_EN, 0);
+	if (result)
+		return result;
+	/* disable fifo reading */
+	result = inv_plat_single_write(st, REG_USER_CTRL, 0);
+	if (result)
+		return result;
+	/* clear FIFO */
+	result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_RST);
+	if (result)
+		return result;
+	/* setup parameters */
+	result = inv_plat_single_write(st, REG_CONFIG, INV_FILTER_98HZ);
+	if (result)
+		return result;
+
+	/* gyro lp mode */
+	if (lp_mode == 1)
+		d = BIT_GYRO_CYCLE_EN;
+	else if (lp_mode == 2)
+		d = DEF_SELFTEST_SAMPLE_RATE_ACC_LP;
+	else
+		d = 0;
+	result = inv_plat_single_write(st, REG_LP_MODE_CTRL, d);
+	if (result)
+		return result;
+
+	/* config accel LPF register */
+	if (lp_mode == 2)
+		d = BIT_ACCEL_FCHOCIE_B;
+	else
+		d = DEF_ST_MPU6500_ACCEL_LPF;
+	result = inv_plat_single_write(st, REG_6500_ACCEL_CONFIG2, d);
+	if (result)
+		return result;
+
+	if (lp_mode) {
+		result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV,
+				DEF_SELFTEST_SAMPLE_RATE_LP);
+	} else {
+		result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV,
+				DEF_SELFTEST_SAMPLE_RATE);
+	}
+	if (result)
+		return result;
+	/* wait for the sampling rate change to stabilize */
+	mdelay(INV_MPU_SAMPLE_RATE_CHANGE_STABLE);
+	result = inv_plat_single_write(st, REG_GYRO_CONFIG,
+		self_test_flag | DEF_SELFTEST_GYRO_FS);
+	if (result)
+		return result;
+
+	d = DEF_SELFTEST_6500_ACCEL_FS;
+	d |= self_test_flag;
+	result = inv_plat_single_write(st, REG_ACCEL_CONFIG, d);
+	if (result)
+		return result;
+
+	/* wait for the output to get stable */
+	msleep(DEF_ST_6500_STABLE_TIME);
+
+	/* enable FIFO reading */
+	result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_EN);
+	if (result)
+		return result;
+	/* enable sensor output to FIFO */
+	d = BITS_GYRO_OUT | BIT_ACCEL_OUT;
+	for (i = 0; i < THREE_AXIS; i++) {
+		gyro_result[i] = 0;
+		accel_result[i] = 0;
+	}
+	s = 0;
+	while (s < DEF_ST_SAMPLES) {
+		/* Stop FIFO */
+		result = inv_plat_single_write(st, REG_USER_CTRL, 0);
+		if (result)
+			return result;
+		/* clear FIFO */
+		result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_RST);
+		if (result)
+			return result;
+		/* enable FIFO reading */
+		result = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_EN);
+		if (result)
+			return result;
+
+		/* accel lp mode */
+		dd = BIT_CLK_PLL;
+		if (lp_mode == 2)
+			dd |= BIT_LP_EN;
+		else
+			dd &= ~BIT_LP_EN;
+		result = inv_plat_single_write(st, REG_PWR_MGMT_1, dd);
+		if (result)
+			return result;
+
+		result = inv_plat_single_write(st, REG_FIFO_EN, d);
+		if (result)
+			return result;
+		if (lp_mode)
+			mdelay(DEF_GYRO_WAIT_TIME_LP);
+		else
+			mdelay(DEF_GYRO_WAIT_TIME);
+
+		result = inv_plat_single_write(st, REG_FIFO_EN, 0);
+		if (result)
+			return result;
+
+		result = inv_plat_read(st, REG_FIFO_COUNT_H,
+					FIFO_COUNT_BYTE, data);
+		if (result)
+			return result;
+		fifo_count = be16_to_cpup((__be16 *)(&data[0]));
+		pr_debug("%s self_test fifo_count - %d\n",
+			 st->hw->name, fifo_count);
+		packet_count = fifo_count / packet_size;
+		i = 0;
+		while ((i < packet_count) && (s < DEF_ST_SAMPLES)) {
+			short vals[3];
+			result = inv_plat_read(st, REG_FIFO_R_W,
+				packet_size, data);
+			if (result)
+				return result;
+			ind = 0;
+
+			for (j = 0; j < THREE_AXIS; j++) {
+				vals[j] = (short)be16_to_cpup(
+					(__be16 *)(&data[ind + 2 * j]));
+				accel_result[j] += vals[j];
+			}
+			ind += BYTES_PER_SENSOR;
+			pr_debug(
+				"%s self_test accel data - %d %+d %+d %+d",
+				st->hw->name, s, vals[0], vals[1], vals[2]);
+
+			for (j = 0; j < THREE_AXIS; j++) {
+				vals[j] = (short)be16_to_cpup(
+					(__be16 *)(&data[ind + 2 * j]));
+				gyro_result[j] += vals[j];
+			}
+			pr_debug("%s self_test gyro data - %d %+d %+d %+d",
+				st->hw->name, s, vals[0], vals[1], vals[2]);
+
+			s++;
+			i++;
+		}
+	}
+
+	for (j = 0; j < THREE_AXIS; j++) {
+		accel_result[j] = accel_result[j] / s;
+		accel_result[j] *= DEF_ST_PRECISION;
+	}
+	for (j = 0; j < THREE_AXIS; j++) {
+		gyro_result[j] = gyro_result[j] / s;
+		gyro_result[j] *= DEF_ST_PRECISION;
+	}
+
+	return 0;
+}
+
+
+int inv_power_up_self_test(struct inv_mpu_state *st)
+{
+	int result;
+
+	result = inv_switch_power_in_lp(st, true);
+	if (result)
+		return result;
+
+	/* make sure no interrupts */
+	result = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+	if (result)
+		return result;
+	result = inv_switch_engine(st, true, BIT_PWR_ACCEL_STBY);
+	if (result)
+		return result;
+	result = inv_switch_engine(st, true, BIT_PWR_GYRO_STBY);
+	if (result)
+		return result;
+
+	return 0;
+}
+
+/*
+ *  inv_hw_self_test() - main function to do hardware self test
+ */
+int inv_hw_self_test(struct inv_mpu_state *st)
+{
+	int result;
+	int gyro_bias_st[THREE_AXIS], gyro_bias_regular[THREE_AXIS];
+	int accel_bias_st[THREE_AXIS], accel_bias_regular[THREE_AXIS];
+#if 0
+	int gyro_bias_regular_lp[THREE_AXIS];
+	int accel_bias_regular_lp[THREE_AXIS];
+	int dummy_bias_regular[THREE_AXIS];
+#endif
+	int test_times, i;
+	char accel_result, gyro_result;
+
+	result = inv_save_setting(st);
+	if (result)
+		return result;
+
+	result = inv_power_up_self_test(st);
+	if (result)
+		return result;
+	accel_result = 0;
+	gyro_result = 0;
+	test_times = DEF_ST_TRY_TIMES;
+	while (test_times > 0) {
+		result = inv_do_test(st, 0, gyro_bias_regular,
+			accel_bias_regular, 0);
+		if (result == -EAGAIN)
+			test_times--;
+		else
+			test_times = 0;
+	}
+	if (result)
+		goto test_fail;
+	pr_debug("%s self_test accel bias_regular - %+d %+d %+d\n",
+		st->hw->name, accel_bias_regular[0],
+		accel_bias_regular[1], accel_bias_regular[2]);
+	pr_debug("%s self_test gyro bias_regular - %+d %+d %+d\n",
+		st->hw->name, gyro_bias_regular[0], gyro_bias_regular[1],
+		gyro_bias_regular[2]);
+
+	test_times = DEF_ST_TRY_TIMES;
+	while (test_times > 0) {
+		result = inv_do_test(st, BITS_SELF_TEST_EN, gyro_bias_st,
+					accel_bias_st, 0);
+		if (result == -EAGAIN)
+			test_times--;
+		else
+			break;
+	}
+	if (result)
+		goto test_fail;
+	pr_debug("%s self_test accel bias_st - %+d %+d %+d\n",
+		st->hw->name, accel_bias_st[0], accel_bias_st[1],
+		accel_bias_st[2]);
+	pr_debug("%s self_test gyro bias_st - %+d %+d %+d\n",
+		st->hw->name, gyro_bias_st[0], gyro_bias_st[1],
+		gyro_bias_st[2]);
+
+#if 0
+	/* lp gyro mode */
+	test_times = DEF_ST_TRY_TIMES;
+	while (test_times > 0) {
+		result = inv_do_test(st, 0, gyro_bias_regular_lp,
+			dummy_bias_regular, 1);
+		if (result == -EAGAIN)
+			test_times--;
+		else
+			test_times = 0;
+	}
+	if (result)
+		goto test_fail;
+	pr_debug("%s self_test gyro bias_regular lp - %+d %+d %+d\n",
+		 st->hw->name, gyro_bias_regular_lp[0], gyro_bias_regular_lp[1],
+		 gyro_bias_regular_lp[2]);
+
+	/* lp accel mode */
+	test_times = DEF_ST_TRY_TIMES;
+	while (test_times > 0) {
+		result = inv_do_test(st, 0, dummy_bias_regular,
+			accel_bias_regular_lp, 2);
+		if (result == -EAGAIN)
+			test_times--;
+		else
+			test_times = 0;
+	}
+	if (result)
+		goto test_fail;
+	pr_debug("%s self_test accel bias_regular lp - %+d %+d %+d\n",
+		 st->hw->name, accel_bias_regular_lp[0],
+		 accel_bias_regular_lp[1], accel_bias_regular_lp[2]);
+#endif
+
+	/* copy bias */
+	for (i = 0; i < 3; i++) {
+		/* gyro : LN bias as LN is default mode */
+		st->gyro_st_bias[i] = gyro_bias_regular[i] / DEF_ST_PRECISION;
+		/* accel : LN bias as LN is default mode */
+		st->accel_st_bias[i] = accel_bias_regular[i] / DEF_ST_PRECISION;
+	}
+
+	/* Check is done on continuous mode data */
+	accel_result = !inv_check_accel_self_test(st,
+		accel_bias_regular, accel_bias_st);
+	gyro_result = !inv_check_gyro_self_test(st,
+		gyro_bias_regular, gyro_bias_st);
+
+test_fail:
+	inv_recover_setting(st);
+	return (accel_result << DEF_ST_ACCEL_RESULT_SHIFT) | gyro_result;
+}
diff --git a/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_setup_20680.c b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_setup_20680.c
new file mode 100644
index 0000000..5e9cf89
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/iam20680/inv_mpu_setup_20680.c
@@ -0,0 +1,466 @@
+/*
+* Copyright (C) 2017-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+#include "../inv_mpu_iio.h"
+
+/* set LN mode for gyro regardless of conditions */
+#define USE_GYRO_LN_MODE
+
+static int inv_calc_engine_dur(struct inv_engine_info *ei)
+{
+	if (!ei->running_rate)
+		return -EINVAL;
+	ei->dur = ei->base_time / ei->orig_rate;
+	ei->dur *= ei->divider;
+
+	return 0;
+}
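+
+/*
+ * Example for inv_calc_engine_dur() above, assuming orig_rate is the 1 kHz
+ * base rate and base_time is NSEC_PER_SEC (as set in inv_init_config()):
+ * with divider = 5, dur = (1000000000 / 1000) * 5 = 5000000 ns, i.e. one
+ * sample every 5 ms (200 Hz).
+ */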
+
+static int inv_turn_on_fifo(struct inv_mpu_state *st)
+{
+	u8 int_en, fifo_en, mode, user;
+	int r;
+
+	r = inv_plat_single_write(st, REG_FIFO_EN, 0);
+	if (r)
+		return r;
+	r = inv_plat_single_write(st, REG_USER_CTRL, BIT_FIFO_RST);
+	if (r)
+		return r;
+	fifo_en = 0;
+	int_en = 0;
+
+	if (st->gesture_only_on && (!st->batch.timeout)) {
+		st->gesture_int_count = WOM_DELAY_THRESHOLD;
+		int_en |= BIT_WOM_ALL_INT_EN;
+	}
+#ifdef TIMER_BASED_BATCHING
+	if (st->chip_config.eis_enable)
+		int_en |= BIT_FSYNC_INT_EN;
+	if (!st->batch_timeout) {
+		int_en |= BIT_DATA_RDY_EN;
+	}
+#else
+	if (st->batch.timeout) {
+		if (!st->batch.fifo_wm_th)
+			int_en = BIT_DATA_RDY_EN;
+	} else {
+		int_en = BIT_DATA_RDY_EN;
+		if (st->chip_config.eis_enable)
+			int_en |= BIT_FSYNC_INT_EN;
+	}
+#endif
+	if (st->sensor[SENSOR_GYRO].on)
+		fifo_en |= BITS_GYRO_FIFO_EN;
+
+	if (st->sensor[SENSOR_ACCEL].on)
+		fifo_en |= BIT_ACCEL_FIFO_EN;
+	r = inv_plat_single_write(st, REG_FIFO_EN, fifo_en);
+	if (r)
+		return r;
+	st->int_en = int_en;
+	r = inv_plat_single_write(st, REG_INT_ENABLE, int_en);
+	if (r)
+		return r;
+	if (st->gesture_only_on && (!st->batch.timeout)) {
+		mode = BIT_ACCEL_INTEL_EN | BIT_ACCEL_INTEL_MODE;
+	} else {
+		mode = 0;
+	}
+	r = inv_plat_single_write(st, REG_ACCEL_INTEL_CTRL, mode);
+#ifdef SENSOR_DATA_FROM_REGISTERS
+	user = 0;
+#else
+	user = BIT_FIFO_EN;
+#endif
+	r = inv_plat_single_write(st, REG_USER_CTRL, user | st->i2c_dis);
+#ifdef TIMER_BASED_BATCHING
+	if (fifo_en && st->batch_timeout) {
+		if (st->is_batch_timer_running)
+			hrtimer_cancel(&st->hr_batch_timer);
+		st->is_batch_timer_running = true;
+		hrtimer_start(&st->hr_batch_timer,
+			ns_to_ktime(st->batch_timeout), HRTIMER_MODE_REL);
+	} else {
+		if (st->is_batch_timer_running)
+			hrtimer_cancel(&st->hr_batch_timer);
+		st->is_batch_timer_running = false;
+	}
+#endif
+
+	return r;
+}
+
+/*
+ *  inv_reset_fifo() - Reset FIFO related registers.
+ */
+int inv_reset_fifo(struct inv_mpu_state *st, bool turn_off)
+{
+	int r, i;
+	struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+	int dur_ms;
+
+	r = inv_turn_on_fifo(st);
+	if (r)
+		return r;
+
+	ts_algo->last_run_time = get_time_ns();
+	ts_algo->reset_ts = ts_algo->last_run_time;
+	if (st->mode_1k_on)
+		ts_algo->first_sample = MODE_1K_INIT_SAMPLE;
+	else
+		ts_algo->first_sample = 1;
+
+	dur_ms = st->smplrt_div + 1;
+	if ((ts_algo->first_sample * dur_ms) < FIRST_SAMPLE_BUF_MS)
+		ts_algo->first_sample = FIRST_SAMPLE_BUF_MS / dur_ms;
+	if (ts_algo->first_sample == 0)
+		ts_algo->first_sample = 1;
+
+	st->last_temp_comp_time = ts_algo->last_run_time;
+	st->left_over_size = 0;
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		st->sensor[i].calib_flag = 0;
+		st->sensor[i].sample_calib = 0;
+		st->sensor[i].time_calib = ts_algo->last_run_time;
+	}
+
+	ts_algo->calib_counter = 0;
+
+	return 0;
+}
+
+static int inv_turn_on_engine(struct inv_mpu_state *st)
+{
+	u8 v, w;
+	int r;
+	unsigned int wait_ms;
+
+	if (st->chip_config.gyro_enable | st->chip_config.accel_enable) {
+		w = 0;
+		if (!st->chip_config.gyro_enable)
+			w |= BIT_PWR_GYRO_STBY;
+		if (!st->chip_config.accel_enable)
+			w |= BIT_PWR_ACCEL_STBY;
+	} else if (st->chip_config.compass_enable) {
+		w = BIT_PWR_GYRO_STBY;
+	} else {
+		w = (BIT_PWR_GYRO_STBY | BIT_PWR_ACCEL_STBY);
+	}
+
+	r = inv_plat_read(st, REG_PWR_MGMT_2, 1, &v);
+	if (r)
+		return r;
+	r = inv_plat_single_write(st, REG_PWR_MGMT_2, w);
+	if (r)
+		return r;
+
+	wait_ms = 0;
+	if (st->chip_config.gyro_enable
+		&& (v & BIT_PWR_GYRO_STBY)) {
+		wait_ms = INV_IAM20680_GYRO_START_TIME;
+	}
+	if (st->chip_config.accel_enable
+		&& (v & BIT_PWR_ACCEL_STBY)) {
+		if (INV_IAM20680_ACCEL_START_TIME > wait_ms)
+			wait_ms = INV_IAM20680_ACCEL_START_TIME;
+	}
+	if (wait_ms)
+		msleep(wait_ms);
+
+	if (st->chip_config.has_compass) {
+		if (st->chip_config.compass_enable)
+			r = st->slave_compass->resume(st);
+		else
+			r = st->slave_compass->suspend(st);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static int inv_setup_dmp_rate(struct inv_mpu_state *st)
+{
+	int i;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on) {
+			st->cntl |= st->sensor[i].output;
+			st->sensor[i].dur =
+				st->eng_info[st->sensor[i].engine_base].dur;
+			st->sensor[i].div = 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *  inv_set_lpf() - set low-pass filter based on FIFO rate.
+ */
+static int inv_set_lpf(struct inv_mpu_state *st, int rate)
+{
+	const short hz[] = {188, 98, 42, 20, 10, 5};
+	const int   d[] = {INV_FILTER_188HZ, INV_FILTER_98HZ,
+			INV_FILTER_42HZ, INV_FILTER_20HZ,
+			INV_FILTER_10HZ, INV_FILTER_5HZ};
+	int i, h, data, result;
+
+#ifdef USE_GYRO_LN_MODE
+	if (1) {
+#else
+	if (st->chip_config.eis_enable || st->ois.en || st->mode_1k_on) {
+#endif
+		h = (rate >> 1);
+		i = 0;
+		while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
+			i++;
+		data = d[i];
+		data |= EXT_SYNC_SET;
+		result = inv_plat_single_write(st, REG_CONFIG, data);
+		if (result)
+			return result;
+
+		st->chip_config.lpf = data;
+		result = inv_plat_single_write(st, REG_LP_MODE_CTRL, 0);
+	} else {
+		result = inv_plat_single_write(st, REG_LP_MODE_CTRL,
+							BIT_GYRO_CYCLE_EN);
+		if (result)
+			return result;
+		data = 0;
+		result = inv_plat_single_write(st, REG_CONFIG, data | 3);
+	}
+
+	return result;
+}
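+
+/*
+ * Example for inv_set_lpf() above: with a gyro running rate of 200 Hz,
+ * h = 100; the loop stops at hz[1] = 98, so INV_FILTER_98HZ is programmed
+ * into REG_CONFIG (together with EXT_SYNC_SET), i.e. the filter roughly
+ * tracks half the sample rate.
+ */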
+
+static int inv_set_div(struct inv_mpu_state *st, int a_d, int g_d)
+{
+	int result, div;
+
+	if (st->chip_config.gyro_enable)
+		div = g_d;
+	else
+		div = a_d;
+	if (st->chip_config.eis_enable)
+		div = 0;
+
+	st->smplrt_div = div;
+	pr_debug("div= %d\n", div);
+	result = inv_plat_single_write(st, REG_SAMPLE_RATE_DIV, div);
+
+	return result;
+}
+
+/* 20680 does not support batching */
+static int inv_set_batch(struct inv_mpu_state *st)
+{
+#ifdef TIMER_BASED_BATCHING
+	u64 timeout;
+	int required_fifo_size;
+
+	if (st->batch.timeout) {
+		required_fifo_size = st->batch.timeout * st->eng_info[ENGINE_GYRO].running_rate
+					* st->batch.pk_size / 1000;
+		if (required_fifo_size > MAX_BATCH_FIFO_SIZE) {
+			required_fifo_size = MAX_BATCH_FIFO_SIZE;
+			timeout = (required_fifo_size / st->batch.pk_size) * (1000 / st->eng_info[ENGINE_GYRO].running_rate);
+		} else {
+			timeout = st->batch.timeout;
+		}
+	} else {
+		timeout = 1000 / st->eng_info[ENGINE_GYRO].running_rate;
+	}
+	if (timeout <= 1000 / st->eng_info[ENGINE_GYRO].running_rate)
+		st->batch_timeout = 0;
+	else
+		st->batch_timeout = timeout * 1000000; /* ms to ns */
+#endif
+	st->batch.fifo_wm_th = 0;
+
+	return 0;
+}
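+
+/*
+ * Illustration of the timer-based batching math above, assuming a 100 Hz
+ * running rate, a 14-byte packet and a requested batch.timeout of 500 ms:
+ * required_fifo_size = 500 * 100 * 14 / 1000 = 700 bytes.  If that fits in
+ * MAX_BATCH_FIFO_SIZE the timeout stays at 500 ms and batch_timeout becomes
+ * 500000000 ns; otherwise the timeout is shortened to what the FIFO can
+ * hold.  A timeout at or below one sample period (10 ms here) leaves
+ * batch_timeout at 0, i.e. an interrupt per sample.
+ */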
+
+static int inv_set_rate(struct inv_mpu_state *st)
+{
+	int g_d, a_d, result, i;
+
+	result = inv_setup_dmp_rate(st);
+	if (result)
+		return result;
+
+	g_d = st->eng_info[ENGINE_GYRO].divider - 1;
+	a_d = st->eng_info[ENGINE_ACCEL].divider - 1;
+	result = inv_set_div(st, a_d, g_d);
+	if (result)
+		return result;
+	result = inv_set_lpf(st, st->eng_info[ENGINE_GYRO].running_rate);
+	if (result)
+		return result;
+	/* set ADLPF at this point so it does not change after the accel
+	 * is enabled
+	 */
+	result = inv_set_accel_config2(st, false);
+	st->batch.pk_size = 0;
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on)
+			st->batch.pk_size +=  st->sensor[i].sample_size;
+	}
+
+	inv_set_batch(st);
+
+	return result;
+}
+
+static int inv_determine_engine(struct inv_mpu_state *st)
+{
+	int i;
+	bool a_en, g_en;
+	int accel_rate, gyro_rate;
+
+	a_en = false;
+	g_en = false;
+	gyro_rate = MPU_INIT_SENSOR_RATE;
+	accel_rate = MPU_INIT_SENSOR_RATE;
+	/*
+	 * loop over the streaming sensors to see which engines need to be
+	 * turned on
+	 */
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on) {
+			a_en |= st->sensor[i].a_en;
+			g_en |= st->sensor[i].g_en;
+		}
+	}
+
+	if (st->chip_config.eis_enable) {
+		g_en = true;
+		st->eis.frame_count = 0;
+		st->eis.fsync_delay = 0;
+		st->eis.gyro_counter = 0;
+		st->eis.voting_count = 0;
+		st->eis.voting_count_sub = 0;
+		gyro_rate = BASE_SAMPLE_RATE;
+	} else {
+		st->eis.eis_triggered = false;
+		st->eis.prev_state = false;
+	}
+
+	accel_rate = st->sensor[SENSOR_ACCEL].rate;
+	gyro_rate  = max(gyro_rate, st->sensor[SENSOR_GYRO].rate);
+
+	st->ts_algo.clock_base = ENGINE_ACCEL;
+
+	if (g_en) {
+		/* gyro engine needs to be fastest */
+		if (a_en)
+			gyro_rate = max(gyro_rate, accel_rate);
+		accel_rate = gyro_rate;
+		st->ts_algo.clock_base = ENGINE_GYRO;
+	} else if (a_en) {
+		/* accel engine needs to be fastest if gyro engine is off */
+		gyro_rate = accel_rate;
+		st->ts_algo.clock_base = ENGINE_ACCEL;
+	}
+
+	st->eng_info[ENGINE_GYRO].running_rate = gyro_rate;
+	st->eng_info[ENGINE_ACCEL].running_rate = accel_rate;
+	if ((gyro_rate >= BASE_SAMPLE_RATE) ||
+					(accel_rate >= BASE_SAMPLE_RATE))
+		st->mode_1k_on = true;
+	else
+		st->mode_1k_on = false;
+	/* engine divider for pressure and compass is set later */
+	if (st->chip_config.eis_enable || st->mode_1k_on) {
+		st->eng_info[ENGINE_GYRO].divider = 1;
+		st->eng_info[ENGINE_ACCEL].divider = 1;
+		/* need to update rate and div for 1 kHz mode */
+		for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+			if (st->sensor_l[i].on) {
+				st->sensor_l[i].counter = 0;
+				if (st->sensor_l[i].rate)
+					st->sensor_l[i].div =
+						BASE_SAMPLE_RATE
+						/ st->sensor_l[i].rate;
+				else
+					st->sensor_l[i].div = 0xffff;
+			}
+		}
+	} else {
+		st->eng_info[ENGINE_GYRO].divider = BASE_SAMPLE_RATE /
+			st->eng_info[ENGINE_GYRO].running_rate;
+		st->eng_info[ENGINE_ACCEL].divider = BASE_SAMPLE_RATE /
+			st->eng_info[ENGINE_ACCEL].running_rate;
+	}
+
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++)
+		st->sensor_l[i].counter = 0;
+
+	inv_calc_engine_dur(&st->eng_info[ENGINE_GYRO]);
+	inv_calc_engine_dur(&st->eng_info[ENGINE_ACCEL]);
+
+	pr_debug("gen: %d aen: %d grate: %d arate: %d\n",
+				g_en, a_en, gyro_rate, accel_rate);
+
+	st->chip_config.gyro_enable = g_en;
+	st->chip_config.accel_enable = a_en;
+
+	return 0;
+}
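+
+/*
+ * Example for inv_determine_engine() above, assuming BASE_SAMPLE_RATE is
+ * the 1 kHz base rate of this chip family: with the gyro requested at
+ * 200 Hz and the accel at 50 Hz, both engines run at 200 Hz (the gyro
+ * engine must be the fastest and the accel follows it), so each divider
+ * becomes 1000 / 200 = 5.  The per-sensor sensor_l dividers are only
+ * recomputed here for the EIS / 1 kHz path.
+ */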
+
+/*
+ *  set_inv_enable() - enable function.
+ */
+int set_inv_enable(struct iio_dev *indio_dev)
+{
+	int result;
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+	result = inv_switch_power_in_lp(st, true);
+	if (result)
+		return result;
+	inv_stop_interrupt(st);
+	inv_determine_engine(st);
+	result = inv_set_rate(st);
+	if (result) {
+		pr_err("inv_set_rate error\n");
+		return result;
+	}
+	result = inv_turn_on_engine(st);
+	if (result) {
+		pr_err("inv_turn_on_engine error\n");
+		return result;
+	}
+	result = inv_reset_fifo(st, false);
+	if (result)
+		return result;
+	result = inv_switch_power_in_lp(st, false);
+	if ((!st->chip_config.gyro_enable) &&
+		(!st->chip_config.accel_enable)) {
+		inv_set_power(st, false);
+		return 0;
+	}
+
+	return result;
+}
+
+/* dummy functions for 20608D */
+int inv_enable_pedometer_interrupt(struct inv_mpu_state *st, bool en)
+{
+	return 0;
+}
+
+int inv_dmp_read(struct inv_mpu_state *st, int off, int size, u8 *buf)
+{
+	return 0;
+}
+
+int inv_firmware_load(struct inv_mpu_state *st)
+{
+	return 0;
+}
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_common.c b/drivers/iio/imu/inv_mpu/inv_mpu_common.c
new file mode 100644
index 0000000..33db034
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_common.c
@@ -0,0 +1,988 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+#include "inv_mpu_iio.h"
+#ifdef CONFIG_RTC_INTF_ALARM
+#include <linux/android_alarm.h>
+#endif
+#include <linux/export.h>
+
+#ifdef CONFIG_RTC_INTF_ALARM
+s64 get_time_ns(void)
+{
+	struct timespec ts;
+
+	/* get_monotonic_boottime(&ts); */
+
+	/* Workaround for some platforms on which the monotonic clock and
+	 * the Android SystemClock have a gap.
+	 * Use ktime_to_timespec(alarm_get_elapsed_realtime()) instead of
+	 * get_monotonic_boottime() on these platforms.
+	 */
+
+	ts = ktime_to_timespec(alarm_get_elapsed_realtime());
+
+	return timespec_to_ns(&ts);
+}
+#else
+s64 get_time_ns(void)
+{
+	struct timespec ts;
+
+	get_monotonic_boottime(&ts);
+
+	/* Workaround for some platforms on which the monotonic clock and
+	 * the Android SystemClock have a gap.
+	 * Use ktime_to_timespec(alarm_get_elapsed_realtime()) instead of
+	 * get_monotonic_boottime() on such platforms.
+	 */
+	return timespec_to_ns(&ts);
+}
+
+#endif
+
+#ifdef ACCEL_BIAS_TEST
+int inv_get_3axis_average(s16 src[], s16 dst[], s16 reset)
+{
+#define BUFFER_SIZE 200
+	static s16 buffer[BUFFER_SIZE][3];
+	static s16 current_position = 0;
+	static s16 ready = 0;
+	int sum[3] = {0,};
+	int i;
+
+	if (reset) {
+		current_position = 0;
+		ready = 0;
+	}
+	buffer[current_position][0] = src[0];
+	buffer[current_position][1] = src[1];
+	buffer[current_position][2] = src[2];
+	current_position++;
+	if (current_position == BUFFER_SIZE) {
+		ready = 1;
+		current_position = 0;
+	}
+	if (ready) {
+		for (i = 0; i < BUFFER_SIZE; i++) {
+			sum[0] += buffer[i][0];
+			sum[1] += buffer[i][1];
+			sum[2] += buffer[i][2];
+		}
+		dst[0] = sum[0] / BUFFER_SIZE;
+		dst[1] = sum[1] / BUFFER_SIZE;
+		dst[2] = sum[2] / BUFFER_SIZE;
+		return 1;
+	}
+	return 0;
+}
+#endif
+
+int inv_q30_mult(int a, int b)
+{
+#define DMP_MULTI_SHIFT                 30
+	u64 temp;
+	int result;
+
+	temp = ((u64)a) * b;
+	result = (int)(temp >> DMP_MULTI_SHIFT);
+
+	return result;
+}
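+
+/*
+ * Example for inv_q30_mult() above: in Q30 fixed point, 0.5 is 1 << 29.
+ * inv_q30_mult(1 << 29, 1 << 29) = ((u64)1 << 58) >> 30 = 1 << 28, which
+ * is 0.25 in Q30, as expected for 0.5 * 0.5.
+ */
+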
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || \
+					defined(CONFIG_INV_MPU_IIO_ICM20690)
+/* inv_read_secondary(): set secondary registers for reading.
+ * The chip must be set to bank 3 before calling.
+ */
+int inv_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+		       int reg, int len)
+{
+	int result;
+
+	result = inv_plat_single_write(st, st->slv_reg[ind].addr,
+				       INV_MPU_BIT_I2C_READ | addr);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, st->slv_reg[ind].reg, reg);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, st->slv_reg[ind].ctrl,
+				       INV_MPU_BIT_SLV_EN | len);
+
+	return result;
+}
+
+int inv_execute_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+			       int reg, int len, u8 *d)
+{
+	int result;
+
+	inv_set_bank(st, BANK_SEL_3);
+	result = inv_read_secondary(st, ind, addr, reg, len);
+	if (result)
+		return result;
+	inv_set_bank(st, BANK_SEL_0);
+	result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis |
+				       BIT_I2C_MST_EN);
+	msleep(SECONDARY_INIT_WAIT);
+	result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis);
+	if (result)
+		return result;
+	result = inv_plat_read(st, REG_EXT_SLV_SENS_DATA_00, len, d);
+
+	return result;
+}
+
+/* inv_write_secondary(): set secondary registers for writing.
+ * The chip must be set to bank 3 before calling.
+ */
+int inv_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+			int reg, int v)
+{
+	int result;
+
+	result = inv_plat_single_write(st, st->slv_reg[ind].addr, addr);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, st->slv_reg[ind].reg, reg);
+	if (result)
+		return result;
+	result = inv_plat_single_write(st, st->slv_reg[ind].ctrl,
+				       INV_MPU_BIT_SLV_EN | 1);
+
+	result = inv_plat_single_write(st, st->slv_reg[ind].d0, v);
+
+	return result;
+}
+
+int inv_execute_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+				int reg, int v)
+{
+	int result;
+
+	inv_set_bank(st, BANK_SEL_3);
+	result = inv_write_secondary(st, ind, addr, reg, v);
+	if (result)
+		return result;
+	inv_set_bank(st, BANK_SEL_0);
+	result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis |
+				       BIT_I2C_MST_EN);
+	msleep(SECONDARY_INIT_WAIT);
+	result = inv_plat_single_write(st, REG_USER_CTRL, st->i2c_dis);
+
+	return result;
+}
+
+int inv_set_bank(struct inv_mpu_state *st, u8 bank)
+{
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+	int r;
+
+	r = inv_plat_single_write(st, REG_BANK_SEL, bank);
+
+	return r;
+#else
+	return 0;
+#endif
+}
+#endif
+
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+/**
+ *  inv_write_cntl() - Write control word to designated address.
+ *  @st:	Device driver instance.
+ *  @wd:        control word.
+ *  @en:	enable/disable.
+ *  @cntl:	control address to be written.
+ */
+int inv_write_cntl(struct inv_mpu_state *st, u16 wd, bool en, int cntl)
+{
+	int result;
+	u8 reg[2], d_out[2];
+
+	result = mem_r(cntl, 2, d_out);
+	if (result)
+		return result;
+	reg[0] = ((wd >> 8) & 0xff);
+	reg[1] = (wd & 0xff);
+	if (!en) {
+		d_out[0] &= ~reg[0];
+		d_out[1] &= ~reg[1];
+	} else {
+		d_out[0] |= reg[0];
+		d_out[1] |= reg[1];
+	}
+	result = mem_w(cntl, 2, d_out);
+
+	return result;
+}
+#endif
+
+int inv_set_power(struct inv_mpu_state *st, bool power_on)
+{
+	u8 d;
+	int r;
+
+	if ((!power_on) == st->chip_config.is_asleep)
+		return 0;
+
+	d = BIT_CLK_PLL;
+	if (!power_on)
+		d |= BIT_SLEEP;
+
+	r = inv_plat_single_write(st, REG_PWR_MGMT_1, d);
+	if (r)
+		return r;
+
+	if (power_on)
+		usleep_range(REG_UP_TIME_USEC, REG_UP_TIME_USEC);
+
+	st->chip_config.is_asleep = !power_on;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(inv_set_power);
+
+int inv_stop_interrupt(struct inv_mpu_state *st)
+{
+	int res;
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+	/* disable_irq_wake alone should already work; however, it might
+	 * require a system configuration change.  On the driver side, we
+	 * disable the IRQ altogether for non-wakeup sensors.
+	 */
+	res = inv_plat_read(st, REG_INT_ENABLE, 1, &st->int_en);
+	if (res)
+		return res;
+	res = inv_plat_read(st, REG_INT_ENABLE_2, 1, &st->int_en_2);
+	if (res)
+		return res;
+	res = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+	if (res)
+		return res;
+	res = inv_plat_single_write(st, REG_INT_ENABLE_2, 0);
+	if (res)
+		return res;
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20608D)
+	res = inv_plat_read(st, REG_INT_ENABLE, 1, &st->int_en);
+	if (res)
+		return res;
+	res = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+	if (res)
+		return res;
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+	|| defined(CONFIG_INV_MPU_IIO_ICM20690) \
+	|| defined(CONFIG_INV_MPU_IIO_IAM20680)	
+	res = inv_plat_read(st, REG_INT_ENABLE, 1, &st->int_en);
+	if (res)
+		return res;
+	res = inv_plat_single_write(st, REG_INT_ENABLE, 0);
+	if (res)
+		return res;
+#endif
+	return 0;
+}
+int inv_reenable_interrupt(struct inv_mpu_state *st)
+{
+	int res = 0;
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+	res = inv_plat_single_write(st, REG_INT_ENABLE, st->int_en);
+	if (res)
+		return res;
+	res = inv_plat_single_write(st, REG_INT_ENABLE_2, st->int_en_2);
+	if (res)
+		return res;
+#elif defined(CONFIG_INV_MPU_IIO_ICM20608D)
+	res = inv_plat_single_write(st, REG_INT_ENABLE, st->int_en);
+	if (res)
+		return res;
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+	|| defined(CONFIG_INV_MPU_IIO_ICM20690) \
+	|| defined(CONFIG_INV_MPU_IIO_IAM20680)	
+	res = inv_plat_single_write(st, REG_INT_ENABLE, st->int_en);
+	if (res)
+		return res;
+#endif
+	return res;
+}
+
+static int inv_lp_en_off_mode(struct inv_mpu_state *st, bool on)
+{
+	int r;
+
+	if (!st->chip_config.is_asleep)
+		return 0;
+
+	r = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_CLK_PLL);
+	st->chip_config.is_asleep = 0;
+
+	return r;
+}
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+static int inv_lp_en_on_mode(struct inv_mpu_state *st, bool on)
+{
+	int r = 0;
+	u8 w;
+
+	if ((!st->chip_config.is_asleep) &&
+	    ((!on) == st->chip_config.lp_en_set))
+		return 0;
+
+	w = BIT_CLK_PLL;
+	if ((!on) && (!st->eis.eis_triggered))
+		w |= BIT_LP_EN;
+	r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+	st->chip_config.is_asleep = 0;
+	st->chip_config.lp_en_set = (!on);
+	return r;
+}
+#endif
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+	|| defined(CONFIG_INV_MPU_IIO_ICM20690) \
+	|| defined(CONFIG_INV_MPU_IIO_IAM20680)	
+int inv_set_accel_config2(struct inv_mpu_state *st, bool cycle_mode)
+{
+	int cycle_freq[] = {275, 192, 111, 59};
+	int cont_freq[] = {219, 219, 99, 45, 22, 11, 6};
+	int i, r, rate;
+	u8 v;
+
+	v = 0;
+#ifdef CONFIG_INV_MPU_IIO_ICM20690
+	v |= BIT_FIFO_SIZE_1K;
+#endif
+	if (cycle_mode) {
+		rate = (st->eng_info[ENGINE_ACCEL].running_rate << 1);
+		i = ARRAY_SIZE(cycle_freq) - 1;
+		while (i > 0) {
+			if (rate < cycle_freq[i]) {
+				break;
+			}
+			i--;
+		}
+		r = inv_plat_single_write(st, REG_ACCEL_CONFIG_2, v |
+								(i << 4) | 7);
+		if (r)
+			return r;
+	} else {
+		rate = (st->eng_info[ENGINE_ACCEL].running_rate >> 1);
+		for (i = 1; i < ARRAY_SIZE(cont_freq); i++) {
+			if (rate >= cont_freq[i])
+				break;
+		}
+		if (i > 6)
+			i = 6;
+		r = inv_plat_single_write(st, REG_ACCEL_CONFIG_2, v | i);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+static int inv_lp_en_on_mode(struct inv_mpu_state *st, bool on)
+{
+	int r = 0;
+	u8 w;
+	bool cond_check;
+
+	if ((!st->chip_config.is_asleep) &&
+					((!on) == st->chip_config.lp_en_set))
+		return 0;
+	cond_check = (!on) && st->cycle_on;
+
+	w = BIT_CLK_PLL;
+	r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+	if (cond_check) {
+		w |= BIT_LP_EN;
+		inv_set_accel_config2(st, true);
+		st->chip_config.lp_en_set = true;
+		r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+	} else {
+		inv_set_accel_config2(st, false);
+#ifdef CONFIG_INV_MPU_IIO_ICM20690
+		r = inv_plat_single_write(st, REG_PWR_MGMT_1, w | BIT_SLEEP);
+		if (r)
+			return r;
+#endif
+		st->chip_config.lp_en_set = false;
+		r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+		msleep(10);
+	}
+	st->chip_config.is_asleep = 0;
+
+	return r;
+}
+#endif
+#ifdef CONFIG_INV_MPU_IIO_ICM20608D
+static int inv_set_accel_config2(struct inv_mpu_state *st)
+{
+	int cont_freq[] = {219, 219, 99, 45, 22, 11, 6};
+	int dec2_cfg = 0;
+	int i, r, rate;
+
+	rate = (st->eng_info[ENGINE_ACCEL].running_rate << 1);
+	i = 0;
+	if (!st->chip_config.eis_enable) {
+		while ((rate < cont_freq[i]) && (i < ARRAY_SIZE(cont_freq) - 1))
+			i++;
+		dec2_cfg = 2 << 4; /* 4x */
+	}
+	r = inv_plat_single_write(st, REG_ACCEL_CONFIG_2, i | dec2_cfg);
+	if (r)
+		return r;
+	return 0;
+}
+static int inv_lp_en_on_mode(struct inv_mpu_state *st, bool on)
+{
+	int r = 0;
+	u8 w;
+
+	w = BIT_CLK_PLL;
+	if ((!on) && (!st->chip_config.eis_enable))
+		w |= BIT_LP_EN;
+	inv_set_accel_config2(st);
+	r = inv_plat_single_write(st, REG_PWR_MGMT_1, w);
+
+	return r;
+}
+#endif
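+/*
+ * Power/LP_EN switching entry point: use the debug (LP_EN forced off) path
+ * when lp_en_mode_off is set, otherwise the normal low-power path.
+ */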
+int inv_switch_power_in_lp(struct inv_mpu_state *st, bool on)
+{
+	int r;
+
+	if (st->chip_config.lp_en_mode_off)
+		r = inv_lp_en_off_mode(st, on);
+	else
+		r = inv_lp_en_on_mode(st, on);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(inv_switch_power_in_lp);
+
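+/* helpers to transfer big-endian values to/from DMP memory via mem_w/mem_r */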
+int write_be16_to_mem(struct inv_mpu_state *st, u16 data, int addr)
+{
+	u8 d[2];
+
+	d[0] = (data >> 8) & 0xff;
+	d[1] = data & 0xff;
+
+	return mem_w(addr, sizeof(d), d);
+}
+
+int write_be32_to_mem(struct inv_mpu_state *st, u32 data, int addr)
+{
+	cpu_to_be32s(&data);
+	return mem_w(addr, sizeof(data), (u8 *)&data);
+}
+
+int read_be16_from_mem(struct inv_mpu_state *st, u16 *o, int addr)
+{
+	int result;
+	u8 d[2];
+
+	result = mem_r(addr, sizeof(d), d);
+	*o = d[0] << 8 | d[1];
+
+	return result;
+}
+
+int read_be32_from_mem(struct inv_mpu_state *st, u32 *o, int addr)
+{
+	int result;
+	u32 d = 0;
+
+	result = mem_r(addr, 4, (u8 *) &d);
+	*o = be32_to_cpup((__be32 *)(&d));
+
+	return result;
+}
+
+int be32_to_int(u8 *d)
+{
+	return (d[0] << 24) | (d[1] << 16) | (d[2] << 8) | d[3];
+}
+
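+/* difference between two 32-bit counters, allowing for a single wrap-around */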
+u32 inv_get_cntr_diff(u32 curr_counter, u32 prev)
+{
+	u32 diff;
+
+	if (curr_counter > prev)
+		diff = curr_counter - prev;
+	else
+		diff = 0xffffffff - prev + curr_counter + 1;
+
+	return diff;
+}
+
+int inv_write_2bytes(struct inv_mpu_state *st, int addr, int data)
+{
+	u8 d[2];
+
+	if (data < 0 || data > USHRT_MAX)
+		return -EINVAL;
+
+	d[0] = (u8) ((data >> 8) & 0xff);
+	d[1] = (u8) (data & 0xff);
+
+	return mem_w(addr, ARRAY_SIZE(d), d);
+}
+
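+/*
+ * Voting logic that pairs FSYNC (EIS) events with gyro samples: candidate
+ * gyro-counter / fsync-delay values are collected and only adopted once
+ * they have won enough votes, so occasional outliers do not upset the
+ * established timing relationship.
+ */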
+int inv_process_eis(struct inv_mpu_state *st, u16 delay)
+{
+	int tmp1, tmp2, tmp3;
+
+	switch (st->eis.voting_state) {
+	case 0:
+		st->eis.gyro_counter_s[0] = st->eis.gyro_counter;
+		st->eis.fsync_delay_s[0] = delay - st->eis.fsync_delay;
+		st->eis.voting_count = 1;
+		st->eis.voting_count_sub = 0;
+		st->eis.voting_state = 1;
+		break;
+	case 1:
+		if (abs(st->eis.gyro_counter_s[0] -
+						st->eis.gyro_counter) <= 1) {
+			st->eis.voting_count++;
+		} else {
+			st->eis.gyro_counter_s[2] = st->eis.gyro_counter;
+			st->eis.voting_count_sub++;
+			st->eis.voting_state = 2;
+		}
+		if (st->eis.voting_count > 5)
+			st->eis.voting_state = 3;
+		break;
+	case 2:
+		tmp1 = abs(st->eis.gyro_counter_s[0] - st->eis.gyro_counter);
+		tmp2 = abs(st->eis.gyro_counter_s[2] - st->eis.gyro_counter);
+
+		if ((tmp1 < tmp2) && (tmp1 <= 1))
+			st->eis.voting_count++;
+		else
+			st->eis.voting_count_sub++;
+		if (st->eis.voting_count > 5) {
+			st->eis.voting_state = 3;
+			st->eis.voting_count = 0;
+			st->eis.voting_count_sub = 0;
+		}
+
+		if (st->eis.voting_count_sub > 5) {
+			st->eis.gyro_counter_s[0] = st->eis.gyro_counter;
+			st->eis.fsync_delay_s[0] = delay - st->eis.fsync_delay;
+			st->eis.voting_state = 1;
+			st->eis.voting_count = 1;
+			st->eis.voting_count_sub = 0;
+		}
+		break;
+	case 3:
+		tmp1 = abs(st->eis.gyro_counter_s[0] - st->eis.gyro_counter);
+		if (tmp1 == 1) {
+			st->eis.gyro_counter_s[1] = st->eis.gyro_counter;
+			st->eis.fsync_delay_s[1] = delay - st->eis.fsync_delay;
+			st->eis.voting_state = 4;
+			st->eis.voting_count_sub = 1;
+			st->eis.voting_count = 1;
+		}
+		break;
+	case 4:
+		if (st->eis.gyro_counter == st->eis.gyro_counter_s[0]) {
+			tmp1 = delay - st->eis.fsync_delay;
+			tmp2 = abs(tmp1 - st->eis.fsync_delay_s[0]);
+			if (tmp2 < 3) {
+				st->eis.voting_count++;
+			} else {
+				st->eis.fsync_delay_s[2] = tmp1;
+				st->eis.voting_count_sub = 1;
+				st->eis.voting_state = 5;
+			}
+			if (st->eis.voting_count > 5) {
+				st->eis.voting_count = 1;
+				st->eis.voting_state = 6;
+			}
+		}
+		break;
+	case 5:
+		if (st->eis.gyro_counter == st->eis.gyro_counter_s[0]) {
+			tmp1 = delay - st->eis.fsync_delay;
+
+			tmp2 = abs(tmp1 - st->eis.fsync_delay_s[0]);
+			tmp3 = abs(tmp1 - st->eis.fsync_delay_s[2]);
+			if ((tmp2 < tmp3) && (tmp2 < 3))
+				st->eis.voting_count++;
+			else
+				st->eis.voting_count_sub++;
+			if ((st->eis.voting_count > 5) &&
+					(st->eis.voting_count_sub
+					< st->eis.voting_count)) {
+				st->eis.voting_state = 6;
+				st->eis.voting_count = 1;
+			} else if (st->eis.voting_count_sub > 5) {
+				st->eis.fsync_delay_s[0] = tmp1;
+				st->eis.voting_state = 4;
+				st->eis.voting_count = 1;
+			}
+
+		}
+		break;
+	case 6:
+		if (st->eis.gyro_counter == st->eis.gyro_counter_s[1]) {
+			tmp1 = delay - st->eis.fsync_delay;
+			tmp2 = abs(tmp1 - st->eis.fsync_delay_s[1]);
+			if (tmp2 < 3) {
+				st->eis.voting_count++;
+			} else {
+				st->eis.fsync_delay_s[2] = tmp1;
+				st->eis.voting_count_sub = 1;
+				st->eis.voting_count = 1;
+				st->eis.voting_state = 7;
+			}
+			if (st->eis.voting_count > 5)
+				st->eis.voting_state = 8;
+		}
+		break;
+	case 7:
+		if (st->eis.gyro_counter == st->eis.gyro_counter_s[1]) {
+			tmp1 = delay - st->eis.fsync_delay;
+
+			tmp2 = abs(tmp1 - st->eis.fsync_delay_s[1]);
+			tmp3 = abs(tmp1 - st->eis.fsync_delay_s[2]);
+			if ((tmp2 < tmp3) && (tmp2 < 3))
+				st->eis.voting_count++;
+			else
+				st->eis.voting_count_sub++;
+			if ((st->eis.voting_count > 5) &&
+					(st->eis.voting_count_sub
+					< st->eis.voting_count)) {
+				st->eis.voting_state = 8;
+			} else if (st->eis.voting_count_sub > 5) {
+				st->eis.fsync_delay_s[1] = tmp1;
+				st->eis.voting_state = 6;
+				st->eis.voting_count = 1;
+			}
+
+		}
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("de= %d gc= %d\n", delay, st->eis.gyro_counter);
+	st->eis.fsync_delay = delay;
+	st->eis.gyro_counter = 0;
+
+	pr_debug("state=%d g1= %d d1= %d g2= %d d2= %d\n",
+			st->eis.voting_state,
+			st->eis.gyro_counter_s[0],
+			st->eis.fsync_delay_s[0],
+			st->eis.gyro_counter_s[1],
+			st->eis.fsync_delay_s[1]);
+
+	return 0;
+}
+
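+/*
+ * Round a requested rate to one achievable with an integer divider of the
+ * base rate (DMP default rate or BASE_SAMPLE_RATE). The lower neighbouring
+ * rate is used when it is within 1/INV_ODR_BUFFER_MULTI of the request,
+ * otherwise the higher one.
+ */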
+int inv_rate_convert(struct inv_mpu_state *st, int ind, int data)
+{
+	int t, out, out1, out2;
+	int base_freq;
+
+	if (data <= MPU_DEFAULT_DMP_FREQ)
+		base_freq = MPU_DEFAULT_DMP_FREQ;
+	else
+		base_freq = BASE_SAMPLE_RATE;
+
+	t = base_freq / data;
+	if (!t)
+		t = 1;
+	out1 = base_freq / (t + 1);
+	out2 = base_freq / t;
+	if ((data - out1) * INV_ODR_BUFFER_MULTI < data)
+		out = out1;
+	else
+		out = out2;
+
+	return out;
+}
+
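+/*
+ * Align the wake and non-wake variants of a sensor on a common rate: the
+ * faster of the two requests, with MPU_INIT_SENSOR_RATE as the minimum.
+ */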
+static void inv_check_wake_non_wake(struct inv_mpu_state *st,
+			enum SENSOR_L wake, enum SENSOR_L non_wake)
+{
+	int tmp_rate;
+
+	if (!st->sensor_l[wake].on && !st->sensor_l[non_wake].on)
+		return;
+
+	tmp_rate = MPU_INIT_SENSOR_RATE;
+	if (st->sensor_l[wake].on)
+		tmp_rate = st->sensor_l[wake].rate;
+	if (st->sensor_l[non_wake].on)
+		tmp_rate = max(tmp_rate, st->sensor_l[non_wake].rate);
+	st->sensor_l[wake].rate = tmp_rate;
+	st->sensor_l[non_wake].rate = tmp_rate;
+}
+
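+/*
+ * When both variants are on, park the non-wake copy's divider at 0xffff so
+ * that effectively only the wake copy produces output.
+ */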
+static void inv_check_wake_non_wake_divider(struct inv_mpu_state *st,
+			enum SENSOR_L wake, enum SENSOR_L non_wake)
+{
+	if (st->sensor_l[wake].on && st->sensor_l[non_wake].on)
+		st->sensor_l[non_wake].div = 0xffff;
+}
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) \
+	|| defined(CONFIG_INV_MPU_IIO_ICM20690) \
+	|| defined(CONFIG_INV_MPU_IIO_IAM20680)
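+/*
+ * Translate the Android-level sensor requests (sensor_l[]) into hardware
+ * sensor on/off states and rates, decide whether gesture-only mode can be
+ * used, and compute per-sensor dividers against the shared engine rate.
+ */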
+int inv_check_sensor_on(struct inv_mpu_state *st)
+{
+	int i, max_rate;
+	enum SENSOR_L wake[] = {SENSOR_L_GYRO_WAKE, SENSOR_L_ACCEL_WAKE,
+					SENSOR_L_MAG_WAKE};
+	enum SENSOR_L non_wake[] = {SENSOR_L_GYRO, SENSOR_L_ACCEL,
+					SENSOR_L_MAG};
+
+	st->sensor_l[SENSOR_L_GESTURE_ACCEL].rate = GESTURE_ACCEL_RATE;
+	for (i = 0; i < SENSOR_NUM_MAX; i++)
+		st->sensor[i].on = false;
+	for (i = 0; i < SENSOR_NUM_MAX; i++)
+		st->sensor[i].rate = MPU_INIT_SENSOR_RATE;
+
+	if ((st->step_detector_l_on
+			|| st->step_detector_wake_l_on
+			|| st->step_counter_l_on
+			|| st->step_counter_wake_l_on
+			|| st->chip_config.pick_up_enable
+			|| st->chip_config.tilt_enable)
+			&& (!st->sensor_l[SENSOR_L_ACCEL].on)
+			&& (!st->sensor_l[SENSOR_L_ACCEL_WAKE].on))
+		st->sensor_l[SENSOR_L_GESTURE_ACCEL].on = true;
+	else
+		st->sensor_l[SENSOR_L_GESTURE_ACCEL].on = false;
+
+	st->chip_config.wake_on = false;
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+		if (st->sensor_l[i].on && st->sensor_l[i].rate) {
+			st->sensor[st->sensor_l[i].base].on = true;
+			st->chip_config.wake_on |= st->sensor_l[i].wake_on;
+		}
+	}
+	if (st->sensor_l[SENSOR_L_GESTURE_ACCEL].on &&
+				(!st->sensor[SENSOR_GYRO].on) &&
+				(!st->sensor[SENSOR_COMPASS].on))
+		st->gesture_only_on = true;
+	else
+		st->gesture_only_on = false;
+
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+		if (st->sensor_l[i].on) {
+			st->sensor[st->sensor_l[i].base].rate =
+			    max(st->sensor[st->sensor_l[i].base].rate,
+							st->sensor_l[i].rate);
+		}
+	}
+	max_rate = MPU_INIT_SENSOR_RATE;
+	if (st->chip_config.eis_enable) {
+		max_rate = ESI_GYRO_RATE;
+		st->sensor_l[SENSOR_L_EIS_GYRO].rate = ESI_GYRO_RATE;
+	}
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on) {
+			max_rate = max(max_rate, st->sensor[i].rate);
+		}
+	}
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on) {
+			st->sensor[i].rate = max_rate;
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(wake); i++)
+		inv_check_wake_non_wake(st, wake[i], non_wake[i]);
+
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+		if (st->sensor_l[i].on) {
+			if (st->sensor_l[i].rate)
+				st->sensor_l[i].div =
+				    st->sensor[st->sensor_l[i].base].rate
+							/ st->sensor_l[i].rate;
+			else
+				st->sensor_l[i].div = 0xffff;
+			pr_debug("sensor= %d, div= %d\n",
+						i, st->sensor_l[i].div);
+		}
+	}
+	for (i = 0; i < ARRAY_SIZE(wake); i++)
+		inv_check_wake_non_wake_divider(st, wake[i], non_wake[i]);
+
+	if (st->step_detector_wake_l_on ||
+			st->step_counter_wake_l_on ||
+			st->chip_config.pick_up_enable ||
+			st->chip_config.tilt_enable)
+		st->chip_config.wake_on = true;
+
+	return 0;
+}
+#else
+static void inv_do_check_sensor_on(struct inv_mpu_state *st,
+				enum SENSOR_L *wake,
+				enum SENSOR_L *non_wake, int sensor_size)
+{
+	int i;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++)
+		st->sensor[i].on = false;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++)
+		st->sensor[i].rate = MPU_INIT_SENSOR_RATE;
+
+	st->chip_config.wake_on = false;
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+		if (st->sensor_l[i].on && st->sensor_l[i].rate) {
+			st->sensor[st->sensor_l[i].base].on = true;
+			st->chip_config.wake_on |= st->sensor_l[i].wake_on;
+		}
+	}
+
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+		if (st->sensor_l[i].on) {
+			st->sensor[st->sensor_l[i].base].rate =
+			    max(st->sensor[st->sensor_l[i].base].rate,
+				st->sensor_l[i].rate);
+		}
+	}
+	for (i = 0; i < sensor_size; i++)
+		inv_check_wake_non_wake(st, wake[i], non_wake[i]);
+
+	for (i = 0; i < SENSOR_L_NUM_MAX; i++) {
+		if (st->sensor_l[i].on) {
+			if (st->sensor_l[i].rate)
+				st->sensor_l[i].div =
+				    st->sensor[st->sensor_l[i].base].rate
+				    / st->sensor_l[i].rate;
+			else
+				st->sensor_l[i].div = 0xffff;
+		}
+	}
+	for (i = 0; i < sensor_size; i++)
+		inv_check_wake_non_wake_divider(st, wake[i], non_wake[i]);
+
+	if (st->step_detector_wake_l_on ||
+			st->step_counter_wake_l_on ||
+			st->chip_config.pick_up_enable ||
+			st->chip_config.tilt_enable ||
+			st->smd.on)
+		st->chip_config.wake_on = true;
+}
+#endif
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20608D)
+int inv_check_sensor_on(struct inv_mpu_state *st)
+{
+	enum SENSOR_L wake[] = {SENSOR_L_GYRO_WAKE, SENSOR_L_ACCEL_WAKE,
+				SENSOR_L_SIXQ_WAKE, SENSOR_L_PEDQ_WAKE,
+				SENSOR_L_GYRO_CAL_WAKE};
+	enum SENSOR_L non_wake[] = {SENSOR_L_GYRO, SENSOR_L_ACCEL,
+				SENSOR_L_SIXQ, SENSOR_L_PEDQ,
+				SENSOR_L_GYRO_CAL};
+
+	inv_do_check_sensor_on(st, wake, non_wake, ARRAY_SIZE(wake));
+
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+int inv_check_sensor_on(struct inv_mpu_state *st)
+{
+	enum SENSOR_L wake[] = {SENSOR_L_GYRO_WAKE, SENSOR_L_ACCEL_WAKE,
+				SENSOR_L_MAG_WAKE, SENSOR_L_ALS_WAKE,
+				SENSOR_L_SIXQ_WAKE, SENSOR_L_PEDQ_WAKE,
+				SENSOR_L_NINEQ_WAKE, SENSOR_L_GEOMAG_WAKE,
+				SENSOR_L_PRESSURE_WAKE,
+				SENSOR_L_GYRO_CAL_WAKE,
+				SENSOR_L_MAG_CAL_WAKE};
+	enum SENSOR_L non_wake[] = {SENSOR_L_GYRO, SENSOR_L_ACCEL,
+					SENSOR_L_MAG, SENSOR_L_ALS,
+					SENSOR_L_SIXQ, SENSOR_L_PEDQ,
+					SENSOR_L_NINEQ, SENSOR_L_GEOMAG,
+					SENSOR_L_PRESSURE,
+					SENSOR_L_GYRO_CAL,
+					SENSOR_L_MAG_CAL};
+
+	inv_do_check_sensor_on(st, wake, non_wake, ARRAY_SIZE(wake));
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+int inv_mpu_suspend(struct iio_dev *indio_dev)
+{
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+	/* add code here according to different customer requests */
+	dev_info(st->dev, "%s suspend\n", st->hw->name);
+	mutex_lock(&indio_dev->mlock);
+
+	st->resume_state = false;
+	if (st->chip_config.wake_on) {
+		enable_irq_wake(st->irq);
+	} else {
+		inv_stop_interrupt(st);
+	}
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(inv_mpu_suspend);
+
+/*
+ * inv_mpu_complete(): complete method for this driver.
+ *    This method can be modified according to the requests of different
+ *    customers. It basically undoes everything suspend does and restores
+ *    the chip to its pre-suspend state. We use complete to make sure that
+ *    the alarm clock resume is finished. If we used resume, the alarm
+ *    clock might not have resumed yet and we would get an incorrect clock
+ *    reading.
+ */
+void inv_mpu_complete(struct iio_dev *indio_dev)
+{
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+	dev_info(st->dev, "%s resume\n", st->hw->name);
+	if (st->resume_state)
+		return;
+
+	mutex_lock(&indio_dev->mlock);
+
+	if (!st->chip_config.wake_on) {
+		inv_reenable_interrupt(st);
+	} else {
+		disable_irq_wake(st->irq);
+	}
+	/*
+	 * resume_state is used to synchronize read_fifo so that it won't
+	 * proceed until resume is finished.
+	 */
+	st->resume_state = true;
+	/*
+	 * resume_flag indicates that the current clock reading comes from
+	 * resume; it can have up to 1 second of drift and needs proper
+	 * processing.
+	 */
+	st->ts_algo.resume_flag = true;
+	mutex_unlock(&indio_dev->mlock);
+	wake_up_interruptible(&st->wait_queue);
+
+	return;
+}
+EXPORT_SYMBOL_GPL(inv_mpu_complete);
+#endif
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_dts.c b/drivers/iio/imu/inv_mpu/inv_mpu_dts.c
new file mode 100644
index 0000000..0b8b3fc
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_dts.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/export.h>
+
+#include <linux/iio/imu/mpu.h>
+#include "inv_mpu_dts.h"
+#include "inv_mpu_iio.h"
+
+#ifdef CONFIG_OF
+
+static int inv_mpu_power_on(struct mpu_platform_data *pdata)
+{
+	int err;
+
+	if (!IS_ERR(pdata->vdd_ana)) {
+		err = regulator_enable(pdata->vdd_ana);
+		if (err)
+			return err;
+	}
+	if (!IS_ERR(pdata->vdd_i2c)) {
+		err = regulator_enable(pdata->vdd_i2c);
+		if (err)
+			goto error_disable_vdd_ana;
+	}
+
+	return 0;
+
+error_disable_vdd_ana:
+	regulator_disable(pdata->vdd_ana);
+	return err;
+}
+
+static int inv_mpu_power_off(struct mpu_platform_data *pdata)
+{
+	if (!IS_ERR(pdata->vdd_ana))
+		regulator_disable(pdata->vdd_ana);
+	if (!IS_ERR(pdata->vdd_i2c))
+		regulator_disable(pdata->vdd_i2c);
+
+	return 0;
+}
+
+static int inv_parse_orientation_matrix(struct device *dev, s8 *orient)
+{
+	int rc, i;
+	struct device_node *np = dev->of_node;
+	u32 temp_val, temp_val2;
+
+	for (i = 0; i < 9; i++)
+		orient[i] = 0;
+
+	/* parsing axis x orientation matrix */
+	rc = of_property_read_u32(np, "axis_map_x", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read axis_map_x\n");
+		return rc;
+	}
+	rc = of_property_read_u32(np, "negate_x", &temp_val2);
+	if (rc) {
+		dev_err(dev, "Unable to read negate_x\n");
+		return rc;
+	}
+	if (temp_val2)
+		orient[temp_val] = -1;
+	else
+		orient[temp_val] = 1;
+
+	/* parsing axis y orientation matrix */
+	rc = of_property_read_u32(np, "axis_map_y", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read axis_map_y\n");
+		return rc;
+	}
+	rc = of_property_read_u32(np, "negate_y", &temp_val2);
+	if (rc) {
+		dev_err(dev, "Unable to read negate_y\n");
+		return rc;
+	}
+	if (temp_val2)
+		orient[3 + temp_val] = -1;
+	else
+		orient[3 + temp_val] = 1;
+
+	/* parsing axis z orientation matrix */
+	rc = of_property_read_u32(np, "axis_map_z", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read axis_map_z\n");
+		return rc;
+	}
+	rc = of_property_read_u32(np, "negate_z", &temp_val2);
+	if (rc) {
+		dev_err(dev, "Unable to read negate_z\n");
+		return rc;
+	}
+	if (temp_val2)
+		orient[6 + temp_val] = -1;
+	else
+		orient[6 + temp_val] = 1;
+
+	return 0;
+}
+
+static int inv_parse_secondary_orientation_matrix(struct device *dev,
+						  s8 *orient)
+{
+	int rc, i;
+	struct device_node *np = dev->of_node;
+	u32 temp_val, temp_val2;
+
+	for (i = 0; i < 9; i++)
+		orient[i] = 0;
+
+	/* parsing axis x orientation matrix */
+	rc = of_property_read_u32(np, "inven,secondary_axis_map_x", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read secondary axis_map_x\n");
+		return rc;
+	}
+	rc = of_property_read_u32(np, "inven,secondary_negate_x", &temp_val2);
+	if (rc) {
+		dev_err(dev, "Unable to read secondary negate_x\n");
+		return rc;
+	}
+	if (temp_val2)
+		orient[temp_val] = -1;
+	else
+		orient[temp_val] = 1;
+
+	/* parsing axis y orientation matrix */
+	rc = of_property_read_u32(np, "inven,secondary_axis_map_y", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read secondary axis_map_y\n");
+		return rc;
+	}
+	rc = of_property_read_u32(np, "inven,secondary_negate_y", &temp_val2);
+	if (rc) {
+		dev_err(dev, "Unable to read secondary negate_y\n");
+		return rc;
+	}
+	if (temp_val2)
+		orient[3 + temp_val] = -1;
+	else
+		orient[3 + temp_val] = 1;
+
+	/* parsing axis z orientation matrix */
+	rc = of_property_read_u32(np, "inven,secondary_axis_map_z", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read secondary axis_map_z\n");
+		return rc;
+	}
+	rc = of_property_read_u32(np, "inven,secondary_negate_z", &temp_val2);
+	if (rc) {
+		dev_err(dev, "Unable to read secondary negate_z\n");
+		return rc;
+	}
+	if (temp_val2)
+		orient[6 + temp_val] = -1;
+	else
+		orient[6 + temp_val] = 1;
+
+	return 0;
+}
+
+static int inv_parse_secondary(struct device *dev,
+			       struct mpu_platform_data *pdata)
+{
+	int rc;
+	struct device_node *np = dev->of_node;
+	u32 temp_val;
+	const char *name;
+
+	if (of_property_read_string(np, "inven,secondary_type", &name)) {
+		dev_err(dev, "Missing secondary type.\n");
+		return -EINVAL;
+	}
+	if (!strcmp(name, "compass")) {
+		pdata->sec_slave_type = SECONDARY_SLAVE_TYPE_COMPASS;
+	} else if (!strcmp(name, "none")) {
+		pdata->sec_slave_type = SECONDARY_SLAVE_TYPE_NONE;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+
+	if (of_property_read_string(np, "inven,secondary_name", &name)) {
+		dev_err(dev, "Missing secondary name.\n");
+		return -EINVAL;
+	}
+	if (!strcmp(name, "ak8963"))
+		pdata->sec_slave_id = COMPASS_ID_AK8963;
+	else if (!strcmp(name, "ak8975"))
+		pdata->sec_slave_id = COMPASS_ID_AK8975;
+	else if (!strcmp(name, "ak8972"))
+		pdata->sec_slave_id = COMPASS_ID_AK8972;
+	else if (!strcmp(name, "ak09911"))
+		pdata->sec_slave_id = COMPASS_ID_AK09911;
+	else if (!strcmp(name, "ak09912"))
+		pdata->sec_slave_id = COMPASS_ID_AK09912;
+	else if (!strcmp(name, "ak09916"))
+		pdata->sec_slave_id = COMPASS_ID_AK09916;
+	else
+		return -EINVAL;
+	rc = of_property_read_u32(np, "inven,secondary_reg", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read secondary register\n");
+		return rc;
+	}
+	pdata->secondary_i2c_addr = temp_val;
+	rc = inv_parse_secondary_orientation_matrix(dev,
+					pdata->secondary_orientation);
+
+	return rc;
+}
+
+static int inv_parse_aux(struct device *dev, struct mpu_platform_data *pdata)
+{
+	int rc;
+	struct device_node *np = dev->of_node;
+	u32 temp_val;
+	const char *name;
+
+	if (of_property_read_string(np, "inven,aux_type", &name)) {
+		dev_err(dev, "Missing aux type.\n");
+		return -EINVAL;
+	}
+	if (!strcmp(name, "pressure")) {
+		pdata->aux_slave_type = SECONDARY_SLAVE_TYPE_PRESSURE;
+	} else if (!strcmp(name, "none")) {
+		pdata->aux_slave_type = SECONDARY_SLAVE_TYPE_NONE;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+
+	if (of_property_read_string(np, "inven,aux_name", &name)) {
+		dev_err(dev, "Missing aux name.\n");
+		return -EINVAL;
+	}
+	if (!strcmp(name, "bmp280"))
+		pdata->aux_slave_id = PRESSURE_ID_BMP280;
+	else
+		return -EINVAL;
+
+	rc = of_property_read_u32(np, "inven,aux_reg", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read aux register\n");
+		return rc;
+	}
+	pdata->aux_i2c_addr = temp_val;
+
+	return 0;
+}
+
+static int inv_parse_readonly_secondary(struct device *dev,
+					struct mpu_platform_data *pdata)
+{
+	int rc;
+	struct device_node *np = dev->of_node;
+	u32 temp_val;
+	const char *name;
+
+	if (of_property_read_string(np, "inven,read_only_slave_type", &name)) {
+		dev_err(dev, "Missing read only slave type type.\n");
+		return -EINVAL;
+	}
+	if (!strcmp(name, "als")) {
+		pdata->read_only_slave_type = SECONDARY_SLAVE_TYPE_ALS;
+	} else if (!strcmp(name, "none")) {
+		pdata->read_only_slave_type = SECONDARY_SLAVE_TYPE_NONE;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+
+	if (of_property_read_string(np, "inven,read_only_slave_name", &name)) {
+		dev_err(dev, "Missing read only slave type name.\n");
+		return -EINVAL;
+	}
+	if (!strcmp(name, "apds9930"))
+		pdata->read_only_slave_id = ALS_ID_APDS_9930;
+	else
+		return -EINVAL;
+
+	rc = of_property_read_u32(np, "inven,read_only_slave_reg", &temp_val);
+	if (rc) {
+		dev_err(dev, "Unable to read read only slave reg register\n");
+		return rc;
+	}
+	pdata->read_only_i2c_addr = temp_val;
+
+	return 0;
+}
+
+int invensense_mpu_parse_dt(struct device *dev, struct mpu_platform_data *pdata)
+{
+	int rc;
+
+	rc = inv_parse_orientation_matrix(dev, pdata->orientation);
+	if (rc)
+		return rc;
+	rc = inv_parse_secondary(dev, pdata);
+	if (rc)
+		return rc;
+	inv_parse_aux(dev, pdata);
+
+	inv_parse_readonly_secondary(dev, pdata);
+
+	pdata->vdd_ana = regulator_get(dev, "inven,vdd_ana");
+	if (IS_ERR(pdata->vdd_ana)) {
+		rc = PTR_ERR(pdata->vdd_ana);
+		dev_warn(dev, "regulator get failed vdd_ana-supply rc=%d\n", rc);
+	}
+	pdata->vdd_i2c = regulator_get(dev, "inven,vcc_i2c");
+	if (IS_ERR(pdata->vdd_i2c)) {
+		rc = PTR_ERR(pdata->vdd_i2c);
+		dev_warn(dev, "regulator get failed vcc-i2c-supply rc=%d\n", rc);
+	}
+	pdata->power_on = inv_mpu_power_on;
+	pdata->power_off = inv_mpu_power_off;
+	dev_dbg(dev, "parse dt complete\n");
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(invensense_mpu_parse_dt);
+
+#endif /* CONFIG_OF */
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_dts.h b/drivers/iio/imu/inv_mpu/inv_mpu_dts.h
new file mode 100644
index 0000000..90966fe
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_dts.h
@@ -0,0 +1,25 @@
+/*
+* Copyright (C) 2012-2017 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _INV_MPU_DTS_H_
+#define _INV_MPU_DTS_H_
+
+#include <linux/kernel.h>
+#include <linux/iio/imu/mpu.h>
+
+#ifdef CONFIG_OF
+int invensense_mpu_parse_dt(struct device *dev,
+			    struct mpu_platform_data *pdata);
+#endif
+
+#endif /* #ifndef _INV_MPU_DTS_H_ */
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu/inv_mpu_i2c.c
new file mode 100644
index 0000000..e7838fc
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_i2c.c
@@ -0,0 +1,556 @@
+/*
+* Copyright (C) 2012-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+
+#include "inv_mpu_iio.h"
+#include "inv_mpu_dts.h"
+
+#define CONFIG_DYNAMIC_DEBUG_I2C 0
+
+/**
+ *  inv_i2c_read_base() - Read one or more bytes from the device registers.
+ *  @st:	Device driver instance.
+ *  @i2c_addr:  i2c address of device.
+ *  @reg:	First device register to be read from.
+ *  @length:	Number of bytes to read.
+ *  @data:	Data read from device.
+ *  NOTE: This is not a re-implementation of i2c_smbus_read because the i2c
+ *       address can be specified in this case. We could have two different
+ *       i2c addresses due to the secondary i2c interface.
+ */
+int inv_i2c_read_base(struct inv_mpu_state *st, u16 i2c_addr,
+						u8 reg, u16 length, u8 *data)
+{
+	struct i2c_msg msgs[2];
+	int res;
+
+	if (!data)
+		return -EINVAL;
+
+	msgs[0].addr = i2c_addr;
+	msgs[0].flags = 0;	/* write */
+	msgs[0].buf = &reg;
+	msgs[0].len = 1;
+
+	msgs[1].addr = i2c_addr;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = data;
+	msgs[1].len = length;
+
+	res = i2c_transfer(st->sl_handle, msgs, 2);
+
+	if (res < 2) {
+		if (res >= 0)
+			res = -EIO;
+	} else
+		res = 0;
+	INV_I2C_INC_MPUWRITE(3);
+	INV_I2C_INC_MPUREAD(length);
+
+	return res;
+}
+
+/**
+ *  inv_i2c_single_write_base() - Write a byte to a device register.
+ *  @st:	Device driver instance.
+ *  @i2c_addr:  I2C address of the device.
+ *  @reg:	Device register to be written to.
+ *  @data:	Byte to write to device.
+ *  NOTE: This is not a re-implementation of i2c_smbus_write because the i2c
+ *       address can be specified in this case. We could have two different
+ *       i2c addresses due to the secondary i2c interface.
+ */
+int inv_i2c_single_write_base(struct inv_mpu_state *st,
+						u16 i2c_addr, u8 reg, u8 data)
+{
+	u8 tmp[2];
+	struct i2c_msg msg;
+	int res;
+
+	tmp[0] = reg;
+	tmp[1] = data;
+
+	msg.addr = i2c_addr;
+	msg.flags = 0;		/* write */
+	msg.buf = tmp;
+	msg.len = 2;
+
+	INV_I2C_INC_MPUWRITE(3);
+
+	res = i2c_transfer(st->sl_handle, &msg, 1);
+	if (res < 1) {
+		if (res == 0)
+			res = -EIO;
+		return res;
+	} else
+		return 0;
+}
+
+static int inv_i2c_single_write(struct inv_mpu_state *st, u8 reg, u8 data)
+{
+	return inv_i2c_single_write_base(st, st->i2c_addr, reg, data);
+}
+
+static int inv_i2c_read(struct inv_mpu_state *st, u8 reg, int len, u8 *data)
+{
+	return inv_i2c_read_base(st, st->i2c_addr, reg, len, data);
+}
+
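+/*
+ * DMP memory write: select the bank (REG_MEM_BANK_SEL), set the start
+ * address (REG_MEM_START_ADDR), then burst the payload through REG_MEM_R_W
+ * in a single i2c_transfer of three messages.
+ */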
+static int _memory_write(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+						u32 len, u8 const *data)
+{
+	u8 bank[2];
+	u8 addr[2];
+	u8 buf[513];
+
+	struct i2c_msg msgs[3];
+	int res;
+
+	if (!data || !st)
+		return -EINVAL;
+
+	if (len >= (sizeof(buf) - 1))
+		return -ENOMEM;
+
+	bank[0] = REG_MEM_BANK_SEL;
+	bank[1] = mem_addr >> 8;
+
+	addr[0] = REG_MEM_START_ADDR;
+	addr[1] = mem_addr & 0xFF;
+
+	buf[0] = REG_MEM_R_W;
+	memcpy(buf + 1, data, len);
+
+	/* write message */
+	msgs[0].addr = mpu_addr;
+	msgs[0].flags = 0;
+	msgs[0].buf = bank;
+	msgs[0].len = sizeof(bank);
+
+	msgs[1].addr = mpu_addr;
+	msgs[1].flags = 0;
+	msgs[1].buf = addr;
+	msgs[1].len = sizeof(addr);
+
+	msgs[2].addr = mpu_addr;
+	msgs[2].flags = 0;
+	msgs[2].buf = (u8 *) buf;
+	msgs[2].len = len + 1;
+
+	INV_I2C_INC_MPUWRITE(3 + 3 + (2 + len));
+
+#if CONFIG_DYNAMIC_DEBUG_I2C
+	{
+		char *write = 0;
+		pr_debug("%s WM%02X%02X%02X%s%s - %d\n", st->hw->name,
+			mpu_addr, bank[1], addr[1],
+			wr_pr_debug_begin(data, len, write),
+			wr_pr_debug_end(write), len);
+	}
+#endif
+
+	res = i2c_transfer(st->sl_handle, msgs, 3);
+	if (res != 3) {
+		if (res >= 0)
+			res = -EIO;
+		return res;
+	} else {
+		return 0;
+	}
+}
+
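+/*
+ * Write DMP memory and, for short writes, read the block back and retry up
+ * to three times if the verification fails.
+ */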
+static int inv_i2c_mem_write(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+						u32 len, u8 const *data)
+{
+	int r, i, j;
+#define DMP_MEM_CMP_SIZE 16
+	u8 w[DMP_MEM_CMP_SIZE];
+	bool retry;
+
+	j = 0;
+	retry = true;
+	while ((j < 3) && retry) {
+		retry = false;
+		r = _memory_write(st, mpu_addr, mem_addr, len, data);
+		if (len < DMP_MEM_CMP_SIZE) {
+			r = mem_r(mem_addr, len, w);
+			for (i = 0; i < len; i++) {
+				if (data[i] != w[i]) {
+					pr_debug("error write=%x, len=%d, data=%x, w=%x, i=%d\n",
+						 mem_addr, len, data[i], w[i], i);
+					retry = true;
+				}
+			}
+		}
+		j++;
+	}
+
+	return r;
+}
+
+static int inv_i2c_mem_read(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+						u32 len, u8 *data)
+{
+	u8 bank[2];
+	u8 addr[2];
+	u8 buf;
+
+	struct i2c_msg msgs[4];
+	int res;
+
+	if (!data || !st)
+		return -EINVAL;
+
+	bank[0] = REG_MEM_BANK_SEL;
+	bank[1] = mem_addr >> 8;
+
+	addr[0] = REG_MEM_START_ADDR;
+	addr[1] = mem_addr & 0xFF;
+
+	buf = REG_MEM_R_W;
+
+	/* write message */
+	msgs[0].addr = mpu_addr;
+	msgs[0].flags = 0;
+	msgs[0].buf = bank;
+	msgs[0].len = sizeof(bank);
+
+	msgs[1].addr = mpu_addr;
+	msgs[1].flags = 0;
+	msgs[1].buf = addr;
+	msgs[1].len = sizeof(addr);
+
+	msgs[2].addr = mpu_addr;
+	msgs[2].flags = 0;
+	msgs[2].buf = &buf;
+	msgs[2].len = 1;
+
+	msgs[3].addr = mpu_addr;
+	msgs[3].flags = I2C_M_RD;
+	msgs[3].buf = data;
+	msgs[3].len = len;
+
+	res = i2c_transfer(st->sl_handle, msgs, 4);
+	if (res != 4) {
+		if (res >= 0)
+			res = -EIO;
+	} else
+		res = 0;
+	INV_I2C_INC_MPUWRITE(3 + 3 + 3);
+	INV_I2C_INC_MPUREAD(len);
+
+#if CONFIG_DYNAMIC_DEBUG_I2C
+	{
+		char *read = 0;
+		pr_debug("%s RM%02X%02X%02X%02X - %s%s\n", st->hw->name,
+			mpu_addr, bank[1], addr[1], len,
+			wr_pr_debug_begin(data, len, read),
+			wr_pr_debug_end(read));
+	}
+#endif
+
+	return res;
+}
+
+/*
+ *  inv_mpu_probe() - probe function.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct inv_mpu_state *st;
+	struct iio_dev *indio_dev;
+	int result;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		result = -ENOSYS;
+		pr_err("I2c function error\n");
+		goto out_no_free;
+	}
+
+#ifdef KERNEL_VERSION_4_X
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		pr_err("memory allocation failed\n");
+		result = -ENOMEM;
+		goto out_no_free;
+	}
+#else
+	indio_dev = iio_device_alloc(sizeof(*st));
+	if (indio_dev == NULL) {
+		pr_err("memory allocation failed\n");
+		result = -ENOMEM;
+		goto out_no_free;
+	}
+#endif
+	st = iio_priv(indio_dev);
+	st->client = client;
+	st->sl_handle = client->adapter;
+	st->i2c_addr = client->addr;
+	st->write = inv_i2c_single_write;
+	st->read = inv_i2c_read;
+	st->mem_write = inv_i2c_mem_write;
+	st->mem_read = inv_i2c_mem_read;
+	st->dev = &client->dev;
+	st->bus_type = BUS_I2C;
+#ifdef CONFIG_OF
+	result = invensense_mpu_parse_dt(st->dev, &st->plat_data);
+	if (result)
+#  ifdef KERNEL_VERSION_4_X
+		return -ENODEV;
+#  else
+		goto out_free;
+#  endif
+
+	/* Power on device */
+	if (st->plat_data.power_on) {
+		result = st->plat_data.power_on(&st->plat_data);
+		if (result < 0) {
+			dev_err(st->dev, "power_on failed: %d\n", result);
+#  ifdef KERNEL_VERSION_4_X
+			return -ENODEV;
+#  else
+			goto out_free;
+#  endif
+		}
+		pr_info("%s: power on here.\n", __func__);
+	}
+	pr_info("%s: power on.\n", __func__);
+
+	msleep(100);
+#else
+	if (dev_get_platdata(st->dev) == NULL)
+#  ifdef KERNEL_VERSION_4_X
+		return -ENODEV;
+#  else
+		goto out_free;
+#  endif
+	st->plat_data = *(struct mpu_platform_data *)dev_get_platdata(st->dev);
+#endif
+
+	/* power is turned on inside check chip type */
+	result = inv_check_chip_type(indio_dev, id->name);
+	if (result)
+#ifdef KERNEL_VERSION_4_X
+		return -ENODEV;
+#else
+		goto out_free;
+#endif
+
+	/* Make state variables available to all _show and _store functions. */
+	i2c_set_clientdata(client, indio_dev);
+	indio_dev->dev.parent = st->dev;
+	indio_dev->name = id->name;
+
+	st->irq = client->irq;
+
+	result = inv_mpu_configure_ring(indio_dev);
+	if (result) {
+		pr_err("configure ring buffer fail\n");
+		goto out_free;
+	}
+#ifdef KERNEL_VERSION_4_X
+	INV_I2C_SETIRQ(IRQ_MPU, st->irq);
+	result = devm_iio_device_register(st->dev, indio_dev);
+	if (result) {
+		pr_err("IIO device register fail\n");
+		goto out_unreg_ring;
+	}
+#else
+	result = iio_buffer_register(indio_dev, indio_dev->channels,
+				     indio_dev->num_channels);
+	if (result) {
+		pr_err("ring buffer register fail\n");
+		goto out_unreg_ring;
+	}
+	INV_I2C_SETIRQ(IRQ_MPU, client->irq);
+	result = iio_device_register(indio_dev);
+	if (result) {
+		pr_err("IIO device register fail\n");
+		goto out_remove_ring;
+	}
+#endif
+
+	result = inv_create_dmp_sysfs(indio_dev);
+	if (result) {
+		pr_err("create dmp sysfs failed\n");
+		goto out_unreg_iio;
+	}
+	init_waitqueue_head(&st->wait_queue);
+	st->resume_state = true;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&st->wake_lock, WAKE_LOCK_SUSPEND, "inv_mpu");
+#else
+	wakeup_source_init(&st->wake_lock, "inv_mpu");
+#endif
+	dev_info(st->dev, "%s ma-kernel-%s is ready to go!\n",
+				indio_dev->name, INVENSENSE_DRIVER_VERSION);
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+	pr_info("Data read from registers\n");
+#else
+	pr_info("Data read from FIFO\n");
+#endif
+#ifdef TIMER_BASED_BATCHING
+	pr_info("Timer based batching\n");
+#endif
+
+	return 0;
+#ifdef KERNEL_VERSION_4_X
+out_unreg_iio:
+	devm_iio_device_unregister(st->dev, indio_dev);
+out_unreg_ring:
+	inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+	devm_iio_device_free(st->dev, indio_dev);
+out_no_free:
+#else
+out_unreg_iio:
+	iio_device_unregister(indio_dev);
+out_remove_ring:
+	iio_buffer_unregister(indio_dev);
+out_unreg_ring:
+	inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+	iio_device_free(indio_dev);
+out_no_free:
+#endif
+	dev_err(st->dev, "%s failed %d\n", __func__, result);
+
+	return -EIO;
+}
+
+static void inv_mpu_shutdown(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	int result;
+
+	mutex_lock(&indio_dev->mlock);
+	inv_switch_power_in_lp(st, true);
+	dev_dbg(st->dev, "Shutting down %s...\n", st->hw->name);
+
+	/* reset to make sure previous state are not there */
+	result = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_H_RESET);
+	if (result)
+		dev_err(st->dev, "Failed to reset %s\n",
+			st->hw->name);
+	msleep(POWER_UP_TIME);
+	/* turn off power to ensure gyro engine is off */
+	result = inv_set_power(st, false);
+	if (result)
+		dev_err(st->dev, "Failed to turn off %s\n",
+			st->hw->name);
+	inv_switch_power_in_lp(st, false);
+	mutex_unlock(&indio_dev->mlock);
+}
+
+/*
+ *  inv_mpu_remove() - remove function.
+ */
+static int inv_mpu_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+#ifdef KERNEL_VERSION_4_X
+	devm_iio_device_unregister(st->dev, indio_dev);
+#else
+	iio_device_unregister(indio_dev);
+	iio_buffer_unregister(indio_dev);
+#endif
+	inv_mpu_unconfigure_ring(indio_dev);
+#ifdef KERNEL_VERSION_4_X
+	devm_iio_device_free(st->dev, indio_dev);
+#else
+	iio_device_free(indio_dev);
+#endif
+	dev_info(st->dev, "inv-mpu-iio module removed.\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int inv_mpu_i2c_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+	return inv_mpu_suspend(indio_dev);
+}
+
+static void inv_mpu_i2c_complete(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+	inv_mpu_complete(indio_dev);
+}
+#endif
+
+static const struct dev_pm_ops inv_mpu_i2c_pmops = {
+#ifdef CONFIG_PM_SLEEP
+	.suspend = inv_mpu_i2c_suspend,
+	.complete = inv_mpu_i2c_complete,
+#endif
+};
+
+/* device id table is used to identify which devices can be
+ * supported by this driver
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+	{"icm20645", ICM20645},
+	{"icm10340", ICM10340},
+	{"icm20648", ICM20648},
+#else
+	{"icm20608d", ICM20608D},
+	{"icm20690", ICM20690},
+	{"icm20602", ICM20602},
+	{"iam20680", IAM20680},
+#endif
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static struct i2c_driver inv_mpu_driver = {
+	.probe = inv_mpu_probe,
+	.remove = inv_mpu_remove,
+	.shutdown = inv_mpu_shutdown,
+	.id_table = inv_mpu_id,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "inv-mpu-iio-i2c",
+		.pm = &inv_mpu_i2c_pmops,
+	},
+};
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense I2C device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu/inv_mpu_iio.h
new file mode 100644
index 0000000..9e73165
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_iio.h
@@ -0,0 +1,1138 @@
+/*
+ * Copyright (C) 2012-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _INV_MPU_IIO_H_
+#define _INV_MPU_IIO_H_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+#define KERNEL_VERSION_4_X
+#endif
+
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/iio/imu/mpu.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#else
+#include <linux/pm_wakeup.h>
+#endif
+#include <linux/wait.h>
+
+#include <linux/iio/sysfs.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+#include "icm20648/dmp3Default.h"
+#endif
+#ifdef CONFIG_INV_MPU_IIO_ICM20608D
+#include "icm20608d/dmp3Default_20608D.h"
+#endif
+
+#include "inv_test/inv_counters.h"
+
+#if defined(CONFIG_INV_MPU_IIO_ICM20648)
+#include "icm20648/inv_mpu_iio_reg_20648.h"
+#elif defined(CONFIG_INV_MPU_IIO_ICM20602)
+#include "icm20602/inv_mpu_iio_reg_20602.h"
+#elif defined(CONFIG_INV_MPU_IIO_ICM20608D)
+#include "icm20608d/inv_mpu_iio_reg_20608.h"
+#elif defined(CONFIG_INV_MPU_IIO_ICM20690)
+#include "icm20690/inv_mpu_iio_reg_20690.h"
+#elif defined(CONFIG_INV_MPU_IIO_IAM20680)
+#include "iam20680/inv_mpu_iio_reg_20680.h"
+#endif
+
+#define INVENSENSE_DRIVER_VERSION		"8.1.2-simple-test1"
+
+/* #define DEBUG */
+
+/* #define ACCEL_BIAS_TEST */
+
+/* #define BIAS_CONFIDENCE_HIGH 1 */
+
+#define MAX_FIFO_READ_SIZE       128
+#define MAX_DMP_READ_SIZE        16
+
+/* data header defines */
+#define WAKE_HDR                 0x8000
+
+#define ACCEL_HDR                1
+#define GYRO_HDR                 2
+#define COMPASS_HDR              3
+#define ALS_HDR                  4
+#define SIXQUAT_HDR              5
+#define NINEQUAT_HDR             6
+#define PEDQUAT_HDR              7
+#define GEOMAG_HDR               8
+#define PRESSURE_HDR             9
+#define GYRO_CALIB_HDR           10
+#define COMPASS_CALIB_HDR        11
+#define STEP_COUNTER_HDR         12
+#define STEP_DETECTOR_HDR        13
+#define STEP_COUNT_HDR           14
+#define ACTIVITY_HDR             15
+#define PICK_UP_HDR              16
+#define EMPTY_MARKER             17
+#define END_MARKER               18
+#define COMPASS_ACCURACY_HDR     19
+#define ACCEL_ACCURACY_HDR       20
+#define GYRO_ACCURACY_HDR        21
+#define EIS_GYRO_HDR             36
+#define EIS_CALIB_HDR            37
+#define LPQ_HDR                  38
+
+#define ACCEL_WAKE_HDR           (ACCEL_HDR | WAKE_HDR)
+#define GYRO_WAKE_HDR            (GYRO_HDR | WAKE_HDR)
+#define COMPASS_WAKE_HDR         (COMPASS_HDR | WAKE_HDR)
+#define ALS_WAKE_HDR             (ALS_HDR | WAKE_HDR)
+#define SIXQUAT_WAKE_HDR         (SIXQUAT_HDR | WAKE_HDR)
+#define NINEQUAT_WAKE_HDR        (NINEQUAT_HDR | WAKE_HDR)
+#define PEDQUAT_WAKE_HDR         (PEDQUAT_HDR | WAKE_HDR)
+#define GEOMAG_WAKE_HDR          (GEOMAG_HDR | WAKE_HDR)
+#define PRESSURE_WAKE_HDR        (PRESSURE_HDR | WAKE_HDR)
+#define GYRO_CALIB_WAKE_HDR      (GYRO_CALIB_HDR | WAKE_HDR)
+#define COMPASS_CALIB_WAKE_HDR   (COMPASS_CALIB_HDR | WAKE_HDR)
+#define STEP_COUNTER_WAKE_HDR    (STEP_COUNTER_HDR | WAKE_HDR)
+#define STEP_DETECTOR_WAKE_HDR   (STEP_DETECTOR_HDR | WAKE_HDR)
+
+/* init parameters */
+#define MPU_INIT_SMD_THLD        1500
+#define MPU_INIT_GYRO_SCALE      3
+#define MPU_INIT_ACCEL_SCALE     2
+#define MPU_INIT_PED_INT_THRESH  2
+#define MPU_INIT_PED_STEP_THRESH 6
+#define MPU_4X_TS_GYRO_SHIFT      (3160000 / 2)
+#define DMP_START_ADDR_20645     0x900
+#define DMP_START_ADDR_20648     0x1000
+#define DMP_START_ADDR_10340     0x0a60
+#define DMP_START_ADDR_20608D    0x4B0
+#define MAX_WR_SZ                  100
+#define WOM_DELAY_THRESHOLD      200
+#define INV_ODR_BUFFER_MULTI     20
+#define INV_ODR_OVER_FACTOR      20
+
+#define COVARIANCE_SIZE          14
+#define ACCEL_COVARIANCE_SIZE  (COVARIANCE_SIZE * sizeof(int))
+
+enum inv_bus_type {
+	BUS_I2C = 0,
+	BUS_SPI,
+};
+
+struct inv_mpu_state;
+
+enum INV_ENGINE {
+	ENGINE_GYRO = 0,
+	ENGINE_ACCEL,
+	ENGINE_PRESSURE,
+	ENGINE_I2C,
+	ENGINE_NUM_MAX,
+};
+
+/**
+ *  struct inv_hw_s - Other important hardware information.
+ *  @num_reg:	Number of registers on device.
+ *  @name:      name of the chip
+ */
+struct inv_hw_s {
+	u8 num_reg;
+	u8 *name;
+};
+
+/**
+ *  struct inv_sensor - information for each sensor.
+ *  @ts: this sensors timestamp.
+ *  @ts_adj: sensor timestamp adjustment.
+ *  @previous_ts: previous timestamp for this sensor.
+ *  @dur: duration between samples in ns.
+ *  @rate:  sensor data rate.
+ *  @sample_size: number of bytes for the sensor.
+ *  @odr_addr: output data rate address in DMP.
+ *  @counter_addr: output counter address in DMP.
+ *  @output: output on/off control word.
+ *  @time_calib: calibrate timestamp.
+ *  @sample_calib: calibrate bytes accumulated.
+ *  @div:         divider in DMP mode.
+ *  @calib_flag:  calibrate flag used to improve the accuracy of estimation.
+ *  @on:    sensor on/off.
+ *  @a_en:  accel engine requirement.
+ *  @g_en:  gyro engine requirement.
+ *  @c_en:  compass_engine requirement.
+ *  @p_en:  pressure engine requirement.
+ *  @engine_base: engine base for this sensor.
+ *  @count: number of samples in one session.
+ *  @send: decide whether to send this sample or not.
+ */
+struct inv_sensor {
+	u64 ts;
+	s64 ts_adj;
+	u64 previous_ts;
+	int dur;
+	int rate;
+	u8 sample_size;
+	int odr_addr;
+	int counter_addr;
+	u16 output;
+	u64 time_calib;
+	u32 sample_calib;
+	int div;
+	bool calib_flag;
+	bool on;
+	bool a_en;
+	bool g_en;
+	bool c_en;
+	bool p_en;
+	enum INV_ENGINE engine_base;
+	int count;
+	bool send;
+};
+
+/**
+ *  struct inv_sensor_accuracy - accuracy information for each sensor.
+ *  @output: output on/off control word.
+ *  @sample_size: number of bytes for the sensor.
+ *  @on:    sensor on/off.
+ *  @header: accuracy header used to communicate with the HAL.
+ */
+struct inv_sensor_accuracy {
+	u16 output;
+	u8 sample_size;
+	bool on;
+	u16 header;
+};
+
+enum SENSOR_ACCURACY {
+	SENSOR_ACCEL_ACCURACY = 0,
+	SENSOR_GYRO_ACCURACY,
+	SENSOR_COMPASS_ACCURACY,
+	SENSOR_ACCURACY_NUM_MAX,
+};
+
+enum SENSOR_L {
+	SENSOR_L_ACCEL = 0,
+	SENSOR_L_GYRO,
+	SENSOR_L_MAG,
+	SENSOR_L_ALS,
+	SENSOR_L_SIXQ,
+	SENSOR_L_THREEQ,
+	SENSOR_L_NINEQ,
+	SENSOR_L_PEDQ,
+	SENSOR_L_GEOMAG,
+	SENSOR_L_PRESSURE,
+	SENSOR_L_GYRO_CAL,
+	SENSOR_L_MAG_CAL,
+	SENSOR_L_EIS_GYRO,
+	/*wake sensors */
+	SENSOR_L_ACCEL_WAKE = 13,
+	SENSOR_L_GYRO_WAKE,
+	SENSOR_L_MAG_WAKE,
+	SENSOR_L_ALS_WAKE,
+	SENSOR_L_SIXQ_WAKE,
+	SENSOR_L_NINEQ_WAKE,
+	SENSOR_L_PEDQ_WAKE,
+	SENSOR_L_GEOMAG_WAKE,
+	SENSOR_L_PRESSURE_WAKE,
+	SENSOR_L_GYRO_CAL_WAKE,
+	SENSOR_L_MAG_CAL_WAKE,
+	SENSOR_L_GESTURE_ACCEL,
+	SENSOR_L_NUM_MAX,
+};
+
+/**
+ *  struct android_l_sensor - information for each android sensor.
+ *  @ts: this sensors timestamp.
+ *  @base: android sensor based on invensense sensor.
+ *  @rate: output rate.
+ *  @on:  sensor on/off.
+ *  @wake_on: wake on sensor is on/off.
+ *  @div: divider for the output.
+ *  @counter: counter works with the divider.
+ *  @header: header for the output.
+ */
+struct android_l_sensor {
+	u64 ts;
+	enum INV_SENSORS base;
+	int rate;
+	bool on;
+	bool wake_on;
+	int div;
+	int counter;
+	u16 header;
+};
+
+/**
+ *  struct inv_batch - information for batchmode.
+ *  @on: normal batch mode on.
+ *  @default_on: default batch on. This is optimization option.
+ *  @overflow_on: overflow mode for batchmode.
+ *  @wake_fifo_on: overflow for suspend mode.
+ *  @step_only: means only step detector data is batched.
+ *  @post_isr_run: means the post ISR has run once.
+ *  @counter: counter for batch mode.
+ *  @timeout: nominal timeout value for batchmode in milliseconds.
+ *  @max_rate: max rate for all batched sensors.
+ *  @pk_size: packet size;
+ *  @engine_base: engine base batch mode should stick to.
+ */
+struct inv_batch {
+	bool on;
+	bool default_on;
+	bool overflow_on;
+	bool wake_fifo_on;
+	bool step_only;
+	bool post_isr_run;
+	u32 counter;
+	u32 timeout;
+	u32 max_rate;
+	u32 pk_size;
+	u32 fifo_wm_th;
+	enum INV_ENGINE engine_base;
+};
+
+/**
+ *  struct inv_chip_config_s - Cached chip configuration data.
+ *  @fsr:		Full scale range.
+ *  @lpf:		Digital low pass filter frequency.
+ *  @accel_fs:		accel full scale range.
+ *  @accel_enable:	enable accel functionality
+ *  @gyro_enable:	enable gyro functionality
+ *  @compass_enable:    enable compass functionality.
+ *  @geomag_enable:     enable geomag sensor functions.
+ *  @als_enable:        enable ALS functionality.
+ *  @pressure_enable:   enable pressure functionality.
+ *  @secondary_enable:  secondary I2C bus enabled or not.
+ *  @has_gyro:	has gyro or not.
+ *  @has_compass:	has secondary I2C compass or not.
+ *  @has_pressure:      has secondary I2C pressure or not.
+ *  @has_als:           has secondary I2C als or not.
+ *  @slave_enable:      secondary I2C interface enabled or not.
+ *  @normal_compass_measure: discard first compass data after reset.
+ *  @is_asleep:		1 if chip is powered down.
+ *  @lp_en_set:         1 if LP_EN bit is set.
+ *  @lp_en_mode_off:    debug mode that turns LP_EN mode off.
+ *  @clk_sel:           debug mode that turns clock selection on/off.
+ *  @dmp_on:		dmp is on/off.
+ *  @dmp_event_int_on:  dmp event interrupt on/off.
+ *  @wom_on:        WOM interrupt on. This is an internal variable.
+ *  @step_indicator_on: step indicate bit added to the sensor or not.
+ *  @tilt_enable: tilt enable.
+ *  @pick_up_enable: pick up gesture enable.
+ *  @step_detector_on:  step detector on or not.
+ *  @activity_on: turn on/off activity.
+ *  @activity_eng_on: activity engine on/off.
+ *  @firmware_loaded:	flag indicate firmware loaded or not.
+ *  @low_power_gyro_on: flag indicating low power gyro on/off.
+ *  @wake_on: any wake on sensor is on/off.
+ *  @compass_rate:    compass engine rate. Determined by underlying data.
+ */
+struct inv_chip_config_s {
+	u32 fsr:2;
+	u32 lpf:3;
+	u32 accel_fs:2;
+	u32 accel_enable:1;
+	u32 gyro_enable:1;
+	u32 compass_enable:1;
+	u32 geomag_enable:1;
+	u32 als_enable:1;
+	u32 prox_enable:1;
+	u32 pressure_enable:1;
+	u32 has_gyro:1;
+	u32 has_compass:1;
+	u32 has_pressure:1;
+	u32 has_als:1;
+	u32 slave_enable:1;
+	u32 normal_compass_measure:1;
+	u32 is_asleep:1;
+	u32 lp_en_set:1;
+	u32 lp_en_mode_off:1;
+	u32 clk_sel:1;
+	u32 dmp_on:1;
+	u32 dmp_event_int_on:1;
+	u32 wom_on:1;
+	u32 step_indicator_on:1;
+	u32 tilt_enable:1;
+	u32 pick_up_enable:1;
+	u32 eis_enable:1;
+	u32 step_detector_on:1;
+	u32 activity_on:1;
+	u32 activity_eng_on:1;
+	u32 firmware_loaded:1;
+	u32 low_power_gyro_on:1;
+	u32 wake_on:1;
+	int compass_rate;
+};
+
+/**
+ *  struct inv_temp_comp - temperature compensation structure.
+ *  @t_lo:    raw temperature in low temperature.
+ *  @t_hi:    raw temperature in high temperature.
+ *  @b_lo:    gyro bias in low temperature.
+ *  @b_hi:    gyro bias in high temperature.
+ *  @has_low:    flag indicating the low temperature parameters are updated.
+ *  @has_high:   flag indicating the high temperature parameters are updated.
+ *  @slope:      slope for temperature compensation.
+ */
+struct inv_temp_comp {
+	int t_lo;
+	int t_hi;
+	int b_lo[3];
+	int b_hi[3];
+	bool has_low;
+	bool has_high;
+	int slope[3];
+};
+
+/**
+ *  struct inv_chip_info_s - Chip related information.
+ *  @product_id:	Product id.
+ *  @product_revision:	Product revision.
+ *  @silicon_revision:	Silicon revision.
+ *  @software_revision:	software revision.
+ *  @compass_sens:	compass sensitivity.
+ *  @gyro_sens_trim:	Gyro sensitivity trim factor.
+ *  @accel_sens_trim:    accel sensitivity trim factor.
+ */
+struct inv_chip_info_s {
+	u8 product_id;
+	u8 product_revision;
+	u8 silicon_revision;
+	u8 software_revision;
+	u8 compass_sens[3];
+	u32 gyro_sens_trim;
+	u32 accel_sens_trim;
+};
+
+/**
+ * struct inv_smd significant motion detection structure.
+ * @threshold: accel threshold for motion detection.
+ * @delay: delay time to confirm 2nd motion.
+ * @delay2: delay window parameter.
+ * @on: smd on/off.
+ */
+struct inv_smd {
+	u32 threshold;
+	u32 delay;
+	u32 delay2;
+	bool on;
+};
+
+/**
+ * struct inv_ped pedometer related data structure.
+ * @step: steps taken.
+ * @time: time taken during the period.
+ * @last_step_time: last time the step is taken.
+ * @step_thresh: step threshold to show steps.
+ * @int_thresh: step threshold to generate interrupt.
+ * @int_on:   pedometer interrupt enable/disable.
+ * @on:  pedometer on/off.
+ * @engine_on: pedometer engine on/off.
+ */
+struct inv_ped {
+	u64 step;
+	u64 time;
+	u64 last_step_time;
+	u16 step_thresh;
+	u16 int_thresh;
+	bool int_on;
+	bool on;
+	bool engine_on;
+};
+
+/**
+ * struct inv_eis EIS related data structure.
+ * @prev_gyro: latest gyro data just before FSYNC triggered
+ * @prev_timestamp: latest gyro timestamp just before FSYNC triggered
+ * @current_gyro: gyro data just after FSYNC triggered
+ * @current_timestamp: gyro timestamp just after FSYNC triggered
+ * @fsync_timestamp: timestamp of the FSYNC event
+ * @fsync_delay: delay time between FSYNC and gyro data; DMP data of the FSYNC event
+ * @eis_triggered: whether an fsync event has been triggered or not.
+ * @eis_frame: current frame is eis frame;
+ * @current_sync: current frame contains fsync counter.
+ * @frame_count: frame count for synchronization.
+ */
+struct inv_eis {
+	int prev_gyro[3];
+	u64 prev_timestamp;
+	int current_gyro[3];
+	u64 current_timestamp;
+	u32 frame_dur;
+	u64 slope[3];
+	u64 fsync_timestamp;
+	u64 last_fsync_timestamp;
+	u16 fsync_delay;
+	bool eis_triggered;
+	bool eis_frame;
+	bool current_sync;
+	bool prev_state;
+	u32 frame_count;
+	int gyro_counter;
+	int gyro_counter_s[3];
+	int fsync_delay_s[3];
+	int voting_count;
+	int voting_count_sub;
+	int voting_state;
+	int count_precision;
+};
+
+enum TRIGGER_STATE {
+	DATA_TRIGGER = 0,
+	RATE_TRIGGER,
+	EVENT_TRIGGER,
+	MISC_TRIGGER,
+	DEBUG_TRIGGER,
+};
+
+enum inv_fifo_count_mode {
+	BYTE_MODE,
+	RECORD_MODE
+};
+
+/**
+ *  struct inv_secondary_reg - secondary registers data structure.
+ *  @addr:       address of the slave.
+ *  @reg: register address of slave.
+ *  @ctrl: control register.
+ *  @d0: data out register.
+ */
+struct inv_secondary_reg {
+	u8 addr;
+	u8 reg;
+	u8 ctrl;
+	u8 d0;
+};
+
+struct inv_secondary_set {
+	u8 delay_enable;
+	u8 delay_time;
+	u8 odr_config;
+};
+/**
+ *  struct inv_engine_info - data structure for engines.
+ *  @base_time: base time for each engine.
+ *  @base_time_1k: base time when chip is running at 1K;
+ *  @divider: divider used to downsample engine rate from original rate.
+ *  @running_rate: the actually running rate of engine.
+ *  @orig_rate: original rate for each engine before downsample.
+ *  @dur: duration for one tick.
+ *  @last_update_time: last update time.
+ */
+struct inv_engine_info {
+	u32 base_time;
+	u32 base_time_1k;
+	u32 divider;
+	u32 running_rate;
+	u32 orig_rate;
+	u32 dur;
+	u64 last_update_time;
+};
+
+struct inv_ois {
+	int gyro_fs;
+	int accel_fs;
+	bool en;
+};
+
+/**
+ *  struct inv_timestamp_algo - timestamp algorithm .
+ *  @last_run_time: last time the post ISR runs.
+ *  @ts_for_calib: ts storage for calibration.
+ *  @reset_ts: reset time.
+ *  @dmp_ticks: dmp ticks storage for calibration.
+ *  @start_dmp_counter: dmp counter when start a new session.
+ *  @calib_counter: calibration counter for timestamp.
+ *  @resume_flag: flag to indicate this is the first time after resume. The
+ *                 time could have up to 1 second of difference.
+ *  @clock_base: clock base to calculate the timestamp.
+ *  @gyro_ts_shift: 9 K counter for EIS.
+ *  @first_sample: first sample of 1K running should be dropped as it affects timing
+ */
+struct inv_timestamp_algo {
+	u64 last_run_time;
+	u64 ts_for_calib;
+	u64 reset_ts;
+	u32 dmp_ticks;
+	u32 start_dmp_counter;
+	int calib_counter;
+	bool resume_flag;
+	enum INV_ENGINE clock_base;
+	u32 gyro_ts_shift;
+	u32 first_sample;
+};
+
+struct inv_mpu_slave;
+/**
+ *  struct inv_mpu_state - Driver state variables.
+ *  @dev:               device address of the current bus, i2c or spi.
+ *  @chip_config:	Cached attribute information.
+ *  @chip_info:		Chip information from read-only registers.
+ *  @smd:               SMD data structure.
+ *  @ped:               pedometer data structure.
+ *  @batch:             batchmode data structure.
+ *  @temp_comp:         gyro temperature compensation structure.
+ *  @slave_compass:     slave compass.
+ *  @slave_pressure:    slave pressure.
+ *  @slave_als:         slave als.
+ *  @slv_reg: slave register data structure.
+ *  @ts_algo: timestamp algorithm data structure.
+ *  @sec_set: slave register odr config.
+ *  @eng_info: information for each engine.
+ *  @hw:		Other hardware-specific information.
+ *  @chip_type:		chip type.
+ *  @suspend_resume_sema: semaphore for suspend/resume.
+ *  @wake_lock: wake lock of the system.
+ *  @client:		i2c client handle.
+ *  @plat_data:		platform data.
+ *  @sl_handle:         Handle to I2C port.
+ *  @sensor[SENSOR_NUM_MAX]: sensor individual properties.
+ *  @sensor_l[SENSOR_L_NUM_MAX]: android L sensors properties.
+ *  @sensor_accuracy[SENSOR_ACCURACY_NUM_MAX]: sensor accuracy.
+ *  @sensor_acurracy_flag: flag indicating whether to check output accuracy.
+ *  @irq:               irq number store.
+ *  @accel_bias:        accel bias store.
+ *  @gyro_bias:         gyro bias store.
+ *  @accel_st_bias:     accel bias store, result of self-test.
+ *  @gyro_st_bias:      gyro bias store, result of self-test.
+ *  @gyro_ois_st_bias:  gyro bias store from ois self test result.
+ *  @input_accel_dmp_bias[3]: accel bias for dmp.
+ *  @input_gyro_dmp_bias[3]: gyro bias for dmp.
+ *  @input_compass_dmp_bias[3]: compass bias for dmp.
+ *  @input_accel_bias[3]: accel bias for offset register.
+ *  @input_gyro_bias[3]: gyro bias for offset register.
+ *  @fifo_data[8]: fifo data storage.
+ *  @i2c_addr:          i2c address.
+ *  @header_count:      header count in current FIFO.
+ *  @step_det_count:    number of step detectors in one batch.
+ *  @gyro_sf: gyro scale factor.
+ *  @left_over[LEFT_OVER_BYTES]: left over bytes storage.
+ *  @left_over_size: left over size.
+ *  @fifo_count: current FIFO count.
+ *  @wake_sensor_received: wake up sensor received.
+ *  @accel_cal_enable:  accel calibration on/off
+ *  @gyro_cal_enable:   gyro calibration on/off
+ *  @calib_compass_on: calibrate compass on.
+ *  @debug_determine_engine_on: determine engine on/off.
+ *  @poke_mode_on: poke mode on/off.
+ *  @mode_1k_on: indicate 1K Hz mode is on.
+ *  @poke_ts: time stamp for poke feature.
+ *  @step_detector_base_ts: base time stamp for step detector calculation.
+ *  @last_temp_comp_time: last time temperature compensation is done.
+ *  @i2c_dis: disable I2C interface or not.
+ *  @name: name for the chip.
+ *  @gyro_st_data: gyro self test data.
+ *  @accel_st_data: accel self test data.
+ *  @secondary_name: name for the slave device in the secondary I2C.
+ *  @compass_var: compass variance from DMP.
+ *  @current_compass_matrix: matrix the compass data is multiplied by before soft iron.
+ *  @final_compass_matrix: final matrix the compass data is multiplied by before soft iron.
+ *  @trigger_state: information about which part triggers set_inv_enable.
+ *  @firmware: firmware data pointer.
+ *  @accel_calib_threshold: accel calibration threshold.
+ *  @accel_calib_rate: divider for accel calibration rate.
+ *  @accel_covariance[COVARIANCE_SIZE]: accel covariance data.
+ *  @kf: kfifo for activity store.
+ *  @activity_size: size for activity.
+ *  @cntl: control word for sensor enable.
+ *  @cntl2: control word for sensor extension.
+ *  @motion_event_cntl: control word for events.
+ *  @dmp_image_size: dmp image size.
+ *  @dmp_start_address: start address of dmp.
+ *  @step_counter_l_on: step counter android L sensor on/off.
+ *  @step_counter_wake_l_on: step counter android L sensor wake on/off.
+ *  @step_detector_l_on: step detector android L sensor on/off.
+ *  @step_detector_wake_l_on: step detector android L sensor wake on/off.
+ *  @gesture_only_on: indicate it is gesture only.
+ *  @mag_divider: mag divider when gyro/accel is faster than mag maximum rate.
+ *  @special_mag_mode: for 20690, there is a special mag mode that needs to be handled.
+ *  @mag_start_flag: when mag divider is non zero, need to check the start.
+ *  @prev_steps: previous steps sent to the user.
+ *  @aut_key_in: authentication key input.
+ *  @aut_key_out: authentication key output.
+ *  @suspend_state: state variable to indicate that we are in suspend state.
+ *  @secondary_gyro_on: DMP out signal to turn on gyro.
+ *  @secondary_mag_on:  DMP out signal to turn on mag.
+ *  @secondary_prox_on: DMP out signal to turn on proximity.
+ *  @secondary_switch: showing this setup is triggered by the secondary switch.
+ *  @send_calib_gyro:       flag to indicate to send calibrated gyro.
+ *  @send_raw_compass: flag to send raw compass.
+ *  @resume_state: flag to synchronize the processing of inv_read_fifo()
+ *  @cycle_on: variable indicate accel cycle mode is on.
+ *  @secondary_switch_data: secondary switch data for activity.
+ *  @raw_gyro_data[6]:    save raw gyro data.
+ *  @raw_compass_data[3]: save raw compass data.
+ *  @wait_queue: wait queue to wake up inv_read_fifo()
+ *  @bac_drive_conf: bac drive configuration.
+ *  @bac_walk_conf: bac walk configuration.
+ *  @bac_smd_conf: bac smd configuration.
+ *  @bac_bike_conf: bac bike configuration.
+ *  @bac_run_conf: bac run configuration.
+ *  @bac_still_conf: bac still configuration.
+ *  @power_on_data: power on data.
+ *  @fifo_data_store: store of FIFO data.
+ *  @int_en: store interrupt enable register data.
+ *  @int_en_2: store interrupt enable register 2 data.
+ *  @gesture_int_count: interrupt count for gesture only mode.
+ *  @smplrt_div: SMPLRT_DIV register value.
+ */
+struct inv_mpu_state {
+	struct device *dev;
+	int (*write)(struct inv_mpu_state *st, u8 reg, u8 data);
+	int (*read)(struct inv_mpu_state *st, u8 reg, int len, u8 *data);
+	int (*mem_write)(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+	                 u32 len, u8 const *data);
+	int (*mem_read)(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+	                u32 len, u8 *data);
+	struct inv_chip_config_s chip_config;
+	struct inv_chip_info_s chip_info;
+	struct inv_smd smd;
+	struct inv_ped ped;
+	struct inv_eis eis;
+	struct inv_batch batch;
+	struct inv_temp_comp temp_comp;
+	struct inv_mpu_slave *slave_compass;
+	struct inv_mpu_slave *slave_pressure;
+	struct inv_mpu_slave *slave_als;
+	struct inv_secondary_reg slv_reg[4];
+	struct inv_timestamp_algo ts_algo;
+	struct inv_secondary_set sec_set;
+	struct inv_engine_info eng_info[ENGINE_NUM_MAX];
+	const struct inv_hw_s *hw;
+	enum inv_devices chip_type;
+	enum inv_bus_type bus_type;
+	enum inv_fifo_count_mode fifo_count_mode;
+#ifdef CONFIG_HAS_WAKELOCK
+	struct wake_lock wake_lock;
+#else
+	struct wakeup_source wake_lock;
+#endif
+#ifdef TIMER_BASED_BATCHING
+	struct hrtimer hr_batch_timer;
+	u64 batch_timeout;
+	bool is_batch_timer_running;
+	struct work_struct batch_work;
+#endif
+	struct i2c_client *client;
+	struct mpu_platform_data plat_data;
+	void *sl_handle;
+	struct inv_sensor sensor[SENSOR_NUM_MAX];
+	struct android_l_sensor sensor_l[SENSOR_L_NUM_MAX];
+	struct inv_sensor_accuracy sensor_accuracy[SENSOR_ACCURACY_NUM_MAX];
+	struct inv_ois ois;
+	bool sensor_acurracy_flag[SENSOR_ACCURACY_NUM_MAX];
+	short irq;
+	int accel_bias[3];
+	int gyro_bias[3];
+	int accel_st_bias[3];
+	int accel_ois_st_bias[3];
+	int gyro_st_bias[3];
+	int gyro_ois_st_bias[3];
+	int input_accel_dmp_bias[3];
+	int input_gyro_dmp_bias[3];
+	int input_compass_dmp_bias[3];
+	int input_accel_bias[3];
+	int input_gyro_bias[3];
+	u8 fifo_data[8];
+	u8 i2c_addr;
+	int header_count;
+	int step_det_count;
+	s32 gyro_sf;
+	u8 left_over[LEFT_OVER_BYTES];
+	u32 left_over_size;
+	u32 fifo_count;
+	bool wake_sensor_received;
+	bool accel_cal_enable;
+	bool gyro_cal_enable;
+	bool calib_compass_on;
+	bool debug_determine_engine_on;
+	bool poke_mode_on;
+	bool mode_1k_on;
+	u64 poke_ts;
+	u64 step_detector_base_ts;
+	u64 last_temp_comp_time;
+	u8 i2c_dis;
+	u8 name[20];
+	u8 gyro_st_data[3];
+	u8 accel_st_data[3];
+	u8 secondary_name[20];
+	s32 compass_var;
+	int current_compass_matrix[9];
+	int final_compass_matrix[9];
+	enum TRIGGER_STATE trigger_state;
+	u8 *firmware;
+	int accel_calib_threshold;
+	int accel_calib_rate;
+	u32 accel_covariance[COVARIANCE_SIZE];
+	DECLARE_KFIFO(kf, u8, 128);
+	u32 activity_size;
+	int wom_thld;
+	u16 cntl;
+	u16 cntl2;
+	u16 motion_event_cntl;
+	int dmp_image_size;
+	int dmp_start_address;
+	bool step_counter_l_on;
+	bool step_counter_wake_l_on;
+	bool step_detector_l_on;
+	bool step_detector_wake_l_on;
+	bool gesture_only_on;
+	bool mag_start_flag;
+	int mag_divider;
+	bool special_mag_mode;
+	int prev_steps;
+	u32 curr_steps;
+	int aut_key_in;
+	int aut_key_out;
+	bool secondary_gyro_on;
+	bool secondary_mag_on;
+	bool secondary_prox_on;
+	bool secondary_switch;
+	bool send_calib_gyro;
+	bool send_raw_compass;
+	bool send_raw_gyro;
+	bool resume_state;
+	bool cycle_on;
+	int secondary_switch_data;
+	u8 raw_gyro_data[6];
+	u32 raw_compass_data[3];
+	wait_queue_head_t wait_queue;
+	u32 bac_drive_conf;
+	u32 bac_walk_conf;
+	u32 bac_smd_conf;
+	u32 bac_bike_conf;
+	u32 bac_run_conf;
+	u32 bac_still_conf;
+	u32 power_on_data;
+	u8 fifo_data_store[HARDWARE_FIFO_SIZE + LEFT_OVER_BYTES];
+	u8 int_en;
+	u8 int_en_2;
+	u8 gesture_int_count;
+	u8 smplrt_div;
+};
+
+/**
+ *  struct inv_mpu_slave - MPU slave structure.
+ *  @st_upper:  compass self test upper limit.
+ *  @st_lower:  compass self test lower limit.
+ *  @scale: compass scale.
+ *  @rate_scale: decide how fast a compass can read.
+ *  @min_read_time: minimum time between each reading.
+ *  @self_test: self test method of the slave.
+ *  @set_scale: set scale of the slave.
+ *  @get_scale: read scale back of the slave.
+ *  @suspend:		suspend operation.
+ *  @resume:		resume operation.
+ *  @setup:		set up and initialize the chip.
+ *  @combine_data:	combine raw data into meaningful data.
+ *  @read_data:        read external sensor and output data.
+ *  @get_mode:		get current chip mode.
+ *  @set_lpf:            set low pass filter.
+ *  @set_fs:             set full scale.
+ *  @prev_ts: last time it is read.
+ */
+struct inv_mpu_slave {
+	const short *st_upper;
+	const short *st_lower;
+	int scale;
+	int rate_scale;
+	int min_read_time;
+	int (*self_test) (struct inv_mpu_state *);
+	int (*set_scale) (struct inv_mpu_state *, int scale);
+	int (*get_scale) (struct inv_mpu_state *, int *val);
+	int (*suspend) (struct inv_mpu_state *);
+	int (*resume) (struct inv_mpu_state *);
+	int (*setup) (struct inv_mpu_state *);
+	int (*combine_data) (u8 *in, short *out);
+	int (*read_data) (struct inv_mpu_state *, short *out);
+	int (*get_mode) (void);
+	int (*set_lpf) (struct inv_mpu_state *, int rate);
+	int (*set_fs) (struct inv_mpu_state *, int fs);
+	u64 prev_ts;
+};
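+
+/*
+ * Illustrative only, not part of the driver: a hypothetical secondary
+ * compass driver would populate the ops table above roughly as in the
+ * sketch below; every name in the sketch is invented.
+ *
+ *	static int dummy_compass_setup(struct inv_mpu_state *st)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static int dummy_compass_combine(u8 *in, short *out)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < 3; i++)
+ *			out[i] = (short)(in[2 * i] | (in[2 * i + 1] << 8));
+ *		return 0;
+ *	}
+ *
+ *	static struct inv_mpu_slave dummy_compass_slave = {
+ *		.setup		= dummy_compass_setup,
+ *		.combine_data	= dummy_compass_combine,
+ *		.scale		= 1,
+ *	};
+ *
+ * inv_mpu_setup_compass_slave() would then point st->slave_compass at
+ * such a table so the core code can call through these hooks.
+ */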
+
+/* scan element definition */
+enum inv_mpu_scan {
+	INV_MPU_SCAN_TIMESTAMP,
+};
+
+/* IIO attribute address */
+enum MPU_IIO_ATTR_ADDR {
+	ATTR_DMP_GYRO_X_DMP_BIAS,
+	ATTR_DMP_GYRO_Y_DMP_BIAS,
+	ATTR_DMP_GYRO_Z_DMP_BIAS,
+	ATTR_DMP_GYRO_CAL_ENABLE,
+	ATTR_DMP_ACCEL_X_DMP_BIAS,
+	ATTR_DMP_ACCEL_Y_DMP_BIAS,
+	ATTR_DMP_ACCEL_Z_DMP_BIAS,
+	ATTR_DMP_MAGN_X_DMP_BIAS,
+	ATTR_DMP_MAGN_Y_DMP_BIAS,
+	ATTR_DMP_MAGN_Z_DMP_BIAS,
+	ATTR_DMP_MAGN_ACCURACY,
+	ATTR_GYRO_X_OFFSET,
+	ATTR_GYRO_Y_OFFSET,
+	ATTR_GYRO_Z_OFFSET,
+	ATTR_ACCEL_X_OFFSET,
+	ATTR_ACCEL_Y_OFFSET,
+	ATTR_ACCEL_Z_OFFSET,
+	ATTR_DMP_SC_AUTH,
+	ATTR_DMP_EIS_AUTH,
+	ATTR_DMP_ACCEL_CAL_ENABLE,
+	ATTR_DMP_PED_INT_ON,
+	ATTR_DMP_PED_STEP_THRESH,
+	ATTR_DMP_PED_INT_THRESH,
+	ATTR_DMP_PED_ON,
+	ATTR_DMP_SMD_ENABLE,
+	ATTR_DMP_TILT_ENABLE,
+	ATTR_DMP_PICK_UP_ENABLE,
+	ATTR_DMP_EIS_ENABLE,
+	ATTR_DMP_PEDOMETER_STEPS,
+	ATTR_DMP_PEDOMETER_TIME,
+	ATTR_DMP_PEDOMETER_COUNTER,
+	ATTR_DMP_LOW_POWER_GYRO_ON,
+	ATTR_DMP_LP_EN_OFF,
+	ATTR_DMP_CLK_SEL,
+	ATTR_DMP_DEBUG_MEM_READ,
+	ATTR_DMP_DEBUG_MEM_WRITE,
+	ATTR_DEBUG_REG_WRITE,
+	ATTR_DEBUG_WRITE_CFG,
+	ATTR_DEBUG_REG_ADDR,
+	ATTR_WOM_THLD,
+	/* ***** above this line are DMP features that need power toggled on/off ***** */
+	/* ***** below this line are DMP features that need no power toggle ***** */
+	ATTR_IN_POWER_ON,
+	ATTR_DMP_ON,
+	ATTR_DMP_EVENT_INT_ON,
+	ATTR_DMP_STEP_COUNTER_ON,
+	ATTR_DMP_STEP_COUNTER_WAKE_ON,
+	ATTR_DMP_BATCHMODE_TIMEOUT,
+	ATTR_DMP_BATCHMODE_WAKE_FIFO_FULL,
+	ATTR_DMP_STEP_DETECTOR_ON,
+	ATTR_DMP_STEP_DETECTOR_WAKE_ON,
+	ATTR_DMP_ACTIVITY_ON,
+	ATTR_DMP_IN_ANGLVEL_ACCURACY_ENABLE,
+	ATTR_DMP_IN_ACCEL_ACCURACY_ENABLE,
+	ATTR_DMP_DEBUG_DETERMINE_ENGINE_ON,
+	ATTR_DMP_MISC_GYRO_RECALIBRATION,
+	ATTR_DMP_MISC_ACCEL_RECALIBRATION,
+	ATTR_DMP_PARAMS_ACCEL_CALIBRATION_THRESHOLD,
+	ATTR_DMP_PARAMS_ACCEL_CALIBRATION_RATE,
+	ATTR_GYRO_SCALE,
+	ATTR_ACCEL_SCALE,
+	ATTR_COMPASS_SCALE,
+	ATTR_COMPASS_SENSITIVITY_X,
+	ATTR_COMPASS_SENSITIVITY_Y,
+	ATTR_COMPASS_SENSITIVITY_Z,
+	ATTR_GYRO_ENABLE,
+	ATTR_ACCEL_ENABLE,
+	ATTR_COMPASS_ENABLE,
+	ATTR_FIRMWARE_LOADED,
+	ATTR_POKE_MODE,
+	ATTR_ANGLVEL_X_CALIBBIAS,
+	ATTR_ANGLVEL_Y_CALIBBIAS,
+	ATTR_ANGLVEL_Z_CALIBBIAS,
+	ATTR_ACCEL_X_CALIBBIAS,
+	ATTR_ACCEL_Y_CALIBBIAS,
+	ATTR_ACCEL_Z_CALIBBIAS,
+	ATTR_ANGLVEL_X_ST_CALIBBIAS,
+	ATTR_ANGLVEL_Y_ST_CALIBBIAS,
+	ATTR_ANGLVEL_Z_ST_CALIBBIAS,
+	ATTR_ANGLVEL_X_OIS_ST_CALIBBIAS,
+	ATTR_ANGLVEL_Y_OIS_ST_CALIBBIAS,
+	ATTR_ANGLVEL_Z_OIS_ST_CALIBBIAS,
+	ATTR_ACCEL_X_ST_CALIBBIAS,
+	ATTR_ACCEL_Y_ST_CALIBBIAS,
+	ATTR_ACCEL_Z_ST_CALIBBIAS,
+	ATTR_ACCEL_X_OIS_ST_CALIBBIAS,
+	ATTR_ACCEL_Y_OIS_ST_CALIBBIAS,
+	ATTR_ACCEL_Z_OIS_ST_CALIBBIAS,
+	ATTR_GYRO_MATRIX,
+	ATTR_ACCEL_MATRIX,
+	ATTR_COMPASS_MATRIX,
+	ATTR_FSYNC_FRAME_COUNT,
+	ATTR_SECONDARY_NAME,
+	ATTR_GYRO_SF,
+	ATTR_BAC_DRIVE_CONFIDENCE,
+	ATTR_BAC_WALK_CONFIDENCE,
+	ATTR_BAC_SMD_CONFIDENCE,
+	ATTR_BAC_BIKE_CONFIDENCE,
+	ATTR_BAC_STILL_CONFIDENCE,
+	ATTR_BAC_RUN_CONFIDENCE,
+	IN_OIS_ACCEL_FS,
+	IN_OIS_GYRO_FS,
+	IN_OIS_ENABLE,
+};
+
+int inv_mpu_configure_ring(struct iio_dev *indio_dev);
+int inv_mpu_probe_trigger(struct iio_dev *indio_dev);
+void inv_mpu_unconfigure_ring(struct iio_dev *indio_dev);
+void inv_mpu_remove_trigger(struct iio_dev *indio_dev);
+#ifdef CONFIG_PM_SLEEP
+int inv_mpu_suspend(struct iio_dev *indio_dev);
+void inv_mpu_complete(struct iio_dev *indio_dev);
+#endif
+
+int inv_get_pedometer_steps(struct inv_mpu_state *st, int *ped);
+int inv_get_pedometer_time(struct inv_mpu_state *st, int *ped);
+int inv_read_pedometer_counter(struct inv_mpu_state *st);
+
+int inv_dmp_read(struct inv_mpu_state *st, int off, int size, u8 *buf);
+int inv_firmware_load(struct inv_mpu_state *st);
+
+int set_inv_enable(struct iio_dev *indio_dev);
+
+int inv_mpu_setup_compass_slave(struct inv_mpu_state *st);
+int inv_mpu_setup_pressure_slave(struct inv_mpu_state *st);
+int inv_mpu_setup_als_slave(struct inv_mpu_state *st);
+int inv_mpu_initialize(struct inv_mpu_state *st);
+int inv_set_accel_sf(struct inv_mpu_state *st);
+int inv_set_gyro_sf(struct inv_mpu_state *st);
+s64 get_time_ns(void);
+int inv_i2c_read_base(struct inv_mpu_state *st, u16 i, u8 r, u16 l, u8 *d);
+int inv_i2c_single_write_base(struct inv_mpu_state *st, u16 i, u8 r, u8 d);
+int write_be32_to_mem(struct inv_mpu_state *st, u32 data, int addr);
+int write_be16_to_mem(struct inv_mpu_state *st, u16 data, int addr);
+int read_be32_from_mem(struct inv_mpu_state *st, u32 *o, int addr);
+int read_be16_from_mem(struct inv_mpu_state *st, u16 *o, int addr);
+u32 inv_get_cntr_diff(u32 curr_counter, u32 prev);
+int inv_write_2bytes(struct inv_mpu_state *st, int k, int data);
+int inv_set_bank(struct inv_mpu_state *st, u8 bank);
+int inv_set_power(struct inv_mpu_state *st, bool power_on);
+int inv_switch_power_in_lp(struct inv_mpu_state *st, bool on);
+#ifndef CONFIG_INV_MPU_IIO_ICM20608D
+int inv_set_accel_config2(struct inv_mpu_state *st, bool cycle_mode);
+#endif
+int inv_stop_dmp(struct inv_mpu_state *st);
+int inv_reset_fifo(struct inv_mpu_state *st, bool turn_off);
+int inv_create_dmp_sysfs(struct iio_dev *ind);
+int inv_check_chip_type(struct iio_dev *indio_dev, const char *name);
+int inv_write_compass_matrix(struct inv_mpu_state *st, int *adj);
+irqreturn_t inv_read_fifo(int irq, void *dev_id);
+#ifdef TIMER_BASED_BATCHING
+void inv_batch_work(struct work_struct *work);
+#endif
+int inv_flush_batch_data(struct iio_dev *indio_dev, int data);
+static inline int mpu_memory_write(struct inv_mpu_state *st, u8 mpu_addr,
+                                   u16 mem_addr, u32 len, u8 const *data)
+{
+	int ret = -1;
+
+	if (st->mem_write)
+		ret = st->mem_write(st, mpu_addr, mem_addr, len, data);
+
+	return ret;
+}
+static inline int mpu_memory_read(struct inv_mpu_state *st, u8 mpu_addr,
+                                  u16 mem_addr, u32 len, u8 *data)
+{
+	int ret = -1;
+
+	if (st->mem_read)
+		ret = st->mem_read(st, mpu_addr, mem_addr, len, data);
+
+	return ret;
+}
+int inv_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+			int reg, int len);
+int inv_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+			int reg, int v);
+int inv_execute_write_secondary(struct inv_mpu_state *st, int ind, int addr,
+				int reg, int v);
+int inv_execute_read_secondary(struct inv_mpu_state *st, int ind, int addr,
+			       int reg, int len, u8 *d);
+
+int inv_push_16bytes_buffer(struct inv_mpu_state *st, u16 hdr,
+						u64 t, int *q, s16 accur);
+int inv_push_gyro_data(struct inv_mpu_state *st, s16 *raw, s32 *calib, u64 t);
+int inv_push_8bytes_buffer(struct inv_mpu_state *st, u16 hdr, u64 t, s16 *d);
+int inv_push_8bytes_kf(struct inv_mpu_state *st, u16 hdr, u64 t, s16 *d);
+
+void inv_push_step_indicator(struct inv_mpu_state *st, u64 t);
+int inv_send_steps(struct inv_mpu_state *st, int step, u64 t);
+int inv_push_marker_to_buffer(struct inv_mpu_state *st, u16 hdr, int data);
+
+int inv_check_sensor_on(struct inv_mpu_state *st);
+int inv_write_cntl(struct inv_mpu_state *st, u16 wd, bool en, int cntl);
+
+int inv_get_packet_size(struct inv_mpu_state *st, u16 hdr,
+						u32 *pk_size, u8 *dptr);
+int inv_parse_packet(struct inv_mpu_state *st, u16 hdr, u8 *dptr);
+int inv_pre_parse_packet(struct inv_mpu_state *st, u16 hdr, u8 *dptr);
+int inv_process_dmp_data(struct inv_mpu_state *st);
+
+int be32_to_int(u8 *d);
+void inv_convert_and_push_16bytes(struct inv_mpu_state *st, u16 hdr,
+							u8 *d, u64 t, s8 *m);
+void inv_convert_and_push_8bytes(struct inv_mpu_state *st, u16 hdr,
+						u8 *d, u64 t, s8 *m);
+int inv_get_dmp_ts(struct inv_mpu_state *st, int i);
+int inv_process_step_det(struct inv_mpu_state *st, u8 *dptr);
+int inv_process_eis(struct inv_mpu_state *st, u16 delay);
+int inv_rate_convert(struct inv_mpu_state *st, int ind, int data);
+
+int inv_setup_dmp_firmware(struct inv_mpu_state *st);
+/* used to print i2c data using pr_debug */
+char *wr_pr_debug_begin(u8 const *data, u32 len, char *string);
+char *wr_pr_debug_end(char *string);
+
+int inv_hw_self_test(struct inv_mpu_state *st);
+int inv_q30_mult(int a, int b);
+#ifdef ACCEL_BIAS_TEST
+int inv_get_3axis_average(s16 src[], s16 dst[], s16 reset);
+#endif
+
+static inline int inv_plat_single_write(struct inv_mpu_state *st,
+							u8 reg, u8 data)
+{
+	int ret = -1;
+
+	if (st->write)
+		ret = st->write(st, reg, data);
+
+	return ret;
+}
+static inline int inv_plat_read(struct inv_mpu_state *st, u8 reg,
+							int len, u8 *data)
+{
+	int ret = -1;
+
+	if (st->read)
+		ret = st->read(st, reg, len, data);
+
+	return ret;
+}
+irqreturn_t inv_read_fifo(int, void *);
+
+int inv_stop_interrupt(struct inv_mpu_state *st);
+int inv_reenable_interrupt(struct inv_mpu_state *st);
+
+int inv_enable_pedometer_interrupt(struct inv_mpu_state *st, bool en);
+int inv_dataout_control1(struct inv_mpu_state *st, u16 cntl1);
+int inv_dataout_control2(struct inv_mpu_state *st, u16 cntl2);
+int inv_motion_interrupt_control(struct inv_mpu_state *st,
+						u16 motion_event_cntl);
+
+int inv_bound_timestamp(struct inv_mpu_state *st);
+int inv_update_dmp_ts(struct inv_mpu_state *st, int ind);
+int inv_get_last_run_time_non_dmp_record_mode(struct inv_mpu_state *st);
+
+#define mem_w(a, b, c) mpu_memory_write(st, st->i2c_addr, a, b, c)
+#define mem_r(a, b, c) mpu_memory_read(st, st->i2c_addr, a, b, c)
+
+#endif /* #ifndef _INV_MPU_IIO_H_ */
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu/inv_mpu_ring.c
new file mode 100644
index 0000000..3e5bccbe
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_ring.c
@@ -0,0 +1,643 @@
+/*
+* Copyright (C) 2012-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/math64.h>
+#include <linux/miscdevice.h>
+
+#include "inv_mpu_iio.h"
+
+static void inv_push_timestamp(struct iio_dev *indio_dev, u64 t)
+{
+	u8 buf[IIO_BUFFER_BYTES];
+	struct inv_mpu_state *st;
+
+	st = iio_priv(indio_dev);
+	if (st->poke_mode_on)
+		memcpy(buf, &st->poke_ts, sizeof(t));
+	else
+		memcpy(buf, &t, sizeof(t));
+	iio_push_to_buffers(indio_dev, buf);
+}
+
+int inv_push_marker_to_buffer(struct inv_mpu_state *st, u16 hdr, int data)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	u8 buf[IIO_BUFFER_BYTES];
+
+	memcpy(buf, &hdr, sizeof(hdr));
+	memcpy(&buf[4], &data, sizeof(data));
+	iio_push_to_buffers(indio_dev, buf);
+
+	return 0;
+}
+static int inv_calc_precision(struct inv_mpu_state *st)
+{
+	int diff;
+	int init;
+
+	if (st->eis.voting_state != 8)
+		return 0;
+	diff = abs(st->eis.fsync_delay_s[1] - st->eis.fsync_delay_s[0]);
+	init = 0;
+	if (diff)
+		init = st->sensor[SENSOR_GYRO].dur / diff;
+
+	if (abs(init - NSEC_PER_USEC) < (NSEC_PER_USEC >> 3))
+		st->eis.count_precision = init;
+	else
+		st->eis.voting_state = 0;
+
+	pr_debug("dur= %d prc= %d\n", st->sensor[SENSOR_GYRO].dur,
+						st->eis.count_precision);
+
+	return 0;
+}
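+
+/*
+ * Illustrative numbers (invented) for the precision voting above: with a
+ * gyro duration of 5,000,000 ns and
+ * |fsync_delay_s[1] - fsync_delay_s[0]| == 5000 counts,
+ * init = 5,000,000 / 5000 = 1000 ns per FSYNC count.  Since
+ * |1000 - NSEC_PER_USEC| = 0 is below (NSEC_PER_USEC >> 3) = 125, the
+ * value is accepted as count_precision; otherwise the voting state is
+ * reset and the estimate is rebuilt from scratch.
+ */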
+
+static s64 calc_frame_ave(struct inv_mpu_state *st, int delay)
+{
+	s64 ts;
+
+	ts = st->eis.current_timestamp - delay;
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || defined(CONFIG_INV_MPU_IIO_ICM20690)
+	ts -= st->ts_algo.gyro_ts_shift;
+#endif
+	pr_debug("shift= %d ts = %lld\n", st->ts_algo.gyro_ts_shift, ts);
+
+	return ts;
+}
+
+static void inv_push_eis_ring(struct inv_mpu_state *st, int *q, bool sync,
+								s64 t)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	struct inv_eis *eis = &st->eis;
+	u8 buf[IIO_BUFFER_BYTES];
+	int tmp, ii;
+
+	buf[0] = (EIS_GYRO_HDR & 0xff);
+	buf[1] = (EIS_GYRO_HDR >> 8);
+	memcpy(buf + 4, &q[0], sizeof(q[0]));
+	iio_push_to_buffers(indio_dev, buf);
+	for (ii = 0; ii < 2; ii++)
+		memcpy(buf + 4 * ii, &q[ii + 1], sizeof(q[ii]));
+	iio_push_to_buffers(indio_dev, buf);
+	tmp = eis->frame_count;
+	if (sync)
+		tmp |= 0x80000000;
+	memcpy(buf, &tmp, sizeof(tmp));
+	iio_push_to_buffers(indio_dev, buf);
+	inv_push_timestamp(indio_dev, t);
+}
+static int inv_do_interpolation_gyro(struct inv_mpu_state *st, int *prev,
+	s64 prev_t, int *curr, s64 curr_t, s64 t, bool trigger)
+{
+	int i;
+	int out[3];
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || defined(CONFIG_INV_MPU_IIO_ICM20690)
+	prev_t -= st->ts_algo.gyro_ts_shift;
+	prev_t += MPU_4X_TS_GYRO_SHIFT;
+	curr_t -= st->ts_algo.gyro_ts_shift;
+	curr_t += MPU_4X_TS_GYRO_SHIFT;
+#endif
+	if ((t > prev_t) && (t < curr_t)) {
+		for (i = 0; i < 3; i++)
+			out[i] = (int)div_s64((s64)(curr[i] - prev[i]) *
+				(s64)(t - prev_t), curr_t - prev_t) + prev[i];
+	} else if (t < prev_t) {
+		for (i = 0; i < 3; i++)
+			out[i] = prev[i];
+	} else {
+		for (i = 0; i < 3; i++)
+			out[i] = curr[i];
+	}
+	pr_debug("prev= %lld t = %lld curr= %lld\n", prev_t, t, curr_t);
+	pr_debug("prev = %d, %d, %d\n", prev[0], prev[1], prev[2]);
+	pr_debug("curr = %d, %d, %d\n", curr[0], curr[1], curr[2]);
+	pr_debug("out = %d, %d, %d\n", out[0], out[1], out[2]);
+	inv_push_eis_ring(st, out, trigger, t);
+
+	return 0;
+}
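+
+/*
+ * The helper above is plain linear interpolation between the two gyro
+ * samples bracketing the FSYNC time t, per axis i:
+ *
+ *   out[i] = prev[i] + (curr[i] - prev[i]) * (t - prev_t) / (curr_t - prev_t)
+ *
+ * Illustrative numbers: prev = 100 at prev_t = 0, curr = 200 at
+ * curr_t = 10,000,000 ns and t = 2,500,000 ns give out = 125.  Outside
+ * the (prev_t, curr_t) window the nearer endpoint is used unchanged.
+ */
+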
+#if defined(CONFIG_INV_MPU_IIO_ICM20648) || defined(CONFIG_INV_MPU_IIO_ICM20690)
+static void inv_handle_triggered_eis(struct inv_mpu_state *st)
+{
+	struct inv_eis *eis = &st->eis;
+	int delay;
+
+	if (st->eis.eis_frame) {
+		inv_calc_precision(st);
+		delay = ((int)st->eis.fsync_delay) * st->eis.count_precision;
+		eis->fsync_timestamp = calc_frame_ave(st, delay);
+		inv_do_interpolation_gyro(st,
+			st->eis.prev_gyro,    st->eis.prev_timestamp,
+			st->eis.current_gyro, st->eis.current_timestamp,
+			eis->fsync_timestamp, true);
+		pr_debug("fsync=%lld, curr=%lld, delay=%d\n",
+			eis->fsync_timestamp, eis->current_timestamp, delay);
+		inv_push_eis_ring(st, st->eis.current_gyro, false,
+			st->eis.current_timestamp - st->ts_algo.gyro_ts_shift
+						+ MPU_4X_TS_GYRO_SHIFT);
+		eis->last_fsync_timestamp = eis->fsync_timestamp;
+	} else {
+		pr_debug("cur= %lld\n", st->eis.current_timestamp);
+		inv_push_eis_ring(st, st->eis.current_gyro, false,
+			st->eis.current_timestamp - st->ts_algo.gyro_ts_shift
+						+ MPU_4X_TS_GYRO_SHIFT);
+	}
+}
+#else
+static void inv_handle_triggered_eis(struct inv_mpu_state *st)
+{
+	struct inv_eis *eis = &st->eis;
+	int delay;
+
+	if ((st->eis.eis_frame && (st->eis.fsync_delay != 5)) ||
+		(st->eis.eis_frame && (st->eis.fsync_delay == 5) &&
+		(!st->eis.current_sync))
+		) {
+		inv_calc_precision(st);
+		delay = ((int)st->eis.fsync_delay) * st->eis.count_precision;
+		eis->fsync_timestamp = calc_frame_ave(st, delay);
+		inv_do_interpolation_gyro(st,
+			st->eis.prev_gyro,    st->eis.prev_timestamp,
+			st->eis.current_gyro, st->eis.current_timestamp,
+			eis->fsync_timestamp, true);
+		pr_debug("fsync=%lld, curr=%lld, delay=%d\n",
+			eis->fsync_timestamp, eis->current_timestamp, delay);
+		inv_push_eis_ring(st, st->eis.current_gyro, false,
+				st->eis.current_timestamp);
+		eis->last_fsync_timestamp = eis->fsync_timestamp;
+		st->eis.eis_frame = false;
+	} else {
+		st->eis.current_sync = false;
+		pr_debug("cur= %lld\n", st->eis.current_timestamp);
+		inv_push_eis_ring(st, st->eis.current_gyro, false,
+				st->eis.current_timestamp);
+	}
+}
+#endif
+static void inv_push_eis_buffer(struct inv_mpu_state *st, u64 t, int *q)
+{
+	int ii;
+
+	if (st->eis.eis_triggered) {
+		for (ii = 0; ii < 3; ii++)
+			st->eis.prev_gyro[ii] = st->eis.current_gyro[ii];
+		st->eis.prev_timestamp = st->eis.current_timestamp;
+
+		for (ii = 0; ii < 3; ii++)
+			st->eis.current_gyro[ii] = q[ii];
+		st->eis.current_timestamp = t;
+		inv_handle_triggered_eis(st);
+	} else {
+		for (ii = 0; ii < 3; ii++)
+			st->eis.current_gyro[ii] = q[ii];
+		st->eis.current_timestamp = t;
+	}
+}
+static int inv_push_16bytes_final(struct inv_mpu_state *st, int j,
+						s32 *q, u64 t, s16 accur)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	u8 buf[IIO_BUFFER_BYTES];
+	int ii;
+
+	memcpy(buf, &st->sensor_l[j].header, sizeof(st->sensor_l[j].header));
+	memcpy(buf + 2, &accur, sizeof(accur));
+	memcpy(buf + 4, &q[0], sizeof(q[0]));
+	iio_push_to_buffers(indio_dev, buf);
+	for (ii = 0; ii < 2; ii++)
+		memcpy(buf + 4 * ii, &q[ii + 1], sizeof(q[ii]));
+	iio_push_to_buffers(indio_dev, buf);
+	inv_push_timestamp(indio_dev, t);
+	st->sensor_l[j].counter = 0;
+	if (st->sensor_l[j].wake_on)
+		st->wake_sensor_received = true;
+
+	return 0;
+}
+int inv_push_16bytes_buffer(struct inv_mpu_state *st, u16 sensor,
+				    u64 t, int *q, s16 accur)
+{
+	int j;
+
+	for (j = 0; j < SENSOR_L_NUM_MAX; j++) {
+		if (st->sensor_l[j].on && (st->sensor_l[j].base == sensor)) {
+			st->sensor_l[j].counter++;
+			if ((st->sensor_l[j].div != 0xffff) &&
+				(st->sensor_l[j].counter >=
+						st->sensor_l[j].div)) {
+				pr_debug(
+	"Sensor_l = %d sensor = %d header [%04X] div [%d] ts [%lld] %d %d %d\n",
+					j, sensor,
+					st->sensor_l[j].header,
+					st->sensor_l[j].div,
+					t, q[0], q[1], q[2]);
+				inv_push_16bytes_final(st, j, q, t, accur);
+			}
+		}
+	}
+	return 0;
+}
+
+void inv_convert_and_push_16bytes(struct inv_mpu_state *st, u16 hdr,
+							u8 *d, u64 t, s8 *m)
+{
+	int i, j;
+	s32 in[3], out[3];
+
+	for (i = 0; i < 3; i++)
+		in[i] = be32_to_int(d + i * 4);
+	/* multiplication with the orientation matrix can be optimized like this */
+	for (i = 0; i < 3; i++)
+		for (j = 0; j < 3; j++)
+			if (m[i * 3 + j])
+				out[i] = in[j] * m[i * 3 + j];
+
+	inv_push_16bytes_buffer(st, hdr, t, out, 0);
+}
+
+void inv_convert_and_push_8bytes(struct inv_mpu_state *st, u16 hdr,
+						u8 *d, u64 t, s8 *m)
+{
+	int i, j;
+	s16 in[3], out[3];
+
+	for (i = 0; i < 3; i++)
+		in[i] = be16_to_cpup((__be16 *) (d + i * 2));
+
+	/* multiplication with the orientation matrix can be optimized like this */
+	for (i = 0; i < 3; i++)
+		for (j = 0; j < 3; j++)
+			if (m[i * 3 + j])
+				out[i] = in[j] * m[i * 3 + j];
+
+	inv_push_8bytes_buffer(st, hdr, t, out);
+}
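+
+/*
+ * Note on the two converters above: mounting matrices here hold only
+ * 0/+1/-1 entries, so the inner loop writes out[i] from the single
+ * non-zero element of row i.  An invented example (not taken from any
+ * particular board) that swaps X/Y and inverts Z:
+ *
+ *	static const s8 example_matrix[9] = {
+ *		0,  1,  0,
+ *		1,  0,  0,
+ *		0,  0, -1,
+ *	};
+ *
+ * With in = {x, y, z} this produces out = {y, x, -z}; every row has
+ * exactly one non-zero entry, so each out[i] is assigned exactly once.
+ */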
+
+int inv_push_special_8bytes_buffer(struct inv_mpu_state *st,
+				   u16 hdr, u64 t, s16 *d)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	u8 buf[IIO_BUFFER_BYTES];
+	int j;
+
+	memcpy(buf, &hdr, sizeof(hdr));
+	memcpy(&buf[2], &d[0], sizeof(d[0]));
+	for (j = 0; j < 2; j++)
+		memcpy(&buf[4 + j * 2], &d[j + 1], sizeof(d[j]));
+	iio_push_to_buffers(indio_dev, buf);
+	inv_push_timestamp(indio_dev, t);
+
+	return 0;
+}
+
+static int inv_s16_gyro_push(struct inv_mpu_state *st, int i, s16 *raw, u64 t)
+{
+	if (st->sensor_l[i].on) {
+		st->sensor_l[i].counter++;
+		if ((st->sensor_l[i].div != 0xffff) &&
+			(st->sensor_l[i].counter >= st->sensor_l[i].div)) {
+			inv_push_special_8bytes_buffer(st,
+					st->sensor_l[i].header, t, raw);
+			st->sensor_l[i].counter = 0;
+			if (st->sensor_l[i].wake_on)
+				st->wake_sensor_received = true;
+		}
+	}
+
+	return 0;
+}
+
+static int inv_s32_gyro_push(struct inv_mpu_state *st, int i, s32 *calib, u64 t)
+{
+	if (st->sensor_l[i].on) {
+		st->sensor_l[i].counter++;
+		if ((st->sensor_l[i].div != 0xffff) &&
+			(st->sensor_l[i].counter >= st->sensor_l[i].div)) {
+			inv_push_16bytes_final(st, i, calib, t, 0);
+			st->sensor_l[i].counter = 0;
+			if (st->sensor_l[i].wake_on)
+				st->wake_sensor_received = true;
+		}
+	}
+
+	return 0;
+}
+
+int inv_push_gyro_data(struct inv_mpu_state *st, s16 *raw, s32 *calib, u64 t)
+{
+	int gyro_data[] = {SENSOR_L_GYRO, SENSOR_L_GYRO_WAKE};
+	int calib_data[] = {SENSOR_L_GYRO_CAL, SENSOR_L_GYRO_CAL_WAKE};
+	int i;
+
+	if (st->sensor_l[SENSOR_L_EIS_GYRO].on)
+		inv_push_eis_buffer(st, t, calib);
+
+	for (i = 0; i < 2; i++)
+		inv_s16_gyro_push(st, gyro_data[i], raw, t);
+	for (i = 0; i < 2; i++)
+		inv_s32_gyro_push(st, calib_data[i], calib, t);
+
+	return 0;
+}
+int inv_push_8bytes_buffer(struct inv_mpu_state *st, u16 sensor, u64 t, s16 *d)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	u8 buf[IIO_BUFFER_BYTES];
+	int ii, j;
+
+	if ((sensor == STEP_DETECTOR_HDR) ||
+					(sensor == STEP_DETECTOR_WAKE_HDR)) {
+		memcpy(buf, &sensor, sizeof(sensor));
+		memcpy(&buf[2], &d[0], sizeof(d[0]));
+		for (j = 0; j < 2; j++)
+			memcpy(&buf[4 + j * 2], &d[j + 1], sizeof(d[j]));
+		iio_push_to_buffers(indio_dev, buf);
+		inv_push_timestamp(indio_dev, t);
+		if (sensor == STEP_DETECTOR_WAKE_HDR)
+			st->wake_sensor_received = true;
+		return 0;
+	}
+	for (ii = 0; ii < SENSOR_L_NUM_MAX; ii++) {
+		if (st->sensor_l[ii].on &&
+		    (st->sensor_l[ii].base == sensor) &&
+		    (st->sensor_l[ii].div != 0xffff)) {
+			st->sensor_l[ii].counter++;
+			if (st->sensor_l[ii].counter >= st->sensor_l[ii].div) {
+				pr_debug(
+	"Sensor_l = %d sensor = %d header [%04X] div [%d] ts [%lld] %d %d %d\n",
+	ii, sensor, st->sensor_l[ii].header,
+	st->sensor_l[ii].div, t, d[0], d[1], d[2]);
+
+				memcpy(buf, &st->sensor_l[ii].header,
+				       sizeof(st->sensor_l[ii].header));
+				memcpy(&buf[2], &d[0], sizeof(d[0]));
+				for (j = 0; j < 2; j++)
+					memcpy(&buf[4 + j * 2], &d[j + 1],
+					       sizeof(d[j]));
+
+				iio_push_to_buffers(indio_dev, buf);
+				inv_push_timestamp(indio_dev, t);
+				st->sensor_l[ii].counter = 0;
+				if (st->sensor_l[ii].wake_on)
+					st->wake_sensor_received = true;
+			}
+		}
+	}
+
+	return 0;
+}
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+/* Activity-to-string helper implemented for the BAC test */
+#define TILT_DETECTED  0x1000
+#define NONE 0x00
+#define DRIVE 0x01
+#define WALK 0x02
+#define RUN 0x04
+#define BIKE 0x08
+#define TILT 0x10
+#define STILL 0x20
+#define DRIVE_WALK (DRIVE | WALK)
+#define DRIVE_RUN (DRIVE | RUN)
+
+char *act_string(s16 data)
+{
+	data &= (~TILT);
+	switch (data) {
+	case NONE:
+		return "None";
+	case DRIVE:
+		return "Drive";
+	case WALK:
+		return "Walk";
+	case RUN:
+		return "Run";
+	case BIKE:
+		return "Bike";
+	case STILL:
+		return "Still";
+	case DRIVE_WALK:
+		return "drive and walk";
+	case DRIVE_RUN:
+		return "drive and run";
+	default:
+		return "Unknown";
+	}
+	return "Unknown";
+}
+
+char *inv_tilt_check(s16 data)
+{
+	if (data & TILT)
+		return "Tilt";
+	else
+		return "None";
+}
+
+int inv_push_8bytes_kf(struct inv_mpu_state *st, u16 hdr, u64 t, s16 *d)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	u8 buf[IIO_BUFFER_BYTES];
+	int i;
+
+	if (st->chip_config.activity_on) {
+		memcpy(buf, &hdr, sizeof(hdr));
+		for (i = 0; i < 3; i++)
+			memcpy(&buf[2 + i * 2], &d[i], sizeof(d[i]));
+
+		kfifo_in(&st->kf, buf, IIO_BUFFER_BYTES);
+		memcpy(buf, &t, sizeof(t));
+		kfifo_in(&st->kf, buf, IIO_BUFFER_BYTES);
+		st->activity_size += IIO_BUFFER_BYTES * 2;
+	}
+	if (st->chip_config.tilt_enable) {
+		pr_debug("d[0] = %04X,  [%X : %s] to [%X : %s]",
+		d[0], d[0] & 0x00FF,
+		inv_tilt_check(d[0] & 0x00FF),
+		(d[0] & 0xFF00) >> 8,  inv_tilt_check((d[0] & 0xFF00) >> 8));
+		sysfs_notify(&indio_dev->dev.kobj, NULL, "poll_tilt");
+	}
+
+	pr_debug("d[0] = %04X,  [%X : %s] to [%X : %s]", d[0], d[0] & 0x00FF,
+		act_string(d[0] & 0x00FF),
+		(d[0] & 0xFF00) >> 8,  act_string((d[0] & 0xFF00) >> 8));
+
+	read_be32_from_mem(st, &st->bac_drive_conf, BAC_DRIVE_CONFIDENCE);
+	read_be32_from_mem(st, &st->bac_walk_conf, BAC_WALK_CONFIDENCE);
+	read_be32_from_mem(st, &st->bac_smd_conf, BAC_SMD_CONFIDENCE);
+	read_be32_from_mem(st, &st->bac_bike_conf, BAC_BIKE_CONFIDENCE);
+	read_be32_from_mem(st, &st->bac_still_conf, BAC_STILL_CONFIDENCE);
+	read_be32_from_mem(st, &st->bac_run_conf, BAC_RUN_CONFIDENCE);
+
+	return 0;
+}
+#endif
+
+int inv_send_steps(struct inv_mpu_state *st, int step, u64 ts)
+{
+	s16 s[3];
+
+	s[0] = 0;
+	s[1] = (s16) (step & 0xffff);
+	s[2] = (s16) ((step >> 16) & 0xffff);
+	if (st->step_counter_l_on)
+		inv_push_special_8bytes_buffer(st, STEP_COUNTER_HDR, ts, s);
+	if (st->step_counter_wake_l_on) {
+		inv_push_special_8bytes_buffer(st, STEP_COUNTER_WAKE_HDR,
+					       ts, s);
+		st->wake_sensor_received = true;
+	}
+	return 0;
+}
+
+void inv_push_step_indicator(struct inv_mpu_state *st, u64 t)
+{
+	s16 sen[3];
+#define STEP_INDICATOR_HEADER 0x0001
+
+	sen[0] = 0;
+	sen[1] = 0;
+	sen[2] = 0;
+	inv_push_8bytes_buffer(st, STEP_INDICATOR_HEADER, t, sen);
+}
+
+/*
+ *  inv_irq_handler() - Cache a timestamp at each data ready interrupt.
+ */
+static irqreturn_t inv_irq_handler(int irq, void *dev_id)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+#ifdef TIMER_BASED_BATCHING
+static enum hrtimer_restart inv_batch_timer_handler(struct hrtimer *timer)
+{
+	struct inv_mpu_state *st =
+		container_of(timer, struct inv_mpu_state, hr_batch_timer);
+
+	if (st->chip_config.gyro_enable || st->chip_config.accel_enable) {
+		hrtimer_forward_now(&st->hr_batch_timer,
+			ns_to_ktime(st->batch_timeout));
+		schedule_work(&st->batch_work);
+		return HRTIMER_RESTART;
+	}
+	st->is_batch_timer_running = 0;
+	return HRTIMER_NORESTART;
+}
+#endif
+
+void inv_mpu_unconfigure_ring(struct iio_dev *indio_dev)
+{
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+#ifdef KERNEL_VERSION_4_X
+	devm_free_irq(st->dev, st->irq, st);
+	devm_iio_kfifo_free(st->dev, indio_dev->buffer);
+#else
+	free_irq(st->irq, st);
+	iio_kfifo_free(indio_dev->buffer);
+#endif
+};
+EXPORT_SYMBOL_GPL(inv_mpu_unconfigure_ring);
+
+#ifndef KERNEL_VERSION_4_X
+static int inv_predisable(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static int inv_preenable(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static const struct iio_buffer_setup_ops inv_mpu_ring_setup_ops = {
+	.preenable = &inv_preenable,
+	.predisable = &inv_predisable,
+};
+#endif
+
+int inv_mpu_configure_ring(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	struct iio_buffer *ring;
+
+#ifdef TIMER_BASED_BATCHING
+	/* configure hrtimer */
+	hrtimer_init(&st->hr_batch_timer, CLOCK_BOOTTIME, HRTIMER_MODE_REL);
+	st->hr_batch_timer.function = inv_batch_timer_handler;
+	INIT_WORK(&st->batch_work, inv_batch_work);
+#endif
+#ifdef KERNEL_VERSION_4_X
+	ring = devm_iio_kfifo_allocate(st->dev);
+	if (!ring)
+		return -ENOMEM;
+	ring->scan_timestamp = true;
+	iio_device_attach_buffer(indio_dev, ring);
+	ret = devm_request_threaded_irq(st->dev,
+		st->irq,
+		inv_irq_handler,
+		inv_read_fifo,
+		IRQF_TRIGGER_RISING | IRQF_SHARED,
+		"inv_irq",
+		st);
+	if (ret) {
+		devm_iio_kfifo_free(st->dev, ring);
+		return ret;
+	}
+
+	/* this mode does not use ops */
+	indio_dev->modes = INDIO_ALL_BUFFER_MODES;
+
+	return ret;
+#else
+	ring = iio_kfifo_allocate(indio_dev);
+	if (!ring)
+		return -ENOMEM;
+	indio_dev->buffer = ring;
+	/* setup ring buffer */
+	ring->scan_timestamp = true;
+	indio_dev->setup_ops = &inv_mpu_ring_setup_ops;
+	ret = request_threaded_irq(st->irq,
+			inv_irq_handler,
+			inv_read_fifo,
+			IRQF_TRIGGER_RISING | IRQF_SHARED,
+			"inv_irq",
+			st);
+	if (ret)
+		goto error_iio_sw_rb_free;
+
+	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+	return 0;
+error_iio_sw_rb_free:
+	iio_kfifo_free(indio_dev->buffer);
+
+	return ret;
+#endif
+}
+EXPORT_SYMBOL_GPL(inv_mpu_configure_ring);
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu/inv_mpu_spi.c
new file mode 100644
index 0000000..fb91678
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_spi.c
@@ -0,0 +1,410 @@
+/*
+* Copyright (C) 2012-2018 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+
+#include "inv_mpu_iio.h"
+#include "inv_mpu_dts.h"
+
+#define INV_SPI_READ 0x80
+
+static int inv_spi_single_write(struct inv_mpu_state *st, u8 reg, u8 data)
+{
+	struct spi_message msg;
+	int res;
+	u8 d[2];
+	struct spi_transfer xfers = {
+		.tx_buf = d,
+		.bits_per_word = 8,
+		.len = 2,
+	};
+
+	pr_debug("reg_write: reg=0x%x data=0x%x\n", reg, data);
+	d[0] = reg;
+	d[1] = data;
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers, &msg);
+	res = spi_sync(to_spi_device(st->dev), &msg);
+
+	return res;
+}
+
+static int inv_spi_read(struct inv_mpu_state *st, u8 reg, int len, u8 *data)
+{
+	struct spi_message msg;
+	int res;
+	u8 d[1];
+	struct spi_transfer xfers[] = {
+		{
+		 .tx_buf = d,
+		 .bits_per_word = 8,
+		 .len = 1,
+		 },
+		{
+		 .rx_buf = data,
+		 .bits_per_word = 8,
+		 .len = len,
+		 }
+	};
+
+	if (!data)
+		return -EINVAL;
+
+	d[0] = (reg | INV_SPI_READ);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	res = spi_sync(to_spi_device(st->dev), &msg);
+
+	if (len == 1)
+		pr_debug("reg_read: reg=0x%x length=%d data=0x%x\n",
+							reg, len, data[0]);
+	else
+		pr_debug("reg_read: reg=0x%x length=%d d0=0x%x d1=0x%x\n",
+					reg, len, data[0], data[1]);
+
+	return res;
+
+}
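+
+/*
+ * Reads above set the MSB of the register address (INV_SPI_READ) in the
+ * first transfer and clock the payload back in the second.  Hypothetical
+ * usage sketch (register address chosen only for illustration):
+ *
+ *	u8 val;
+ *	int err = inv_spi_read(st, 0x75, 1, &val);
+ *
+ * puts 0x75 | 0x80 = 0xF5 on the wire, followed by one more clocked byte
+ * during which the device drives the register value back.
+ */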
+
+static int inv_spi_mem_write(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+		     u32 len, u8 const *data)
+{
+	struct spi_message msg;
+	u8 buf[258];
+	int res;
+
+	struct spi_transfer xfers = {
+		.tx_buf = buf,
+		.bits_per_word = 8,
+		.len = len + 1,
+	};
+
+	if (!data || !st)
+		return -EINVAL;
+
+	if (len > (sizeof(buf) - 1))
+		return -ENOMEM;
+
+	inv_plat_single_write(st, REG_MEM_BANK_SEL, mem_addr >> 8);
+	inv_plat_single_write(st, REG_MEM_START_ADDR, mem_addr & 0xFF);
+
+	buf[0] = REG_MEM_R_W;
+	memcpy(buf + 1, data, len);
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers, &msg);
+	res = spi_sync(to_spi_device(st->dev), &msg);
+
+	return res;
+}
+
+static int inv_spi_mem_read(struct inv_mpu_state *st, u8 mpu_addr, u16 mem_addr,
+		    u32 len, u8 *data)
+{
+	int res;
+
+	if (!data || !st)
+		return -EINVAL;
+
+	if (len > 256)
+		return -EINVAL;
+
+	res = inv_plat_single_write(st, REG_MEM_BANK_SEL, mem_addr >> 8);
+	res = inv_plat_single_write(st, REG_MEM_START_ADDR, mem_addr & 0xFF);
+	res = inv_plat_read(st, REG_MEM_R_W, len, data);
+
+	return res;
+}
+
+/*
+ *  inv_mpu_probe() - probe function.
+ */
+static int inv_mpu_probe(struct spi_device *spi)
+{
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	struct inv_mpu_state *st;
+	struct iio_dev *indio_dev;
+	int result;
+
+#ifdef KERNEL_VERSION_4_X
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		pr_err("memory allocation failed\n");
+		result = -ENOMEM;
+		goto out_no_free;
+	}
+#else
+	indio_dev = iio_device_alloc(sizeof(*st));
+	if (indio_dev == NULL) {
+		pr_err("memory allocation failed\n");
+		result = -ENOMEM;
+		goto out_no_free;
+	}
+#endif
+	st = iio_priv(indio_dev);
+	st->write = inv_spi_single_write;
+	st->read = inv_spi_read;
+	st->mem_write = inv_spi_mem_write;
+	st->mem_read = inv_spi_mem_read;
+	st->dev = &spi->dev;
+	st->irq = spi->irq;
+#if !defined(CONFIG_INV_MPU_IIO_ICM20602) \
+	&& !defined(CONFIG_INV_MPU_IIO_IAM20680)
+	st->i2c_dis = BIT_I2C_IF_DIS;
+#endif
+	st->bus_type = BUS_SPI;
+	spi_set_drvdata(spi, indio_dev);
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = id->name;
+
+#ifdef CONFIG_OF
+	result = invensense_mpu_parse_dt(st->dev, &st->plat_data);
+	if (result)
+#  ifdef KERNEL_VERSION_4_X
+		return -ENODEV;
+#  else
+		goto out_free;
+#  endif
+	/* Power on device */
+	if (st->plat_data.power_on) {
+		result = st->plat_data.power_on(&st->plat_data);
+		if (result < 0) {
+			dev_err(st->dev, "power_on failed: %d\n", result);
+#  ifdef KERNEL_VERSION_4_X
+			return -ENODEV;
+#  else
+			goto out_free;
+#  endif
+		}
+		pr_info("%s: power on here.\n", __func__);
+	}
+	pr_info("%s: power on.\n", __func__);
+
+	msleep(100);
+#else
+	if (dev_get_platdata(st->dev) == NULL)
+#  ifdef KERNEL_VERSION_4_X
+		return -ENODEV;
+#  else
+		goto out_free;
+#  endif
+	st->plat_data = *(struct mpu_platform_data *)dev_get_platdata(st->dev);
+#endif
+
+	/* power is turned on inside check chip type */
+	result = inv_check_chip_type(indio_dev, id->name);
+	if (result)
+#ifdef KERNEL_VERSION_4_X
+		return -ENODEV;
+#else
+		goto out_free;
+#endif
+
+	result = inv_mpu_configure_ring(indio_dev);
+	if (result) {
+		pr_err("configure ring buffer fail\n");
+		goto out_free;
+	}
+#ifdef KERNEL_VERSION_4_X
+	result = devm_iio_device_register(st->dev, indio_dev);
+	if (result) {
+		pr_err("IIO device register fail\n");
+		goto out_unreg_ring;
+	}
+#else
+	result = iio_buffer_register(indio_dev, indio_dev->channels,
+				     indio_dev->num_channels);
+	if (result) {
+		pr_err("ring buffer register fail\n");
+		goto out_unreg_ring;
+	}
+
+	result = iio_device_register(indio_dev);
+	if (result) {
+		pr_err("IIO device register fail\n");
+		goto out_remove_ring;
+	}
+#endif
+
+	result = inv_create_dmp_sysfs(indio_dev);
+	if (result) {
+		pr_err("create dmp sysfs failed\n");
+		goto out_unreg_iio;
+	}
+	init_waitqueue_head(&st->wait_queue);
+	st->resume_state = true;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&st->wake_lock, WAKE_LOCK_SUSPEND, "inv_mpu");
+#else
+	wakeup_source_init(&st->wake_lock, "inv_mpu");
+#endif
+	dev_info(st->dev, "%s ma-kernel-%s is ready to go!\n",
+	         indio_dev->name, INVENSENSE_DRIVER_VERSION);
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+	pr_info("Data read from registers\n");
+#else
+	pr_info("Data read from FIFO\n");
+#endif
+#ifdef TIMER_BASED_BATCHING
+	pr_info("Timer based batching\n");
+#endif
+
+	return 0;
+#ifdef KERNEL_VERSION_4_X
+out_unreg_iio:
+	devm_iio_device_unregister(st->dev, indio_dev);
+out_unreg_ring:
+	inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+	devm_iio_device_free(st->dev, indio_dev);
+out_no_free:
+#else
+out_unreg_iio:
+	iio_device_unregister(indio_dev);
+out_remove_ring:
+	iio_buffer_unregister(indio_dev);
+out_unreg_ring:
+	inv_mpu_unconfigure_ring(indio_dev);
+out_free:
+	iio_device_free(indio_dev);
+out_no_free:
+#endif
+	dev_err(st->dev, "%s failed %d\n", __func__, result);
+
+	return -EIO;
+}
+
+static void inv_mpu_shutdown(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+	int result;
+
+	mutex_lock(&indio_dev->mlock);
+	inv_switch_power_in_lp(st, true);
+	dev_dbg(st->dev, "Shutting down %s...\n", st->hw->name);
+
+	/* reset to make sure previous state are not there */
+	result = inv_plat_single_write(st, REG_PWR_MGMT_1, BIT_H_RESET);
+	if (result)
+		dev_err(st->dev, "Failed to reset %s\n",
+			st->hw->name);
+	msleep(POWER_UP_TIME);
+	/* turn off power to ensure gyro engine is off */
+	result = inv_set_power(st, false);
+	if (result)
+		dev_err(st->dev, "Failed to turn off %s\n",
+			st->hw->name);
+	inv_switch_power_in_lp(st, false);
+	mutex_unlock(&indio_dev->mlock);
+}
+
+/*
+ *  inv_mpu_remove() - remove function.
+ */
+static int inv_mpu_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct inv_mpu_state *st = iio_priv(indio_dev);
+
+#ifdef KERNEL_VERSION_4_X
+	devm_iio_device_unregister(st->dev, indio_dev);
+#else
+	iio_device_unregister(indio_dev);
+	iio_buffer_unregister(indio_dev);
+#endif
+	inv_mpu_unconfigure_ring(indio_dev);
+#ifdef KERNEL_VERSION_4_X
+	devm_iio_device_free(st->dev, indio_dev);
+#else
+	iio_device_free(indio_dev);
+#endif
+	dev_info(st->dev, "inv-mpu-iio module removed.\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int inv_mpu_spi_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+
+	return inv_mpu_suspend(indio_dev);
+}
+
+static void inv_mpu_spi_complete(struct device *dev)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+
+	inv_mpu_complete(indio_dev);
+}
+#endif
+
+static const struct dev_pm_ops inv_mpu_spi_pmops = {
+#ifdef CONFIG_PM_SLEEP
+	.suspend = inv_mpu_spi_suspend,
+	.complete = inv_mpu_spi_complete,
+#endif
+};
+
+/* device id table is used to identify what device can be
+ * supported by this driver
+ */
+static const struct spi_device_id inv_mpu_id[] = {
+#ifdef CONFIG_INV_MPU_IIO_ICM20648
+	{"icm20645", ICM20645},
+	{"icm10340", ICM10340},
+	{"icm20648", ICM20648},
+#else
+	{"icm20608d", ICM20608D},
+	{"icm20690", ICM20690},
+	{"icm20602", ICM20602},
+	{"iam20680", IAM20680},
+#endif
+	{}
+};
+
+MODULE_DEVICE_TABLE(spi, inv_mpu_id);
+
+static struct spi_driver inv_mpu_driver = {
+	.probe = inv_mpu_probe,
+	.remove = inv_mpu_remove,
+	.shutdown = inv_mpu_shutdown,
+	.id_table = inv_mpu_id,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "inv-mpu-iio-spi",
+		.pm = &inv_mpu_spi_pmops,
+	},
+};
+module_spi_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense SPI device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu/inv_mpu_timestamp.c b/drivers/iio/imu/inv_mpu/inv_mpu_timestamp.c
new file mode 100644
index 0000000..2cc721b
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_mpu_timestamp.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2012-2018 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "inv_mpu: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/math64.h>
+
+#include "inv_mpu_iio.h"
+
+#define INV_TIME_CALIB_THRESHOLD_1 2
+
+#define MIN_DELAY (3 * NSEC_PER_MSEC)
+#define JITTER_THRESH (1 * NSEC_PER_MSEC)
+
+int inv_update_dmp_ts(struct inv_mpu_state *st, int ind)
+{
+	int i;
+	u32 counter;
+	u64 ts;
+	enum INV_ENGINE en_ind;
+	struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+	u32 base_time;
+	u64 cal_period;
+
+	if (st->mode_1k_on)
+		cal_period = (NSEC_PER_SEC >> 2);
+	else
+		cal_period = 2 * NSEC_PER_SEC;
+
+	ts = ts_algo->last_run_time - st->sensor[ind].time_calib;
+	counter = st->sensor[ind].sample_calib;
+	en_ind = st->sensor[ind].engine_base;
+	if (en_ind != ts_algo->clock_base)
+		return 0;
+	/* we average over 2 seconds period to do the timestamp calculation */
+	if (ts < cal_period)
+		return 0;
+	/* this is the first time we do timestamp averaging, return */
+	/* after resume from suspend, the Linux clock can have up to 1 second
+	   of drift. We should start from the resume clock instead of using
+	   the clock from before resume */
+	if ((!st->sensor[ind].calib_flag) || ts_algo->resume_flag) {
+		st->sensor[ind].sample_calib = 0;
+		st->sensor[ind].time_calib = ts_algo->last_run_time;
+		st->sensor[ind].calib_flag = 1;
+		ts_algo->resume_flag = false;
+
+		return 0;
+	}
+	/* if the sample number in the current FIFO is not zero and more than
+	   2 seconds have passed since the last update time, do the calculation */
+	if ((counter > 0) &&
+		(ts_algo->last_run_time - st->eng_info[en_ind].last_update_time >
+		 cal_period)) {
+		/* duration for each sensor */
+		st->sensor[ind].dur = (u32) div_u64(ts, counter);
+		/* engine duration derived from each sensor */
+		if (st->sensor[ind].div)
+			st->eng_info[en_ind].dur = st->sensor[ind].dur /
+							st->sensor[ind].div;
+		else
+			pr_err("sensor %d divider zero!\n", ind);
+		/* update base time for each sensor */
+		if (st->eng_info[en_ind].divider) {
+			base_time = (st->eng_info[en_ind].dur /
+					st->eng_info[en_ind].divider) *
+					st->eng_info[en_ind].orig_rate;
+			if (st->mode_1k_on)
+				st->eng_info[en_ind].base_time_1k = base_time;
+			else
+				st->eng_info[en_ind].base_time = base_time;
+		} else {
+			pr_err("engine %d divider zero!\n", en_ind);
+		}
+
+		st->eng_info[en_ind].last_update_time = ts_algo->last_run_time;
+		/* update all the sensors duration based on the same engine */
+		for (i = 0; i < SENSOR_NUM_MAX; i++) {
+			if (st->sensor[i].on &&
+			    (st->sensor[i].engine_base == en_ind))
+				st->sensor[i].dur = st->sensor[i].div *
+				    st->eng_info[en_ind].dur;
+		}
+
+	}
+	st->sensor[ind].sample_calib = 0;
+	st->sensor[ind].time_calib = ts_algo->last_run_time;
+
+	return 0;
+}
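+
+/*
+ * Illustrative numbers for the averaging above: if the clock-base sensor
+ * delivered counter = 400 samples over ts = 2,000,000,000 ns, its dur
+ * becomes 5,000,000 ns (200 Hz).  With sensor div = 1 the engine dur is
+ * also 5,000,000 ns, and with divider = 5 and orig_rate = 1000 the
+ * engine base_time works out to (5,000,000 / 5) * 1000 = 1,000,000,000,
+ * i.e. the measured length of one nominal second of the chip clock.
+ */
+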
+/**
+ *     int inv_get_last_run_time_non_dmp_record_mode(struct inv_mpu_state *st)
+ *     This is the function to get the last run time in non-DMP, record mode.
+ *     It updates last_run_time, which is an important parameter in the
+ *     overall timestamp algorithm.
+ *     return value: this function returns the FIFO count value.
+ */
+int inv_get_last_run_time_non_dmp_record_mode(struct inv_mpu_state *st)
+{
+	long long t_pre, t_post, dur;
+	int fifo_count;
+#ifndef SENSOR_DATA_FROM_REGISTERS
+	int res;
+	u8 data[2];
+#endif
+
+	t_pre = get_time_ns();
+#ifndef SENSOR_DATA_FROM_REGISTERS
+	res = inv_plat_read(st, REG_FIFO_COUNT_H, FIFO_COUNT_BYTE, data);
+	if (res) {
+		pr_info("read REG_FIFO_COUNT_H failed= %d\n", res);
+		return 0;
+	}
+#endif
+	t_post = get_time_ns();
+
+#ifdef SENSOR_DATA_FROM_REGISTERS
+	if (st->fifo_count_mode == BYTE_MODE)
+		fifo_count = st->batch.pk_size;
+	else
+		fifo_count = 1;
+#else
+	fifo_count = be16_to_cpup((__be16 *) (data));
+#endif
+	pr_debug("fifo_count=%d\n", fifo_count);
+	if (!fifo_count)
+		return 0;
+	if (st->special_mag_mode && (fifo_count == 2)) {
+		pr_debug("special trigger\n");
+		fifo_count = 1;
+	}
+
+	/* In non-DMP mode, either the gyro or the accel duration is the
+	   duration of each sample */
+	if (st->chip_config.gyro_enable)
+		dur = st->eng_info[ENGINE_GYRO].dur;
+	else
+		dur = st->eng_info[ENGINE_ACCEL].dur;
+
+	if (st->fifo_count_mode == BYTE_MODE)
+		fifo_count /= st->batch.pk_size;
+
+	/* In record mode, each number in fifo_count is 1 record or 1 sample */
+	st->ts_algo.last_run_time += dur * fifo_count;
+	if (st->ts_algo.last_run_time < t_pre)
+		st->ts_algo.last_run_time = t_pre;
+	if (st->ts_algo.last_run_time > t_post)
+		st->ts_algo.last_run_time = t_post;
+
+	return fifo_count;
+}
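+
+/*
+ * Illustrative bound for the function above: with dur = 5,000,000 ns and
+ * fifo_count = 3 records, last_run_time advances by 15 ms and is then
+ * clamped into [t_pre, t_post], the window spanning the FIFO count read,
+ * so accumulated drift can never place the estimate outside the moment
+ * the data was actually observed.
+ */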
+
+int inv_get_dmp_ts(struct inv_mpu_state *st, int i)
+{
+	u64 current_time;
+	int expected_lower_duration, expected_upper_duration;
+
+	current_time = get_time_ns();
+
+	st->sensor[i].ts += st->sensor[i].dur + st->sensor[i].ts_adj;
+
+	if (st->sensor[i].ts < st->sensor[i].previous_ts)
+		st->sensor[i].ts = st->sensor[i].previous_ts + st->sensor[i].dur;
+
+	/* hifi sensor limits ts jitter to +/- 2% */
+	expected_upper_duration = st->eng_info[st->sensor[i].engine_base].divider * 1020000;
+	expected_lower_duration = st->eng_info[st->sensor[i].engine_base].divider * 980000;
+#if defined(CONFIG_INV_MPU_IIO_ICM20602) || defined(CONFIG_INV_MPU_IIO_ICM20690) || defined(CONFIG_INV_MPU_IIO_IAM20680)
+	if (st->sensor[i].ts < st->sensor[i].previous_ts + expected_lower_duration)
+		st->sensor[i].ts = st->sensor[i].previous_ts + expected_lower_duration;
+	if (st->sensor[i].ts > st->sensor[i].previous_ts + expected_upper_duration)
+		st->sensor[i].ts = st->sensor[i].previous_ts + expected_upper_duration;
+#endif
+	if (st->sensor[i].ts > current_time)
+		st->sensor[i].ts = current_time;
+
+	st->sensor[i].previous_ts = st->sensor[i].ts;
+
+	pr_debug("ts=%lld, reset=%lld\n", st->sensor[i].ts, st->ts_algo.reset_ts);
+	if (st->sensor[i].ts < st->ts_algo.reset_ts) {
+		pr_debug("less than reset\n");
+		st->sensor[i].send = false;
+	} else {
+		st->sensor[i].send = true;
+	}
+
+	if (st->header_count == 1)
+		inv_update_dmp_ts(st, i);
+
+	return 0;
+}
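+
+/*
+ * Illustrative jitter clamp for inv_get_dmp_ts() on the chips covered by
+ * the #if above: with an engine divider of 5 the nominal sample period
+ * is 5 ms, so each step is limited to the range 5 * 980,000 ns = 4.9 ms
+ * to 5 * 1,020,000 ns = 5.1 ms (+/- 2%), and the result is additionally
+ * never allowed past current_time.
+ */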
+
+static void process_sensor_bounding(struct inv_mpu_state *st, int i)
+{
+	s64 elaps_time, thresh1, thresh2;
+	struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+	u32 dur;
+
+	elaps_time = ((u64) (st->sensor[i].dur)) * st->sensor[i].count;
+	thresh1 = ts_algo->last_run_time - elaps_time;
+
+	dur = max(st->sensor[i].dur, (int)MIN_DELAY);
+	thresh2 = thresh1 - dur;
+	if (thresh1 < 0)
+		thresh1 = 0;
+	if (thresh2 < 0)
+		thresh2 = 0;
+	st->sensor[i].ts_adj = 0;
+	if ((ts_algo->calib_counter >= INV_TIME_CALIB_THRESHOLD_1) &&
+						(!ts_algo->resume_flag)) {
+		if (st->sensor[i].ts < thresh2)
+			st->sensor[i].ts_adj = thresh2 - st->sensor[i].ts;
+	} else if ((ts_algo->calib_counter >=
+		INV_TIME_CALIB_THRESHOLD_1) && ts_algo->resume_flag) {
+		if (st->sensor[i].ts < thresh2)
+			st->sensor[i].ts = ts_algo->last_run_time -
+						elaps_time - JITTER_THRESH;
+	} else {
+		st->sensor[i].ts = ts_algo->last_run_time - elaps_time -
+							JITTER_THRESH;
+		st->sensor[i].previous_ts = st->sensor[i].ts;
+	}
+
+	if (st->sensor[i].ts > thresh1)
+		st->sensor[i].ts_adj = thresh1 - st->sensor[i].ts;
+	pr_debug("cali=%d\n", st->ts_algo.calib_counter);
+	pr_debug("adj= %lld\n", st->sensor[i].ts_adj);
+	pr_debug("dur= %d count= %d last= %lld\n", st->sensor[i].dur,
+				st->sensor[i].count, ts_algo->last_run_time);
+	if (st->sensor[i].ts_adj && (st->sensor[i].count > 1))
+		st->sensor[i].ts_adj = div_s64(st->sensor[i].ts_adj,
+							st->sensor[i].count);
+}
+/* inv_bound_timestamp(struct inv_mpu_state *st)
+	The purpose of this function is to give a generic bound to each
+	sensor timestamp. The timestamp cannot exceed the current time.
+	The timestamp cannot go backwards by more than one sample time either;
+	otherwise there would be another sample in between. Using this
+	principle, we can bound the sensor samples */
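+/*
+ * Illustrative numbers (invented) for the bounding below: with dur = 5 ms
+ * and count = 4 samples in the batch, elaps_time = 20 ms, thresh1 =
+ * last_run_time - 20 ms and thresh2 = thresh1 - 5 ms.  The timestamp is
+ * nudged to lie between thresh2 and thresh1, and any correction is
+ * spread evenly over the 4 samples through ts_adj.
+ */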
+int inv_bound_timestamp(struct inv_mpu_state *st)
+{
+	int i;
+	struct inv_timestamp_algo *ts_algo = &st->ts_algo;
+
+	for (i = 0; i < SENSOR_NUM_MAX; i++) {
+		if (st->sensor[i].on) {
+			if (st->sensor[i].count) {
+				process_sensor_bounding(st, i);
+			} else if (ts_algo->calib_counter <
+				   INV_TIME_CALIB_THRESHOLD_1) {
+				st->sensor[i].ts = ts_algo->reset_ts;
+				st->sensor[i].previous_ts = st->sensor[i].ts;
+			}
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/iio/imu/inv_mpu/inv_test/Kconfig b/drivers/iio/imu/inv_mpu/inv_test/Kconfig
new file mode 100644
index 0000000..a4dfd95
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/Kconfig
@@ -0,0 +1,13 @@
+#
+# Kconfig for Invensense IIO testing hooks
+#
+
+config INV_TESTING
+	boolean "Invensense IIO testing hooks"
+	depends on INV_MPU_IIO || INV_AMI306_IIO || INV_YAS530 || INV_HUB_IIO
+	default n
+	help
+	  This flag enables display of additional testing information from the
+	  Invensense IIO drivers.
+	  It also enables the I2C counters facility to perform IO profiling.
+	  Some additional sysfs entries will appear when this flag is enabled.
diff --git a/drivers/iio/imu/inv_mpu/inv_test/Makefile b/drivers/iio/imu/inv_mpu/inv_test/Makefile
new file mode 100644
index 0000000..4f0edd3
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Invensense IIO testing hooks.
+#
+
+obj-$(CONFIG_INV_TESTING) += inv_counters.o
+
diff --git a/drivers/iio/imu/inv_mpu/inv_test/inv_counters.c b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.c
new file mode 100644
index 0000000..f60337c
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/kdev_t.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+
+#include "inv_counters.h"
+
+static int mpu_irq;
+static int accel_irq;
+static int compass_irq;
+
+struct inv_counters {
+	uint32_t i2c_tempreads;
+	uint32_t i2c_mpureads;
+	uint32_t i2c_mpuwrites;
+	uint32_t i2c_accelreads;
+	uint32_t i2c_accelwrites;
+	uint32_t i2c_compassreads;
+	uint32_t i2c_compasswrites;
+	uint32_t i2c_compassirq;
+	uint32_t i2c_accelirq;
+};
+
+static struct inv_counters Counters;
+
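+/*
+ * Dump all counters on a single line:
+ * <jiffies-derived time> <mpu irqs> <temp reads> <mpu reads> <mpu writes>
+ * <accel irqs> <accel reads> <accel writes>
+ * <compass irqs> <compass reads> <compass writes>
+ */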
+static ssize_t i2c_counters_show(struct class *cls,
+			struct class_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE,
+		"%ld.%03ld %u %u %u %u %u %u %u %u %u %u\n",
+		jiffies / HZ, ((jiffies % HZ) * (1024 / HZ)),
+		mpu_irq ? kstat_irqs(mpu_irq) : 0,
+		Counters.i2c_tempreads,
+		Counters.i2c_mpureads, Counters.i2c_mpuwrites,
+		accel_irq ? kstat_irqs(accel_irq) : Counters.i2c_accelirq,
+		Counters.i2c_accelreads, Counters.i2c_accelwrites,
+		compass_irq ? kstat_irqs(compass_irq) : Counters.i2c_compassirq,
+		Counters.i2c_compassreads, Counters.i2c_compasswrites);
+}
+
+void inv_iio_counters_set_i2cirq(enum irqtype type, int irq)
+{
+	switch (type) {
+	case IRQ_MPU:
+		mpu_irq = irq;
+		break;
+	case IRQ_ACCEL:
+		accel_irq = irq;
+		break;
+	case IRQ_COMPASS:
+		compass_irq = irq;
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_set_i2cirq);
+
+void inv_iio_counters_tempread(int count)
+{
+	Counters.i2c_tempreads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_tempread);
+
+void inv_iio_counters_mpuread(int count)
+{
+	Counters.i2c_mpureads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_mpuread);
+
+void inv_iio_counters_mpuwrite(int count)
+{
+	Counters.i2c_mpuwrites += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_mpuwrite);
+
+void inv_iio_counters_accelread(int count)
+{
+	Counters.i2c_accelreads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_accelread);
+
+void inv_iio_counters_accelwrite(int count)
+{
+	Counters.i2c_accelwrites += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_accelwrite);
+
+void inv_iio_counters_compassread(int count)
+{
+	Counters.i2c_compassreads += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_compassread);
+
+void inv_iio_counters_compasswrite(int count)
+{
+	Counters.i2c_compasswrites += count;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_compasswrite);
+
+void inv_iio_counters_compassirq(void)
+{
+	Counters.i2c_compassirq++;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_compassirq);
+
+void inv_iio_counters_accelirq(void)
+{
+	Counters.i2c_accelirq++;
+}
+EXPORT_SYMBOL_GPL(inv_iio_counters_accelirq);
+
+static struct class_attribute inv_class_attr[] = {
+	__ATTR(i2c_counter, S_IRUGO, i2c_counters_show, NULL),
+	__ATTR_NULL
+};
+
+static struct class inv_counters_class = {
+	.name = "inv_counters",
+	.owner = THIS_MODULE,
+	.class_attrs = (struct class_attribute *) &inv_class_attr
+};
+
+static int __init inv_counters_init(void)
+{
+	memset(&Counters, 0, sizeof(Counters));
+
+	return class_register(&inv_counters_class);
+}
+
+static void __exit inv_counters_exit(void)
+{
+	class_unregister(&inv_counters_class);
+}
+
+module_init(inv_counters_init);
+module_exit(inv_counters_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("GESL");
+MODULE_DESCRIPTION("inv_counters debug support");
+
diff --git a/drivers/iio/imu/inv_mpu/inv_test/inv_counters.h b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.h
new file mode 100644
index 0000000..62f7627
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu/inv_test/inv_counters.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012-2017 InvenSense, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _INV_COUNTERS_H_
+#define _INV_COUNTERS_H_
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_INV_TESTING
+
+enum irqtype {
+	IRQ_MPU,
+	IRQ_ACCEL,
+	IRQ_COMPASS
+};
+
+#define INV_I2C_INC_MPUREAD(x)		inv_iio_counters_mpuread(x)
+#define INV_I2C_INC_MPUWRITE(x)		inv_iio_counters_mpuwrite(x)
+#define INV_I2C_INC_ACCELREAD(x)	inv_iio_counters_accelread(x)
+#define INV_I2C_INC_ACCELWRITE(x)	inv_iio_counters_accelwrite(x)
+#define INV_I2C_INC_COMPASSREAD(x)	inv_iio_counters_compassread(x)
+#define INV_I2C_INC_COMPASSWRITE(x)	inv_iio_counters_compasswrite(x)
+
+#define INV_I2C_INC_TEMPREAD(x)		inv_iio_counters_tempread(x)
+
+#define INV_I2C_SETIRQ(type, irq)	inv_iio_counters_set_i2cirq(type, irq)
+#define INV_I2C_INC_COMPASSIRQ()	inv_iio_counters_compassirq()
+#define INV_I2C_INC_ACCELIRQ()		inv_iio_counters_accelirq()
+
+void inv_iio_counters_mpuread(int count);
+void inv_iio_counters_mpuwrite(int count);
+void inv_iio_counters_accelread(int count);
+void inv_iio_counters_accelwrite(int count);
+void inv_iio_counters_compassread(int count);
+void inv_iio_counters_compasswrite(int count);
+
+void inv_iio_counters_tempread(int count);
+
+void inv_iio_counters_set_i2cirq(enum irqtype type, int irq);
+void inv_iio_counters_compassirq(void);
+void inv_iio_counters_accelirq(void);
+
+#else
+
+#define INV_I2C_INC_MPUREAD(x)
+#define INV_I2C_INC_MPUWRITE(x)
+#define INV_I2C_INC_ACCELREAD(x)
+#define INV_I2C_INC_ACCELWRITE(x)
+#define INV_I2C_INC_COMPASSREAD(x)
+#define INV_I2C_INC_COMPASSWRITE(x)
+
+#define INV_I2C_INC_TEMPREAD(x)
+
+#define INV_I2C_SETIRQ(type, irq)
+#define INV_I2C_INC_COMPASSIRQ()
+#define INV_I2C_INC_ACCELIRQ()
+
+#endif /* CONFIG_INV_TESTING */
+
+#endif /* _INV_COUNTERS_H_ */
+
diff --git a/drivers/iio/imu/st_asm330lhh/Kconfig b/drivers/iio/imu/st_asm330lhh/Kconfig
new file mode 100644
index 0000000..092cc48
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/Kconfig
@@ -0,0 +1,23 @@
+
+config IIO_ST_ASM330LHH
+	tristate "STMicroelectronics ASM330LHH sensor"
+	depends on (I2C || SPI)
+	select IIO_BUFFER
+	select IIO_KFIFO_BUF
+	select IIO_ST_ASM330LHH_I2C if (I2C)
+	select IIO_ST_ASM330LHH_SPI if (SPI_MASTER)
+	help
+	  Say yes here to build support for the STMicroelectronics ASM330LHH
+	  IMU sensor.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called st_asm330lhh.
+
+config IIO_ST_ASM330LHH_I2C
+	tristate
+	depends on IIO_ST_ASM330LHH
+
+config IIO_ST_ASM330LHH_SPI
+	tristate
+	depends on IIO_ST_ASM330LHH
+
diff --git a/drivers/iio/imu/st_asm330lhh/Makefile b/drivers/iio/imu/st_asm330lhh/Makefile
new file mode 100644
index 0000000..7af80de
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/Makefile
@@ -0,0 +1,5 @@
+st_asm330lhh-y := st_asm330lhh_core.o st_asm330lhh_buffer.o
+
+obj-$(CONFIG_IIO_ST_ASM330LHH) += st_asm330lhh.o
+obj-$(CONFIG_IIO_ST_ASM330LHH_I2C) += st_asm330lhh_i2c.o
+obj-$(CONFIG_IIO_ST_ASM330LHH_SPI) += st_asm330lhh_spi.o
diff --git a/drivers/iio/imu/st_asm330lhh/README.md b/drivers/iio/imu/st_asm330lhh/README.md
new file mode 100644
index 0000000..f06afd3
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/README.md
@@ -0,0 +1,201 @@
+Index
+=======
+	* Introduction
+	* Driver Integration details
+	* Android SensorHAL integration
+	* Linux SensorHAL integration
+	* More information
+	* Copyright
+
+
+Introduction
+==============
+This repository contains the STMicroelectronics asm330lhh IMU MEMS sensor Linux driver, with support for kernel versions 3.18, 4.4 and 4.9.
+
+Data collected by the asm330lhh sensor are pushed to userland through the kernel buffers of the Linux IIO framework. User-space applications can get sensor events by reading the related IIO devices created in the /dev directory (*/dev/iio{x}*). Please see [IIO][1] for more information.
+
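+As a quick sanity check, the data path can be exercised from user space with a plain read() on the IIO buffer character device. The following is only a minimal sketch: the */dev/iio:device0* node name is an assumption (it depends on probe order) and the IIO buffer must be enabled through sysfs beforehand.
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char buf[256];
+		ssize_t n;
+		int fd = open("/dev/iio:device0", O_RDONLY);
+
+		if (fd < 0)
+			return 1;
+
+		/* each record holds the raw scan elements plus the timestamp */
+		n = read(fd, buf, sizeof(buf));
+		printf("read %zd bytes\n", n);
+		close(fd);
+		return 0;
+	}
+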
+The asm330lhh IMU supports the *I2C/SPI* digital interfaces. Please refer to [I2C][2] and [SPI][3] for detailed documentation.
+
+The STM Hardware Abstraction Layer (*HAL*) defines a standard interface for STM sensors, allowing Android to be agnostic about the low-level driver implementation. The HAL library is packaged into module (.so) files and loaded by the Android or Linux system at the appropriate time. For more information see [AOSP HAL Interface](https://source.android.com/devices/sensors/hal-interface.html).
+
+The STM Sensor HAL leans on the [Linux IIO framework](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/iio) to gather data from sensor device drivers and to forward samples to the Android framework.
+
+Driver Integration details
+=====================
+
+To explain how to integrate the asm330lhh IMU sensor into the kernel, please consider the following example.
+
+### Source code integration
+
+> * Copy driver source code into your linux kernel target directory (e.g. *drivers/iio/imu*)
+> * Edit related Kconfig (e.g. *drivers/iio/imu/Kconfig*) adding *ASM330LHH* support:
+
+>         source "drivers/iio/imu/st_asm330lhh/Kconfig"
+
+> * Edit related Makefile (e.g. *drivers/iio/imu/Makefile*) adding the following line:
+
+>         obj-y += st_asm330lhh/
+
+### Device Tree configuration
+
+> To enable driver probing, add the asm330lhh node to the platform device tree as described below.
+
+> **Required properties:**
+
+> *- compatible*: "st,asm330lhh"
+
+> *- reg*: the I2C address or SPI chip select the device will respond to
+
+> *- interrupt-parent*: phandle to the parent interrupt controller as documented in [interrupts][4]
+
+> *- interrupts*: interrupt mapping for IRQ as documented in [interrupts][4]
+> 
+>**Recommended properties for SPI bus usage:**
+
+> *- spi-max-frequency*: maximum SPI bus frequency as documented in [SPI][3]
+> 
+> **Optional properties:**
+
+> *- st,drdy-int-pin*: MEMS sensor interrupt line to use (default 1)
+
+> I2C example (based on Raspberry PI 3):
+
+>		&i2c0 {
+>			status = "ok";
+>			#address-cells = <0x1>;
+>			#size-cells = <0x0>;
+>			asm330lhh@6b {
+>				compatible = "st,asm330lhh";
+>				reg = <0x6b>;
+>				interrupt-parent = <&gpio>;
+>				interrupts = <26 IRQ_TYPE_EDGE_RISING>;
+>			};
+>		};
+
+> SPI example (based on Raspberry PI 3):
+
+>		&spi0 {
+>			status = "ok";
+>			#address-cells = <0x1>;
+>			#size-cells = <0x0>;
+>			asm330lhh@0 {
+>				spi-max-frequency = <500000>;
+>				compatible = "st,asm330lhh";
+>				reg = <0>;
+>				interrupt-parent = <&gpio>;
+>				interrupts = <26 IRQ_TYPE_EDGE_RISING>;
+>			};
+>		};
+
+### Kernel configuration
+
+Configure kernel with *make menuconfig* (alternatively use *make xconfig* or *make qconfig*)
+ 
+>		Device Drivers  --->
+>			<M> Industrial I/O support  --->
+>				Inertial measurement units  --->
+>				<M>   STMicroelectronics ASM330LHH sensor  --->
+
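+The entries above correspond to the following kernel configuration symbols (a sketch of the expected .config fragment when the driver is built as a module):
+
+	CONFIG_IIO_ST_ASM330LHH=m
+	CONFIG_IIO_ST_ASM330LHH_I2C=m
+	CONFIG_IIO_ST_ASM330LHH_SPI=m
+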
+
+Android SensorHAL integration
+==============
+
+STM Sensor HAL is written in *C++* language using object-oriented design. For each hw sensor there is a custom class file (*Accelerometer.cpp*, *Gyroscope.cpp*) which extends the common base class (*SensorBase.cpp*).
+
+Copy the HAL source code into the *<AOSP_DIR\>/hardware/STMicroelectronics/SensorHAL_IIO* folder. During the build process Android will automatically include the SensorHAL Android.mk.
+In *<AOSP_DIR\>/device/<vendor\>/<board\>/device.mk*, add the package build information:
+
+	PRODUCT_PACKAGES += sensors.{TARGET_BOARD_PLATFORM}
+
+	Note: device.mk cannot read the $(TARGET_BOARD_PLATFORM) variable; read the value from your BoardConfig.mk and substitute it (e.g. PRODUCT_PACKAGES += sensors.msm8974 for Nexus 5)
+
+To compile the SensorHAL_IIO, just build the AOSP source code from the *$TOP* folder:
+
+	$ cd <AOSP_DIR>
+	$ source build/envsetup.sh
+	$ lunch <select target platform>
+	$ make V=99
+
+The compiled library will be placed in *<AOSP_DIR\>/out/target/product/<board\>/system/vendor/lib/hw/sensors.{TARGET_BOARD_PLATFORM}.so*.
+
+To configure the Sensor HAL IIO, use the mm utility from the HAL root folder:
+
+    since Android 7
+	$mm sensors-defconfig (default configuration)
+	$mm sensors-menuconfig
+
+    after Android 7
+    make -f Makefile_config sensors-defconfig (default configuration)
+    make -f Makefile_config sensors-menuconfig
+    
+Linux SensorHAL integration
+==============
+
+The Linux Sensor HAL shares the same source code as the Android Sensor HAL. Before compiling the Linux Sensor HAL IIO,
+you need to follow the same procedure listed in the previous chapter, "Android SensorHAL integration".
+To cross compile the Linux Sensor HAL, export the following shell variables:
+
+>   export AOSP_DIR=<AOSP_DIR>
+>   export ARCH=<your target architecture>
+>   export CROSS_COMPILE=<toolchain for your target>
+
+then, in the *<AOSP_DIR\>/hardware/STMicroelectronics/SensorHAL_IIO* folder, type
+>   make
+
+This will produce a SensorHAL.so file containing the library.
+The relative path Documentation/LinuxHal/ contains some examples explaining how to use the Linux Sensor HAL.
+
+Copyright
+========
+Copyright (C) 2017 STMicroelectronics
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+More Information
+=================
+[http://st.com](http://st.com)
+
+[https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/iio](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/iio)
+
+[https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/i2c](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/i2c)
+
+[https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/spi](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/spi)
+
+[https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt)
+
+
+Copyright Driver
+===========
+Copyright (C) 2017 STMicroelectronics
+
+This software is distributed under the GNU General Public License - see the accompanying COPYING file for more details.
+
+[1]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/iio/iio_configfs.txt "IIO"
+[2]: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/i2c "I2C"
+[3]: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/spi "SPI"
+[4]: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt "interrupts"
+
+Copyright SensorHAL
+========
+Copyright (C) 2017 STMicroelectronics
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
new file mode 100644
index 0000000..52b293f
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
@@ -0,0 +1,238 @@
+/*
+ * STMicroelectronics st_asm330lhh sensor driver
+ *
+ * Copyright 2018 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_ASM330LHH_H
+#define ST_ASM330LHH_H
+
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+
+#define ST_ASM330LHH_REVISION		"2.0.1"
+#define ST_ASM330LHH_PATCH		"1"
+
+#define ST_ASM330LHH_VERSION		"v"	\
+	ST_ASM330LHH_REVISION			\
+	"-"					\
+	ST_ASM330LHH_PATCH
+
+#define ST_ASM330LHH_DEV_NAME		"asm330lhh"
+
+#define ST_ASM330LHH_SAMPLE_SIZE	6
+#define ST_ASM330LHH_TS_SAMPLE_SIZE	4
+#define ST_ASM330LHH_TAG_SIZE		1
+#define ST_ASM330LHH_FIFO_SAMPLE_SIZE	(ST_ASM330LHH_SAMPLE_SIZE + \
+					 ST_ASM330LHH_TAG_SIZE)
+#define ST_ASM330LHH_MAX_FIFO_DEPTH	416
+
+#define ST_ASM330LHH_REG_FIFO_BATCH_ADDR	0x09
+#define ST_ASM330LHH_REG_FIFO_CTRL4_ADDR	0x0a
+#define ST_ASM330LHH_REG_STATUS_ADDR		0x1e
+#define ST_ASM330LHH_REG_STATUS_TDA		BIT(2)
+#define ST_ASM330LHH_REG_OUT_TEMP_L_ADDR	0x20
+#define ST_ASM330LHH_REG_OUT_TEMP_H_ADDR	0x21
+
+#define ST_ASM330LHH_MAX_ODR			416
+
+/* Define Custom events for FIFO flush */
+#define CUSTOM_IIO_EV_DIR_FIFO_EMPTY (IIO_EV_DIR_NONE + 1)
+#define CUSTOM_IIO_EV_DIR_FIFO_DATA (IIO_EV_DIR_NONE + 2)
+#define CUSTOM_IIO_EV_TYPE_FIFO_FLUSH (IIO_EV_TYPE_CHANGE + 1)
+
+#define ST_ASM330LHH_CHANNEL(chan_type, addr, mod, ch2, scan_idx,	\
+			   rb, sb, sg)					\
+{									\
+	.type = chan_type,						\
+	.address = addr,						\
+	.modified = mod,						\
+	.channel2 = ch2,						\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |			\
+			      BIT(IIO_CHAN_INFO_SCALE),			\
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
+	.scan_index = scan_idx,						\
+	.scan_type = {							\
+		.sign = sg,						\
+		.realbits = rb,						\
+		.storagebits = sb,					\
+		.endianness = IIO_LE,					\
+	},								\
+}
+
+static const struct iio_event_spec st_asm330lhh_flush_event = {
+	.type = CUSTOM_IIO_EV_TYPE_FIFO_FLUSH,
+	.dir = IIO_EV_DIR_EITHER,
+};
+
+#define ST_ASM330LHH_FLUSH_CHANNEL(dtype)		\
+{							\
+	.type = dtype,					\
+	.modified = 0,					\
+	.scan_index = -1,				\
+	.indexed = -1,					\
+	.event_spec = &st_asm330lhh_flush_event,	\
+	.num_event_specs = 1,				\
+}
+
+#define ST_ASM330LHH_RX_MAX_LENGTH	8
+#define ST_ASM330LHH_TX_MAX_LENGTH	8
+
+struct st_asm330lhh_transfer_buffer {
+	u8 rx_buf[ST_ASM330LHH_RX_MAX_LENGTH];
+	u8 tx_buf[ST_ASM330LHH_TX_MAX_LENGTH] ____cacheline_aligned;
+};
+
+struct st_asm330lhh_transfer_function {
+	int (*read)(struct device *dev, u8 addr, int len, u8 *data);
+	int (*write)(struct device *dev, u8 addr, int len, u8 *data);
+};
+
+struct st_asm330lhh_reg {
+	u8 addr;
+	u8 mask;
+};
+
+struct st_asm330lhh_odr {
+	u16 hz;
+	u8 val;
+};
+
+#define ST_ASM330LHH_ODR_LIST_SIZE	7
+struct st_asm330lhh_odr_table_entry {
+	struct st_asm330lhh_reg reg;
+	struct st_asm330lhh_odr odr_avl[ST_ASM330LHH_ODR_LIST_SIZE];
+};
+
+struct st_asm330lhh_fs {
+	u32 gain;
+	u8 val;
+};
+
+#define ST_ASM330LHH_FS_ACC_LIST_SIZE		4
+#define ST_ASM330LHH_FS_GYRO_LIST_SIZE		6
+#define ST_ASM330LHH_FS_TEMP_LIST_SIZE		1
+#define ST_ASM330LHH_FS_LIST_SIZE		6
+struct st_asm330lhh_fs_table_entry {
+	u32 size;
+	struct st_asm330lhh_reg reg;
+	struct st_asm330lhh_fs fs_avl[ST_ASM330LHH_FS_LIST_SIZE];
+};
+
+enum st_asm330lhh_sensor_id {
+	ST_ASM330LHH_ID_ACC,
+	ST_ASM330LHH_ID_GYRO,
+	ST_ASM330LHH_ID_TEMP,
+	ST_ASM330LHH_ID_MAX,
+};
+
+enum st_asm330lhh_fifo_mode {
+	ST_ASM330LHH_FIFO_BYPASS = 0x0,
+	ST_ASM330LHH_FIFO_CONT = 0x6,
+};
+
+enum {
+	ST_ASM330LHH_HW_FLUSH,
+	ST_ASM330LHH_HW_OPERATIONAL,
+};
+
+/**
+ * struct st_asm330lhh_sensor - ST IMU sensor instance
+ * @id: Sensor identifier.
+ * @hw: Pointer to instance of struct st_asm330lhh_hw.
+ * @gain: Configured sensor sensitivity.
+ * @odr: Output data rate of the sensor [Hz].
+ * @watermark: Sensor watermark level.
+ * @batch_mask: Sensor mask for FIFO batching register
+ */
+struct st_asm330lhh_sensor {
+	enum st_asm330lhh_sensor_id id;
+	struct st_asm330lhh_hw *hw;
+
+	u32 gain;
+	u16 odr;
+	u32 offset;
+
+	__le16 old_data;
+
+	u8 std_samples;
+	u8 std_level;
+
+	u16 watermark;
+	u8 batch_mask;
+	u8 batch_addr;
+};
+
+/**
+ * struct st_asm330lhh_hw - ST IMU MEMS hw instance
+ * @dev: Pointer to instance of struct device (I2C or SPI).
+ * @irq: Device interrupt line (I2C or SPI).
+ * @lock: Mutex to protect read and write operations.
+ * @fifo_lock: Mutex to prevent concurrent access to the hw FIFO.
+ * @fifo_mode: FIFO operating mode supported by the device.
+ * @state: hw operational state.
+ * @enable_mask: Enabled sensor bitmask.
+ * @ts_offset: Hw timestamp offset.
+ * @hw_ts: Latest hw timestamp from the sensor.
+ * @ts: Latest timestamp from irq handler.
+ * @delta_ts: Delta time between two consecutive interrupts.
+ * @iio_devs: Pointers to acc/gyro iio_dev instances.
+ * @tf: Transfer function structure used by I/O operations.
+ * @tb: Transfer buffers used by SPI I/O operations.
+ */
+struct st_asm330lhh_hw {
+	struct device *dev;
+	int irq;
+
+	struct mutex lock;
+	struct mutex fifo_lock;
+
+	enum st_asm330lhh_fifo_mode fifo_mode;
+	unsigned long state;
+	u8 enable_mask;
+
+	s64 ts_offset;
+	s64 hw_ts;
+	s64 delta_ts;
+	s64 ts;
+	s64 tsample;
+	s64 hw_ts_old;
+	s64 delta_hw_ts;
+
+	/* Timestamp sample ODR */
+	u16 odr;
+
+	struct iio_dev *iio_devs[ST_ASM330LHH_ID_MAX];
+
+	const struct st_asm330lhh_transfer_function *tf;
+	struct st_asm330lhh_transfer_buffer tb;
+};
+
+extern const struct dev_pm_ops st_asm330lhh_pm_ops;
+
+int st_asm330lhh_probe(struct device *dev, int irq,
+		       const struct st_asm330lhh_transfer_function *tf_ops);
+int st_asm330lhh_sensor_set_enable(struct st_asm330lhh_sensor *sensor,
+				   bool enable);
+int st_asm330lhh_fifo_setup(struct st_asm330lhh_hw *hw);
+int st_asm330lhh_write_with_mask(struct st_asm330lhh_hw *hw, u8 addr, u8 mask,
+				 u8 val);
+int st_asm330lhh_get_odr_val(enum st_asm330lhh_sensor_id id, u16 odr, u8 *val);
+ssize_t st_asm330lhh_flush_fifo(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size);
+ssize_t st_asm330lhh_get_max_watermark(struct device *dev,
+				       struct device_attribute *attr, char *buf);
+ssize_t st_asm330lhh_get_watermark(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+ssize_t st_asm330lhh_set_watermark(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size);
+int st_asm330lhh_set_fifo_mode(struct st_asm330lhh_hw *hw,
+			       enum st_asm330lhh_fifo_mode fifo_mode);
+int st_asm330lhh_suspend_fifo(struct st_asm330lhh_hw *hw);
+#endif /* ST_ASM330LHH_H */
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
new file mode 100644
index 0000000..af8c5ba
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
@@ -0,0 +1,531 @@
+/*
+ * STMicroelectronics st_asm330lhh FIFO buffer library driver
+ *
+ * Copyright 2018 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/events.h>
+#include <asm/unaligned.h>
+#include <linux/of.h>
+
+#include "st_asm330lhh.h"
+
+#define ST_ASM330LHH_REG_FIFO_THL_ADDR		0x07
+#define ST_ASM330LHH_REG_FIFO_LEN_MASK		GENMASK(8, 0)
+#define ST_ASM330LHH_REG_FIFO_MODE_MASK		GENMASK(2, 0)
+#define ST_ASM330LHH_REG_DEC_TS_MASK		GENMASK(7, 6)
+#define ST_ASM330LHH_REG_HLACTIVE_ADDR		0x12
+#define ST_ASM330LHH_REG_HLACTIVE_MASK		BIT(5)
+#define ST_ASM330LHH_REG_PP_OD_ADDR		0x12
+#define ST_ASM330LHH_REG_PP_OD_MASK		BIT(4)
+#define ST_ASM330LHH_REG_FIFO_DIFFL_ADDR	0x3a
+#define ST_ASM330LHH_REG_TS0_ADDR		0x40
+#define ST_ASM330LHH_REG_TS2_ADDR		0x42
+#define ST_ASM330LHH_REG_FIFO_OUT_TAG_ADDR	0x78
+#define ST_ASM330LHH_GYRO_TAG			0x01
+#define ST_ASM330LHH_ACC_TAG			0x02
+#define ST_ASM330LHH_TS_TAG			0x04
+
+#define ST_ASM330LHH_TS_DELTA_NS		25000ULL /* 25us/LSB */
+
+static inline s64 st_asm330lhh_get_time_ns(void)
+{
+	struct timespec ts;
+
+	get_monotonic_boottime(&ts);
+	return timespec_to_ns(&ts);
+}
+
+#define ST_ASM330LHH_EWMA_LEVEL			120
+#define ST_ASM330LHH_EWMA_DIV			128
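+/*
+ * Exponentially weighted moving average:
+ *   out = old + (new - old) * (DIV - weight) / DIV
+ * With weight = 120 and DIV = 128 each new sample contributes 8/128 (1/16)
+ * to the filtered value, which smooths jitter in the timestamp estimation.
+ */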
+static inline s64 st_asm330lhh_ewma(s64 old, s64 new, int weight)
+{
+	s64 diff, incr;
+
+	diff = new - old;
+	incr = div_s64((ST_ASM330LHH_EWMA_DIV - weight) * diff,
+		       ST_ASM330LHH_EWMA_DIV);
+
+	return old + incr;
+}
+
+static inline int st_asm330lhh_reset_hwts(struct st_asm330lhh_hw *hw)
+{
+	u8 data = 0xaa;
+
+	hw->ts = st_asm330lhh_get_time_ns();
+	hw->ts_offset = hw->ts;
+	hw->hw_ts_old = 0ull;
+	hw->tsample = 0ull;
+
+	return hw->tf->write(hw->dev, ST_ASM330LHH_REG_TS2_ADDR, sizeof(data),
+			     &data);
+}
+
+int st_asm330lhh_set_fifo_mode(struct st_asm330lhh_hw *hw,
+			       enum st_asm330lhh_fifo_mode fifo_mode)
+{
+	int err;
+
+	err = st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_FIFO_CTRL4_ADDR,
+					   ST_ASM330LHH_REG_FIFO_MODE_MASK,
+					   fifo_mode);
+	if (err < 0)
+		return err;
+
+	hw->fifo_mode = fifo_mode;
+
+	return 0;
+}
+
+static int st_asm330lhh_set_sensor_batching_odr(struct st_asm330lhh_sensor *sensor,
+						bool enable)
+{
+	struct st_asm330lhh_hw *hw = sensor->hw;
+	u8 data = 0;
+	int err;
+
+	if (enable) {
+		err = st_asm330lhh_get_odr_val(sensor->id, sensor->odr, &data);
+		if (err < 0)
+			return err;
+	}
+
+	return st_asm330lhh_write_with_mask(hw,
+					    sensor->batch_addr,
+					    sensor->batch_mask, data);
+}
+
+static u16 st_asm330lhh_ts_odr(struct st_asm330lhh_hw *hw)
+{
+	struct st_asm330lhh_sensor *sensor;
+	u16 odr = 0;
+	u8 i;
+
+	for (i = 0; i < ST_ASM330LHH_ID_MAX; i++) {
+		if (!hw->iio_devs[i])
+			continue;
+
+		sensor = iio_priv(hw->iio_devs[i]);
+		if (hw->enable_mask & BIT(sensor->id))
+			odr = max_t(u16, odr, sensor->odr);
+	}
+
+	return odr;
+}
+
+static int st_asm330lhh_update_watermark(struct st_asm330lhh_sensor *sensor,
+					 u16 watermark)
+{
+	u16 fifo_watermark = ST_ASM330LHH_MAX_FIFO_DEPTH, cur_watermark = 0;
+	struct st_asm330lhh_hw *hw = sensor->hw;
+	struct st_asm330lhh_sensor *cur_sensor;
+	__le16 wdata;
+	int i, err;
+	u8 data;
+
+	for (i = 0; i < ST_ASM330LHH_ID_MAX; i++) {
+		cur_sensor = iio_priv(hw->iio_devs[i]);
+
+		if (!(hw->enable_mask & BIT(cur_sensor->id)))
+			continue;
+
+		cur_watermark = (cur_sensor == sensor) ? watermark
+						       : cur_sensor->watermark;
+
+		fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
+	}
+
+	fifo_watermark = max_t(u16, fifo_watermark, 2);
+	mutex_lock(&hw->lock);
+
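+	/*
+	 * The FIFO watermark field is 9 bits wide and spans two consecutive
+	 * registers starting at ST_ASM330LHH_REG_FIFO_THL_ADDR: read back the
+	 * high byte first so that its non-watermark bits are preserved by the
+	 * 16-bit write below.
+	 */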
+	err = hw->tf->read(hw->dev, ST_ASM330LHH_REG_FIFO_THL_ADDR + 1,
+			   sizeof(data), &data);
+	if (err < 0)
+		goto out;
+
+	fifo_watermark = ((data << 8) & ~ST_ASM330LHH_REG_FIFO_LEN_MASK) |
+			 (fifo_watermark & ST_ASM330LHH_REG_FIFO_LEN_MASK);
+	wdata = cpu_to_le16(fifo_watermark);
+	err = hw->tf->write(hw->dev, ST_ASM330LHH_REG_FIFO_THL_ADDR,
+			    sizeof(wdata), (u8 *)&wdata);
+
+out:
+	mutex_unlock(&hw->lock);
+
+	return err < 0 ? err : 0;
+}
+
+static inline void st_asm330lhh_sync_hw_ts(struct st_asm330lhh_hw *hw, s64 ts)
+{
+	s64 delta = ts - hw->hw_ts;
+
+	hw->ts_offset = st_asm330lhh_ewma(hw->ts_offset, delta,
+					  ST_ASM330LHH_EWMA_LEVEL);
+}
+
+static struct iio_dev *st_asm330lhh_get_iiodev_from_tag(struct st_asm330lhh_hw *hw,
+							u8 tag)
+{
+	struct iio_dev *iio_dev;
+
+	switch (tag) {
+	case ST_ASM330LHH_GYRO_TAG:
+		iio_dev = hw->iio_devs[ST_ASM330LHH_ID_GYRO];
+		break;
+	case ST_ASM330LHH_ACC_TAG:
+		iio_dev = hw->iio_devs[ST_ASM330LHH_ID_ACC];
+		break;
+	default:
+		iio_dev = NULL;
+		break;
+	}
+
+	return iio_dev;
+}
+
+static int st_asm330lhh_read_fifo(struct st_asm330lhh_hw *hw)
+{
+	u8 iio_buf[ALIGN(ST_ASM330LHH_SAMPLE_SIZE, sizeof(s64)) + sizeof(s64)];
+	u8 buf[6 * ST_ASM330LHH_FIFO_SAMPLE_SIZE], tag, *ptr;
+	s64 ts_delta_hw_ts = 0, ts_irq;
+	s64 ts_delta_offs;
+	int i, err, read_len, word_len, fifo_len;
+	struct st_asm330lhh_sensor *sensor;
+	struct iio_dev *iio_dev;
+	__le16 fifo_status;
+	u16 fifo_depth;
+	u32 val;
+	int ts_processed = 0;
+	s64 hw_ts = 0ull, delta_hw_ts, cpu_timestamp;
+
+	ts_irq = hw->ts - hw->delta_ts;
+
+	do {
+		err = hw->tf->read(hw->dev, ST_ASM330LHH_REG_FIFO_DIFFL_ADDR,
+				   sizeof(fifo_status), (u8 *)&fifo_status);
+		if (err < 0)
+			return err;
+
+		fifo_depth = le16_to_cpu(fifo_status) & ST_ASM330LHH_REG_FIFO_LEN_MASK;
+		if (!fifo_depth)
+			return 0;
+
+		read_len = 0;
+		fifo_len = fifo_depth * ST_ASM330LHH_FIFO_SAMPLE_SIZE;
+		while (read_len < fifo_len) {
+			word_len = min_t(int, fifo_len - read_len, sizeof(buf));
+			err = hw->tf->read(hw->dev,
+					   ST_ASM330LHH_REG_FIFO_OUT_TAG_ADDR,
+					   word_len, buf);
+			if (err < 0)
+				return err;
+
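+			/*
+			 * Each FIFO word is one tag byte followed by a 6-byte
+			 * sample; bits [7:3] of the tag identify the source
+			 * (gyro, accel or hw timestamp).
+			 */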
+			for (i = 0; i < word_len; i += ST_ASM330LHH_FIFO_SAMPLE_SIZE) {
+				ptr = &buf[i + ST_ASM330LHH_TAG_SIZE];
+				tag = buf[i] >> 3;
+
+				if (tag == ST_ASM330LHH_TS_TAG) {
+					val = get_unaligned_le32(ptr);
+					hw->hw_ts = val * ST_ASM330LHH_TS_DELTA_NS;
+					ts_delta_hw_ts = hw->hw_ts - hw->hw_ts_old;
+					hw_ts += ts_delta_hw_ts;
+					ts_delta_offs =
+						div_s64(hw->delta_hw_ts * ST_ASM330LHH_MAX_ODR, hw->odr);
+
+					hw->ts_offset = st_asm330lhh_ewma(hw->ts_offset, ts_irq -
+						hw->hw_ts + ts_delta_offs, ST_ASM330LHH_EWMA_LEVEL);
+
+					ts_irq += (hw->hw_ts + ts_delta_offs);
+					hw->hw_ts_old = hw->hw_ts;
+					ts_processed++;
+
+					if (!hw->tsample)
+						hw->tsample =
+							hw->ts_offset + (hw->hw_ts + ts_delta_offs);
+					else
+						hw->tsample =
+							hw->tsample + (ts_delta_hw_ts + ts_delta_offs);
+				} else {
+					iio_dev = st_asm330lhh_get_iiodev_from_tag(hw, tag);
+					if (!iio_dev)
+						continue;
+
+					sensor = iio_priv(iio_dev);
+					if (sensor->std_samples < sensor->std_level) {
+						sensor->std_samples++;
+						continue;
+					}
+
+					/* Check if timestamp is in the future. */
+					cpu_timestamp = st_asm330lhh_get_time_ns();
+
+					/* Avoid samples in the future. */
+					if (hw->tsample > cpu_timestamp)
+						hw->tsample = cpu_timestamp;
+
+					memcpy(iio_buf, ptr, ST_ASM330LHH_SAMPLE_SIZE);
+					iio_push_to_buffers_with_timestamp(iio_dev,
+									   iio_buf,
+									   hw->tsample);
+				}
+			}
+			read_len += word_len;
+		}
+
+		delta_hw_ts = div_s64(hw->delta_ts - hw_ts, ts_processed);
+		delta_hw_ts = div_s64(delta_hw_ts * hw->odr, ST_ASM330LHH_MAX_ODR);
+		hw->delta_hw_ts = st_asm330lhh_ewma(hw->delta_hw_ts,
+							delta_hw_ts,
+							ST_ASM330LHH_EWMA_LEVEL);
+	} while (read_len);
+
+	return read_len;
+}
+
+ssize_t st_asm330lhh_get_max_watermark(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ST_ASM330LHH_MAX_FIFO_DEPTH);
+}
+
+ssize_t st_asm330lhh_get_watermark(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *iio_dev = dev_get_drvdata(dev);
+	struct st_asm330lhh_sensor *sensor = iio_priv(iio_dev);
+
+	return sprintf(buf, "%d\n", sensor->watermark);
+}
+
+ssize_t st_asm330lhh_set_watermark(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	struct iio_dev *iio_dev = dev_get_drvdata(dev);
+	struct st_asm330lhh_sensor *sensor = iio_priv(iio_dev);
+	int err, val;
+
+	mutex_lock(&iio_dev->mlock);
+	if (iio_buffer_enabled(iio_dev)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = kstrtoint(buf, 10, &val);
+	if (err < 0)
+		goto out;
+
+	err = st_asm330lhh_update_watermark(sensor, val);
+	if (err < 0)
+		goto out;
+
+	sensor->watermark = val;
+
+out:
+	mutex_unlock(&iio_dev->mlock);
+
+	return err < 0 ? err : size;
+}
+
+ssize_t st_asm330lhh_flush_fifo(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct iio_dev *iio_dev = dev_get_drvdata(dev);
+	struct st_asm330lhh_sensor *sensor = iio_priv(iio_dev);
+	struct st_asm330lhh_hw *hw = sensor->hw;
+	s64 type, event;
+	int count;
+	s64 ts;
+
+	mutex_lock(&hw->fifo_lock);
+	ts = st_asm330lhh_get_time_ns();
+	hw->delta_ts = ts - hw->ts;
+	hw->ts = ts;
+	set_bit(ST_ASM330LHH_HW_FLUSH, &hw->state);
+
+	count = st_asm330lhh_read_fifo(hw);
+
+	mutex_unlock(&hw->fifo_lock);
+
+	type = count > 0 ? CUSTOM_IIO_EV_DIR_FIFO_DATA : CUSTOM_IIO_EV_DIR_FIFO_EMPTY;
+	event = IIO_UNMOD_EVENT_CODE(iio_dev->channels[0].type, -1,
+				     CUSTOM_IIO_EV_TYPE_FIFO_FLUSH, type);
+	iio_push_event(iio_dev, event, st_asm330lhh_get_time_ns());
+
+	return size;
+}
+
+int st_asm330lhh_suspend_fifo(struct st_asm330lhh_hw *hw)
+{
+	int err;
+
+	mutex_lock(&hw->fifo_lock);
+
+	st_asm330lhh_read_fifo(hw);
+	err = st_asm330lhh_set_fifo_mode(hw, ST_ASM330LHH_FIFO_BYPASS);
+
+	mutex_unlock(&hw->fifo_lock);
+
+	return err;
+}
+
+static int st_asm330lhh_update_fifo(struct iio_dev *iio_dev, bool enable)
+{
+	struct st_asm330lhh_sensor *sensor = iio_priv(iio_dev);
+	struct st_asm330lhh_hw *hw = sensor->hw;
+	int err;
+
+	mutex_lock(&hw->fifo_lock);
+
+	err = st_asm330lhh_sensor_set_enable(sensor, enable);
+	if (err < 0)
+		goto out;
+
+	err = st_asm330lhh_set_sensor_batching_odr(sensor, enable);
+	if (err < 0)
+		goto out;
+
+	err = st_asm330lhh_update_watermark(sensor, sensor->watermark);
+	if (err < 0)
+		goto out;
+
+	hw->odr = st_asm330lhh_ts_odr(hw);
+
+	if (enable && hw->fifo_mode == ST_ASM330LHH_FIFO_BYPASS) {
+		st_asm330lhh_reset_hwts(hw);
+		err = st_asm330lhh_set_fifo_mode(hw, ST_ASM330LHH_FIFO_CONT);
+	} else if (!hw->enable_mask) {
+		err = st_asm330lhh_set_fifo_mode(hw, ST_ASM330LHH_FIFO_BYPASS);
+	}
+
+out:
+	mutex_unlock(&hw->fifo_lock);
+
+	return err;
+}
+
+static irqreturn_t st_asm330lhh_handler_irq(int irq, void *private)
+{
+	struct st_asm330lhh_hw *hw = (struct st_asm330lhh_hw *)private;
+	s64 ts = st_asm330lhh_get_time_ns();
+
+	hw->delta_ts = ts - hw->ts;
+	hw->ts = ts;
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t st_asm330lhh_handler_thread(int irq, void *private)
+{
+	struct st_asm330lhh_hw *hw = (struct st_asm330lhh_hw *)private;
+
+	mutex_lock(&hw->fifo_lock);
+
+	st_asm330lhh_read_fifo(hw);
+	clear_bit(ST_ASM330LHH_HW_FLUSH, &hw->state);
+
+	mutex_unlock(&hw->fifo_lock);
+
+	return IRQ_HANDLED;
+}
+
+static int st_asm330lhh_buffer_preenable(struct iio_dev *iio_dev)
+{
+	return st_asm330lhh_update_fifo(iio_dev, true);
+}
+
+static int st_asm330lhh_buffer_postdisable(struct iio_dev *iio_dev)
+{
+	return st_asm330lhh_update_fifo(iio_dev, false);
+}
+
+static const struct iio_buffer_setup_ops st_asm330lhh_buffer_ops = {
+	.preenable = st_asm330lhh_buffer_preenable,
+	.postdisable = st_asm330lhh_buffer_postdisable,
+};
+
+static int st_asm330lhh_fifo_init(struct st_asm330lhh_hw *hw)
+{
+	return st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_FIFO_CTRL4_ADDR,
+					    ST_ASM330LHH_REG_DEC_TS_MASK, 1);
+}
+
+int st_asm330lhh_fifo_setup(struct st_asm330lhh_hw *hw)
+{
+	struct device_node *np = hw->dev->of_node;
+	struct iio_buffer *buffer;
+	unsigned long irq_type;
+	bool irq_active_low;
+	int i, err;
+
+	irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+	switch (irq_type) {
+	case IRQF_TRIGGER_HIGH:
+	case IRQF_TRIGGER_RISING:
+		irq_active_low = false;
+		break;
+	case IRQF_TRIGGER_LOW:
+	case IRQF_TRIGGER_FALLING:
+		irq_active_low = true;
+		break;
+	default:
+		dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
+		return -EINVAL;
+	}
+
+	err = st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_HLACTIVE_ADDR,
+					   ST_ASM330LHH_REG_HLACTIVE_MASK,
+					   irq_active_low);
+	if (err < 0)
+		return err;
+
+	if (np && of_property_read_bool(np, "drive-open-drain")) {
+		err = st_asm330lhh_write_with_mask(hw,
+					ST_ASM330LHH_REG_PP_OD_ADDR,
+					ST_ASM330LHH_REG_PP_OD_MASK, 1);
+		if (err < 0)
+			return err;
+
+		irq_type |= IRQF_SHARED;
+	}
+
+	err = devm_request_threaded_irq(hw->dev, hw->irq,
+					st_asm330lhh_handler_irq,
+					st_asm330lhh_handler_thread,
+					irq_type | IRQF_ONESHOT,
+					"asm330lhh", hw);
+	if (err) {
+		dev_err(hw->dev, "failed to request trigger irq %d\n",
+			hw->irq);
+		return err;
+	}
+
+	for (i = ST_ASM330LHH_ID_ACC; i < ST_ASM330LHH_ID_MAX; i++) {
+		if (!hw->iio_devs[i])
+			continue;
+
+		buffer = devm_iio_kfifo_allocate(hw->dev);
+		if (!buffer)
+			return -ENOMEM;
+
+		iio_device_attach_buffer(hw->iio_devs[i], buffer);
+		hw->iio_devs[i]->modes |= INDIO_BUFFER_SOFTWARE;
+		hw->iio_devs[i]->setup_ops = &st_asm330lhh_buffer_ops;
+	}
+
+	return st_asm330lhh_fifo_init(hw);
+}
+
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_core.c b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_core.c
new file mode 100644
index 0000000..9d9ee20
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_core.c
@@ -0,0 +1,824 @@
+/*
+ * STMicroelectronics st_asm330lhh sensor driver
+ *
+ * Copyright 2018 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/pm.h>
+#include <linux/version.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/st_sensors_pdata.h>
+
+#include "st_asm330lhh.h"
+
+#define ST_ASM330LHH_REG_INT1_ADDR		0x0d
+#define ST_ASM330LHH_REG_INT2_ADDR		0x0e
+#define ST_ASM330LHH_REG_FIFO_CTRL4_ADDR	0x0a
+#define ST_ASM330LHH_REG_FIFO_FTH_IRQ_MASK	BIT(3)
+#define ST_ASM330LHH_REG_WHOAMI_ADDR		0x0f
+#define ST_ASM330LHH_WHOAMI_VAL			0x6b
+#define ST_ASM330LHH_REG_CTRL1_XL_ADDR		0x10
+#define ST_ASM330LHH_REG_CTRL2_G_ADDR		0x11
+#define ST_ASM330LHH_REG_RESET_ADDR		0x12
+#define ST_ASM330LHH_REG_RESET_MASK		BIT(0)
+#define ST_ASM330LHH_REG_BDU_ADDR		0x12
+#define ST_ASM330LHH_REG_BDU_MASK		BIT(6)
+#define ST_ASM330LHH_REG_INT2_ON_INT1_ADDR	0x13
+#define ST_ASM330LHH_REG_INT2_ON_INT1_MASK	BIT(5)
+#define ST_ASM330LHH_REG_ROUNDING_ADDR		0x14
+#define ST_ASM330LHH_REG_ROUNDING_MASK		GENMASK(6, 5)
+#define ST_ASM330LHH_REG_TIMESTAMP_EN_ADDR	0x19
+#define ST_ASM330LHH_REG_TIMESTAMP_EN_MASK	BIT(5)
+
+#define ST_ASM330LHH_REG_GYRO_OUT_X_L_ADDR	0x22
+#define ST_ASM330LHH_REG_GYRO_OUT_Y_L_ADDR	0x24
+#define ST_ASM330LHH_REG_GYRO_OUT_Z_L_ADDR	0x26
+
+#define ST_ASM330LHH_REG_ACC_OUT_X_L_ADDR	0x28
+#define ST_ASM330LHH_REG_ACC_OUT_Y_L_ADDR	0x2a
+#define ST_ASM330LHH_REG_ACC_OUT_Z_L_ADDR	0x2c
+
+#define ST_ASM330LHH_REG_LIR_ADDR		0x56
+#define ST_ASM330LHH_REG_LIR_MASK		BIT(0)
+
+#define ST_ASM330LHH_ACC_FS_2G_GAIN		IIO_G_TO_M_S_2(61)
+#define ST_ASM330LHH_ACC_FS_4G_GAIN		IIO_G_TO_M_S_2(122)
+#define ST_ASM330LHH_ACC_FS_8G_GAIN		IIO_G_TO_M_S_2(244)
+#define ST_ASM330LHH_ACC_FS_16G_GAIN		IIO_G_TO_M_S_2(488)
+
+#define ST_ASM330LHH_GYRO_FS_125_GAIN		IIO_DEGREE_TO_RAD(4375)
+#define ST_ASM330LHH_GYRO_FS_250_GAIN		IIO_DEGREE_TO_RAD(8750)
+#define ST_ASM330LHH_GYRO_FS_500_GAIN		IIO_DEGREE_TO_RAD(17500)
+#define ST_ASM330LHH_GYRO_FS_1000_GAIN		IIO_DEGREE_TO_RAD(35000)
+#define ST_ASM330LHH_GYRO_FS_2000_GAIN		IIO_DEGREE_TO_RAD(70000)
+#define ST_ASM330LHH_GYRO_FS_4000_GAIN		IIO_DEGREE_TO_RAD(140000)
+
+/* Temperature in uC */
+#define ST_ASM330LHH_TEMP_GAIN			256
+#define ST_ASM330LHH_TEMP_FS_GAIN		(1000000 / ST_ASM330LHH_TEMP_GAIN)
+#define ST_ASM330LHH_OFFSET			(6400)
+
+struct st_asm330lhh_std_entry {
+	u16 odr;
+	u8 val;
+};
+
+/* Minimal number of samples to be discarded */
+struct st_asm330lhh_std_entry st_asm330lhh_std_table[] = {
+	{  13,  2 },
+	{  26,  3 },
+	{  52,  4 },
+	{ 104,  6 },
+	{ 208,  8 },
+	{ 416, 18 },
+};
+
+static const struct st_asm330lhh_odr_table_entry st_asm330lhh_odr_table[] = {
+	[ST_ASM330LHH_ID_ACC] = {
+		.reg = {
+			.addr = ST_ASM330LHH_REG_CTRL1_XL_ADDR,
+			.mask = GENMASK(7, 4),
+		},
+		.odr_avl[0] = {   0, 0x00 },
+		.odr_avl[1] = {  13, 0x01 },
+		.odr_avl[2] = {  26, 0x02 },
+		.odr_avl[3] = {  52, 0x03 },
+		.odr_avl[4] = { 104, 0x04 },
+		.odr_avl[5] = { 208, 0x05 },
+		.odr_avl[6] = { 416, 0x06 },
+	},
+	[ST_ASM330LHH_ID_GYRO] = {
+		.reg = {
+			.addr = ST_ASM330LHH_REG_CTRL2_G_ADDR,
+			.mask = GENMASK(7, 4),
+		},
+		.odr_avl[0] = {   0, 0x00 },
+		.odr_avl[1] = {  13, 0x01 },
+		.odr_avl[2] = {  26, 0x02 },
+		.odr_avl[3] = {  52, 0x03 },
+		.odr_avl[4] = { 104, 0x04 },
+		.odr_avl[5] = { 208, 0x05 },
+		.odr_avl[6] = { 416, 0x06 },
+	},
+	[ST_ASM330LHH_ID_TEMP] = {
+		.odr_avl[0] = {   0, 0x00 },
+		.odr_avl[1] = {  52, 0x01 },
+	}
+};
+
+static const struct st_asm330lhh_fs_table_entry st_asm330lhh_fs_table[] = {
+	[ST_ASM330LHH_ID_ACC] = {
+		.reg = {
+			.addr = ST_ASM330LHH_REG_CTRL1_XL_ADDR,
+			.mask = GENMASK(3, 2),
+		},
+		.size = ST_ASM330LHH_FS_ACC_LIST_SIZE,
+		.fs_avl[0] = {  ST_ASM330LHH_ACC_FS_2G_GAIN, 0x0 },
+		.fs_avl[1] = {  ST_ASM330LHH_ACC_FS_4G_GAIN, 0x2 },
+		.fs_avl[2] = {  ST_ASM330LHH_ACC_FS_8G_GAIN, 0x3 },
+		.fs_avl[3] = { ST_ASM330LHH_ACC_FS_16G_GAIN, 0x1 },
+	},
+	[ST_ASM330LHH_ID_GYRO] = {
+		.reg = {
+			.addr = ST_ASM330LHH_REG_CTRL2_G_ADDR,
+			.mask = GENMASK(3, 0),
+		},
+		.size = ST_ASM330LHH_FS_GYRO_LIST_SIZE,
+		.fs_avl[0] = {  ST_ASM330LHH_GYRO_FS_125_GAIN, 0x2 },
+		.fs_avl[1] = {  ST_ASM330LHH_GYRO_FS_250_GAIN, 0x0 },
+		.fs_avl[2] = {  ST_ASM330LHH_GYRO_FS_500_GAIN, 0x4 },
+		.fs_avl[3] = { ST_ASM330LHH_GYRO_FS_1000_GAIN, 0x8 },
+		.fs_avl[4] = { ST_ASM330LHH_GYRO_FS_2000_GAIN, 0xC },
+		.fs_avl[5] = { ST_ASM330LHH_GYRO_FS_4000_GAIN, 0x1 },
+	},
+	[ST_ASM330LHH_ID_TEMP] = {
+		.size = ST_ASM330LHH_FS_TEMP_LIST_SIZE,
+		.fs_avl[0] = {  ST_ASM330LHH_TEMP_FS_GAIN, 0x0 },
+	}
+};
+
+static const struct iio_chan_spec st_asm330lhh_acc_channels[] = {
+	ST_ASM330LHH_CHANNEL(IIO_ACCEL, ST_ASM330LHH_REG_ACC_OUT_X_L_ADDR,
+			   1, IIO_MOD_X, 0, 16, 16, 's'),
+	ST_ASM330LHH_CHANNEL(IIO_ACCEL, ST_ASM330LHH_REG_ACC_OUT_Y_L_ADDR,
+			   1, IIO_MOD_Y, 1, 16, 16, 's'),
+	ST_ASM330LHH_CHANNEL(IIO_ACCEL, ST_ASM330LHH_REG_ACC_OUT_Z_L_ADDR,
+			   1, IIO_MOD_Z, 2, 16, 16, 's'),
+	ST_ASM330LHH_FLUSH_CHANNEL(IIO_ACCEL),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const struct iio_chan_spec st_asm330lhh_gyro_channels[] = {
+	ST_ASM330LHH_CHANNEL(IIO_ANGL_VEL, ST_ASM330LHH_REG_GYRO_OUT_X_L_ADDR,
+			   1, IIO_MOD_X, 0, 16, 16, 's'),
+	ST_ASM330LHH_CHANNEL(IIO_ANGL_VEL, ST_ASM330LHH_REG_GYRO_OUT_Y_L_ADDR,
+			   1, IIO_MOD_Y, 1, 16, 16, 's'),
+	ST_ASM330LHH_CHANNEL(IIO_ANGL_VEL, ST_ASM330LHH_REG_GYRO_OUT_Z_L_ADDR,
+			   1, IIO_MOD_Z, 2, 16, 16, 's'),
+	ST_ASM330LHH_FLUSH_CHANNEL(IIO_ANGL_VEL),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const struct iio_chan_spec st_asm330lhh_temp_channels[] = {
+	{
+		.type = IIO_TEMP,
+		.address = ST_ASM330LHH_REG_OUT_TEMP_L_ADDR,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+				| BIT(IIO_CHAN_INFO_OFFSET)
+				| BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = -1,
+	},
+};
+
+int st_asm330lhh_write_with_mask(struct st_asm330lhh_hw *hw, u8 addr, u8 mask,
+				 u8 val)
+{
+	u8 data;
+	int err;
+
+	mutex_lock(&hw->lock);
+
+	err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
+	if (err < 0) {
+		dev_err(hw->dev, "failed to read %02x register\n", addr);
+		goto out;
+	}
+
+	data = (data & ~mask) | ((val << __ffs(mask)) & mask);
+
+	err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
+	if (err < 0)
+		dev_err(hw->dev, "failed to write %02x register\n", addr);
+
+out:
+	mutex_unlock(&hw->lock);
+
+	return err;
+}
+
+static int st_asm330lhh_check_whoami(struct st_asm330lhh_hw *hw)
+{
+	int err;
+	u8 data;
+
+	err = hw->tf->read(hw->dev, ST_ASM330LHH_REG_WHOAMI_ADDR, sizeof(data),
+			   &data);
+	if (err < 0) {
+		dev_err(hw->dev, "failed to read whoami register\n");
+		return err;
+	}
+
+	if (data != ST_ASM330LHH_WHOAMI_VAL) {
+		dev_err(hw->dev, "unsupported whoami [%02x]\n", data);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int st_asm330lhh_set_full_scale(struct st_asm330lhh_sensor *sensor,
+				       u32 gain)
+{
+	enum st_asm330lhh_sensor_id id = sensor->id;
+	int i, err;
+	u8 val;
+
+	for (i = 0; i < st_asm330lhh_fs_table[id].size; i++)
+		if (st_asm330lhh_fs_table[id].fs_avl[i].gain == gain)
+			break;
+
+	if (i == st_asm330lhh_fs_table[id].size)
+		return -EINVAL;
+
+	val = st_asm330lhh_fs_table[id].fs_avl[i].val;
+	err = st_asm330lhh_write_with_mask(sensor->hw,
+					st_asm330lhh_fs_table[id].reg.addr,
+					st_asm330lhh_fs_table[id].reg.mask,
+					val);
+	if (err < 0)
+		return err;
+
+	sensor->gain = gain;
+
+	return 0;
+}
+
+int st_asm330lhh_get_odr_val(enum st_asm330lhh_sensor_id id, u16 odr, u8 *val)
+{
+	int i;
+
+	for (i = 0; i < ST_ASM330LHH_ODR_LIST_SIZE; i++)
+		if (st_asm330lhh_odr_table[id].odr_avl[i].hz >= odr)
+			break;
+
+	if (i == ST_ASM330LHH_ODR_LIST_SIZE)
+		return -EINVAL;
+
+	*val = st_asm330lhh_odr_table[id].odr_avl[i].val;
+
+	return 0;
+}
+
+static int st_asm330lhh_set_std_level(struct st_asm330lhh_sensor *sensor,
+			u16 odr)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(st_asm330lhh_std_table); i++)
+		if (st_asm330lhh_std_table[i].odr == odr)
+			break;
+
+	if (i == ARRAY_SIZE(st_asm330lhh_std_table))
+		return -EINVAL;
+
+	sensor->std_level = st_asm330lhh_std_table[i].val;
+	sensor->std_samples = 0;
+
+	return 0;
+}
+
+static int st_asm330lhh_set_odr(struct st_asm330lhh_sensor *sensor, u16 odr)
+{
+	struct st_asm330lhh_hw *hw = sensor->hw;
+	u8 val;
+
+	if (st_asm330lhh_get_odr_val(sensor->id, odr, &val) < 0)
+		return -EINVAL;
+
+	return st_asm330lhh_write_with_mask(hw,
+				st_asm330lhh_odr_table[sensor->id].reg.addr,
+				st_asm330lhh_odr_table[sensor->id].reg.mask, val);
+}
+
+int st_asm330lhh_sensor_set_enable(struct st_asm330lhh_sensor *sensor,
+				   bool enable)
+{
+	u16 odr = enable ? sensor->odr : 0;
+	int err;
+
+	if (sensor->id != ST_ASM330LHH_ID_TEMP) {
+		err = st_asm330lhh_set_odr(sensor, odr);
+		if (err < 0)
+			return err;
+	}
+
+	if (enable)
+		sensor->hw->enable_mask |= BIT(sensor->id);
+	else
+		sensor->hw->enable_mask &= ~BIT(sensor->id);
+
+	return 0;
+}
+
+static int st_asm330lhh_read_oneshot(struct st_asm330lhh_sensor *sensor,
+				     u8 addr, int *val)
+{
+	int err, delay;
+	__le16 data;
+
+	if (sensor->id == ST_ASM330LHH_ID_TEMP) {
+		u8 status;
+
+		mutex_lock(&sensor->hw->fifo_lock);
+		err = sensor->hw->tf->read(sensor->hw->dev,
+					   ST_ASM330LHH_REG_STATUS_ADDR, sizeof(status), &status);
+		if (err < 0)
+			goto unlock;
+
+		if (status & ST_ASM330LHH_REG_STATUS_TDA) {
+			err = sensor->hw->tf->read(sensor->hw->dev, addr, sizeof(data),
+					   (u8 *)&data);
+			if (err < 0)
+				goto unlock;
+
+			sensor->old_data = data;
+		} else
+			data = sensor->old_data;
+unlock:
+		mutex_unlock(&sensor->hw->fifo_lock);
+
+	} else {
+		err = st_asm330lhh_sensor_set_enable(sensor, true);
+		if (err < 0)
+			return err;
+
+		delay = 1000000 / sensor->odr;
+		usleep_range(delay, 2 * delay);
+
+		err = sensor->hw->tf->read(sensor->hw->dev, addr, sizeof(data),
+					   (u8 *)&data);
+		if (err < 0)
+			return err;
+
+		st_asm330lhh_sensor_set_enable(sensor, false);
+	}
+
+	*val = (s16)data;
+
+	return IIO_VAL_INT;
+}
+
+static int st_asm330lhh_read_raw(struct iio_dev *iio_dev,
+				 struct iio_chan_spec const *ch,
+				 int *val, int *val2, long mask)
+{
+	struct st_asm330lhh_sensor *sensor = iio_priv(iio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&iio_dev->mlock);
+		if (iio_buffer_enabled(iio_dev)) {
+			ret = -EBUSY;
+			mutex_unlock(&iio_dev->mlock);
+			break;
+		}
+		ret = st_asm330lhh_read_oneshot(sensor, ch->address, val);
+		mutex_unlock(&iio_dev->mlock);
+		break;
+	case IIO_CHAN_INFO_OFFSET:
+		switch (ch->type) {
+		case IIO_TEMP:
+			*val = sensor->offset;
+			ret = IIO_VAL_INT;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = sensor->odr;
+		ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		switch (ch->type) {
+		case IIO_TEMP:
+			*val = 1;
+			*val2 = ST_ASM330LHH_TEMP_GAIN;
+			ret = IIO_VAL_FRACTIONAL;
+			break;
+		case IIO_ACCEL:
+		case IIO_ANGL_VEL:
+			*val = 0;
+			*val2 = sensor->gain;
+			ret = IIO_VAL_INT_PLUS_MICRO;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int st_asm330lhh_write_raw(struct iio_dev *iio_dev,
+				  struct iio_chan_spec const *chan,
+				  int val, int val2, long mask)
+{
+	struct st_asm330lhh_sensor *sensor = iio_priv(iio_dev);
+	int err;
+
+	mutex_lock(&iio_dev->mlock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		err = st_asm330lhh_set_full_scale(sensor, val2);
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ: {
+		u8 data;
+
+		err = st_asm330lhh_set_std_level(sensor, val);
+		if (err < 0)
+			break;
+
+		err = st_asm330lhh_get_odr_val(sensor->id, val, &data);
+		if (!err)
+			sensor->odr = val;
+
+		err = st_asm330lhh_set_odr(sensor, sensor->odr);
+		break;
+	}
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&iio_dev->mlock);
+
+	return err;
+}
+
+static ssize_t
+st_asm330lhh_sysfs_sampling_frequency_avail(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct st_asm330lhh_sensor *sensor = iio_priv(dev_get_drvdata(dev));
+	enum st_asm330lhh_sensor_id id = sensor->id;
+	int i, len = 0;
+
+	for (i = 1; i < ST_ASM330LHH_ODR_LIST_SIZE; i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+				 st_asm330lhh_odr_table[id].odr_avl[i].hz);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t st_asm330lhh_sysfs_scale_avail(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct st_asm330lhh_sensor *sensor = iio_priv(dev_get_drvdata(dev));
+	enum st_asm330lhh_sensor_id id = sensor->id;
+	int i, len = 0;
+
+	for (i = 0; i < st_asm330lhh_fs_table[id].size; i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+				 st_asm330lhh_fs_table[id].fs_avl[i].gain);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(st_asm330lhh_sysfs_sampling_frequency_avail);
+static IIO_DEVICE_ATTR(in_accel_scale_available, 0444,
+		       st_asm330lhh_sysfs_scale_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_anglvel_scale_available, 0444,
+		       st_asm330lhh_sysfs_scale_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_scale_available, 0444,
+		       st_asm330lhh_sysfs_scale_avail, NULL, 0);
+static IIO_DEVICE_ATTR(hwfifo_watermark_max, 0444,
+		       st_asm330lhh_get_max_watermark, NULL, 0);
+static IIO_DEVICE_ATTR(hwfifo_flush, 0200, NULL, st_asm330lhh_flush_fifo, 0);
+static IIO_DEVICE_ATTR(hwfifo_watermark, 0644, st_asm330lhh_get_watermark,
+		       st_asm330lhh_set_watermark, 0);
+
+static struct attribute *st_asm330lhh_acc_attributes[] = {
+	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_in_accel_scale_available.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
+	&iio_dev_attr_hwfifo_flush.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group st_asm330lhh_acc_attribute_group = {
+	.attrs = st_asm330lhh_acc_attributes,
+};
+
+static const struct iio_info st_asm330lhh_acc_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &st_asm330lhh_acc_attribute_group,
+	.read_raw = st_asm330lhh_read_raw,
+	.write_raw = st_asm330lhh_write_raw,
+};
+
+static struct attribute *st_asm330lhh_gyro_attributes[] = {
+	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_scale_available.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
+	&iio_dev_attr_hwfifo_flush.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group st_asm330lhh_gyro_attribute_group = {
+	.attrs = st_asm330lhh_gyro_attributes,
+};
+
+static const struct iio_info st_asm330lhh_gyro_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &st_asm330lhh_gyro_attribute_group,
+	.read_raw = st_asm330lhh_read_raw,
+	.write_raw = st_asm330lhh_write_raw,
+};
+
+static struct attribute *st_asm330lhh_temp_attributes[] = {
+	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_in_temp_scale_available.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
+	&iio_dev_attr_hwfifo_flush.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group st_asm330lhh_temp_attribute_group = {
+	.attrs = st_asm330lhh_temp_attributes,
+};
+
+static const struct iio_info st_asm330lhh_temp_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &st_asm330lhh_temp_attribute_group,
+	.read_raw = st_asm330lhh_read_raw,
+	.write_raw = st_asm330lhh_write_raw,
+};
+
+static const unsigned long st_asm330lhh_available_scan_masks[] = { 0x7, 0x0 };
+
+static int st_asm330lhh_of_get_drdy_pin(struct st_asm330lhh_hw *hw, int *drdy_pin)
+{
+	struct device_node *np = hw->dev->of_node;
+
+	if (!np)
+		return -EINVAL;
+
+	return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
+}
+
+static int st_asm330lhh_get_drdy_reg(struct st_asm330lhh_hw *hw, u8 *drdy_reg)
+{
+	int err = 0, drdy_pin;
+
+	if (st_asm330lhh_of_get_drdy_pin(hw, &drdy_pin) < 0) {
+		struct st_sensors_platform_data *pdata;
+		struct device *dev = hw->dev;
+
+		pdata = (struct st_sensors_platform_data *)dev->platform_data;
+		drdy_pin = pdata ? pdata->drdy_int_pin : 1;
+	}
+
+	switch (drdy_pin) {
+	case 1:
+		*drdy_reg = ST_ASM330LHH_REG_INT1_ADDR;
+		break;
+	case 2:
+		*drdy_reg = ST_ASM330LHH_REG_INT2_ADDR;
+		break;
+	default:
+		dev_err(hw->dev, "unsupported data ready pin\n");
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int st_asm330lhh_init_device(struct st_asm330lhh_hw *hw)
+{
+	u8 drdy_int_reg;
+	int err;
+
+	err = st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_RESET_ADDR,
+					   ST_ASM330LHH_REG_RESET_MASK, 1);
+	if (err < 0)
+		return err;
+
+	msleep(200);
+
+	/* latch interrupts */
+	err = st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_LIR_ADDR,
+					   ST_ASM330LHH_REG_LIR_MASK, 1);
+	if (err < 0)
+		return err;
+
+	/* enable Block Data Update */
+	err = st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_BDU_ADDR,
+					   ST_ASM330LHH_REG_BDU_MASK, 1);
+	if (err < 0)
+		return err;
+
+	err = st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_ROUNDING_ADDR,
+					   ST_ASM330LHH_REG_ROUNDING_MASK, 3);
+	if (err < 0)
+		return err;
+
+	/* init timestamp engine */
+	err = st_asm330lhh_write_with_mask(hw, ST_ASM330LHH_REG_TIMESTAMP_EN_ADDR,
+					   ST_ASM330LHH_REG_TIMESTAMP_EN_MASK, 1);
+	if (err < 0)
+		return err;
+
+	/* enable FIFO watermark interrupt */
+	err = st_asm330lhh_get_drdy_reg(hw, &drdy_int_reg);
+	if (err < 0)
+		return err;
+
+	return st_asm330lhh_write_with_mask(hw, drdy_int_reg,
+					    ST_ASM330LHH_REG_FIFO_FTH_IRQ_MASK, 1);
+}
+
+static struct iio_dev *st_asm330lhh_alloc_iiodev(struct st_asm330lhh_hw *hw,
+						 enum st_asm330lhh_sensor_id id)
+{
+	struct st_asm330lhh_sensor *sensor;
+	struct iio_dev *iio_dev;
+
+	iio_dev = devm_iio_device_alloc(hw->dev, sizeof(*sensor));
+	if (!iio_dev)
+		return NULL;
+
+	iio_dev->modes = INDIO_DIRECT_MODE;
+	iio_dev->dev.parent = hw->dev;
+	iio_dev->available_scan_masks = st_asm330lhh_available_scan_masks;
+
+	sensor = iio_priv(iio_dev);
+	sensor->id = id;
+	sensor->hw = hw;
+	sensor->odr = st_asm330lhh_odr_table[id].odr_avl[1].hz;
+	sensor->gain = st_asm330lhh_fs_table[id].fs_avl[0].gain;
+	sensor->watermark = 1;
+	sensor->old_data = 0;
+
+	switch (id) {
+	case ST_ASM330LHH_ID_ACC:
+		iio_dev->channels = st_asm330lhh_acc_channels;
+		iio_dev->num_channels = ARRAY_SIZE(st_asm330lhh_acc_channels);
+		iio_dev->name = "asm330lhh_accel";
+		iio_dev->info = &st_asm330lhh_acc_info;
+		sensor->batch_addr = ST_ASM330LHH_REG_FIFO_BATCH_ADDR;
+		sensor->batch_mask = GENMASK(3, 0);
+		sensor->offset = 0;
+		break;
+	case ST_ASM330LHH_ID_GYRO:
+		iio_dev->channels = st_asm330lhh_gyro_channels;
+		iio_dev->num_channels = ARRAY_SIZE(st_asm330lhh_gyro_channels);
+		iio_dev->name = "asm330lhh_gyro";
+		iio_dev->info = &st_asm330lhh_gyro_info;
+		sensor->batch_addr = ST_ASM330LHH_REG_FIFO_BATCH_ADDR;
+		sensor->batch_mask = GENMASK(7, 4);
+		sensor->offset = 0;
+		break;
+	case ST_ASM330LHH_ID_TEMP:
+		iio_dev->channels = st_asm330lhh_temp_channels;
+		iio_dev->num_channels = ARRAY_SIZE(st_asm330lhh_temp_channels);
+		iio_dev->name = "asm330lhh_temp";
+		iio_dev->info = &st_asm330lhh_temp_info;
+		sensor->offset = ST_ASM330LHH_OFFSET;
+		break;
+	default:
+		return NULL;
+	}
+
+	return iio_dev;
+}
+
+int st_asm330lhh_probe(struct device *dev, int irq,
+		       const struct st_asm330lhh_transfer_function *tf_ops)
+{
+	struct st_asm330lhh_hw *hw;
+	int i, err;
+
+	hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, (void *)hw);
+
+	mutex_init(&hw->lock);
+	mutex_init(&hw->fifo_lock);
+
+	hw->dev = dev;
+	hw->irq = irq;
+	hw->tf = tf_ops;
+
+	dev_info(hw->dev, "Ver: %s\n", ST_ASM330LHH_VERSION);
+	err = st_asm330lhh_check_whoami(hw);
+	if (err < 0)
+		return err;
+
+	err = st_asm330lhh_init_device(hw);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < ST_ASM330LHH_ID_MAX; i++) {
+		hw->iio_devs[i] = st_asm330lhh_alloc_iiodev(hw, i);
+		if (!hw->iio_devs[i])
+			return -ENOMEM;
+	}
+
+	if (hw->irq > 0) {
+		err = st_asm330lhh_fifo_setup(hw);
+		if (err < 0)
+			return err;
+	}
+
+	for (i = 0; i < ST_ASM330LHH_ID_MAX; i++) {
+		if (!hw->iio_devs[i])
+			continue;
+
+		err = devm_iio_device_register(hw->dev, hw->iio_devs[i]);
+		if (err)
+			return err;
+	}
+
+	dev_info(hw->dev, "probe ok\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(st_asm330lhh_probe);
+
+static int __maybe_unused st_asm330lhh_suspend(struct device *dev)
+{
+	struct st_asm330lhh_hw *hw = dev_get_drvdata(dev);
+	struct st_asm330lhh_sensor *sensor;
+	int i, err = 0;
+
+	for (i = 0; i < ST_ASM330LHH_ID_MAX; i++) {
+		if (!hw->iio_devs[i])
+			continue;
+
+		sensor = iio_priv(hw->iio_devs[i]);
+
+		if (!(hw->enable_mask & BIT(sensor->id)))
+			continue;
+
+		err = st_asm330lhh_set_odr(sensor, 0);
+		if (err < 0)
+			return err;
+	}
+
+	if (hw->enable_mask)
+		err = st_asm330lhh_suspend_fifo(hw);
+
+	return err;
+}
+
+static int __maybe_unused st_asm330lhh_resume(struct device *dev)
+{
+	struct st_asm330lhh_hw *hw = dev_get_drvdata(dev);
+	struct st_asm330lhh_sensor *sensor;
+	int i, err = 0;
+
+	for (i = 0; i < ST_ASM330LHH_ID_MAX; i++) {
+		if (!hw->iio_devs[i])
+			continue;
+
+		sensor = iio_priv(hw->iio_devs[i]);
+		if (!(hw->enable_mask & BIT(sensor->id)))
+			continue;
+
+		err = st_asm330lhh_set_odr(sensor, sensor->odr);
+		if (err < 0)
+			return err;
+	}
+
+	if (hw->enable_mask)
+		err = st_asm330lhh_set_fifo_mode(hw, ST_ASM330LHH_FIFO_CONT);
+
+	return err;
+}
+
+const struct dev_pm_ops st_asm330lhh_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(st_asm330lhh_suspend, st_asm330lhh_resume)
+};
+EXPORT_SYMBOL(st_asm330lhh_pm_ops);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics st_asm330lhh driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ST_ASM330LHH_VERSION);
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_i2c.c b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_i2c.c
new file mode 100644
index 0000000..4875097
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_i2c.c
@@ -0,0 +1,94 @@
+/*
+ * STMicroelectronics st_asm330lhh i2c driver
+ *
+ * Copyright 2018 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include "st_asm330lhh.h"
+
+static int st_asm330lhh_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_msg msg[2];
+
+	msg[0].addr = client->addr;
+	msg[0].flags = client->flags;
+	msg[0].len = 1;
+	msg[0].buf = &addr;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = client->flags | I2C_M_RD;
+	msg[1].len = len;
+	msg[1].buf = data;
+
+	return i2c_transfer(client->adapter, msg, 2);
+}
+
+static int st_asm330lhh_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_msg msg;
+	u8 send[len + 1];
+
+	send[0] = addr;
+	memcpy(&send[1], data, len * sizeof(u8));
+
+	msg.addr = client->addr;
+	msg.flags = client->flags;
+	msg.len = len + 1;
+	msg.buf = send;
+
+	return i2c_transfer(client->adapter, &msg, 1);
+}
+
+static const struct st_asm330lhh_transfer_function st_asm330lhh_transfer_fn = {
+	.read = st_asm330lhh_i2c_read,
+	.write = st_asm330lhh_i2c_write,
+};
+
+static int st_asm330lhh_i2c_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	return st_asm330lhh_probe(&client->dev, client->irq,
+				&st_asm330lhh_transfer_fn);
+}
+
+static const struct of_device_id st_asm330lhh_i2c_of_match[] = {
+	{
+		.compatible = "st,asm330lhh",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_asm330lhh_i2c_of_match);
+
+static const struct i2c_device_id st_asm330lhh_i2c_id_table[] = {
+	{ ST_ASM330LHH_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, st_asm330lhh_i2c_id_table);
+
+static struct i2c_driver st_asm330lhh_driver = {
+	.driver = {
+		.name = "st_asm330lhh_i2c",
+		.pm = &st_asm330lhh_pm_ops,
+		.of_match_table = of_match_ptr(st_asm330lhh_i2c_of_match),
+	},
+	.probe = st_asm330lhh_i2c_probe,
+	.id_table = st_asm330lhh_i2c_id_table,
+};
+module_i2c_driver(st_asm330lhh_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics st_asm330lhh i2c driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ST_ASM330LHH_VERSION);
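For reference, a minimal sketch of the write framing used by st_asm330lhh_i2c_write() above: the register address goes in byte 0, followed by the payload, and the whole buffer is sent as a single I2C message. The bound EXAMPLE_TX_MAX and the helper name are illustrative only and are not part of the driver.

#include <stdint.h>
#include <string.h>

#define EXAMPLE_TX_MAX	32	/* hypothetical bound, not from the driver */

/* Build the on-wire buffer: register address first, then payload. */
static int example_frame_i2c_write(uint8_t reg, const uint8_t *data,
				   int len, uint8_t *out)
{
	if (len < 0 || len + 1 > EXAMPLE_TX_MAX)
		return -1;

	out[0] = reg;
	memcpy(&out[1], data, len);
	return len + 1;		/* number of bytes to put on the bus */
}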
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_spi.c b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_spi.c
new file mode 100644
index 0000000..07b8400
--- /dev/null
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_spi.c
@@ -0,0 +1,109 @@
+/*
+ * STMicroelectronics st_asm330lhh spi driver
+ *
+ * Copyright 2018 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include "st_asm330lhh.h"
+
+#define SENSORS_SPI_READ	BIT(7)
+
+static int st_asm330lhh_spi_read(struct device *dev, u8 addr, int len,
+			       u8 *data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct st_asm330lhh_hw *hw = spi_get_drvdata(spi);
+	int err;
+
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = hw->tb.tx_buf,
+			.bits_per_word = 8,
+			.len = 1,
+		},
+		{
+			.rx_buf = hw->tb.rx_buf,
+			.bits_per_word = 8,
+			.len = len,
+		}
+	};
+
+	hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
+
+	err = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+	if (err < 0)
+		return err;
+
+	memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
+
+	return len;
+}
+
+static int st_asm330lhh_spi_write(struct device *dev, u8 addr, int len,
+				u8 *data)
+{
+	struct st_asm330lhh_hw *hw;
+	struct spi_device *spi;
+
+	if (len >= ST_ASM330LHH_TX_MAX_LENGTH)
+		return -ENOMEM;
+
+	spi = to_spi_device(dev);
+	hw = spi_get_drvdata(spi);
+
+	hw->tb.tx_buf[0] = addr;
+	memcpy(&hw->tb.tx_buf[1], data, len);
+
+	return spi_write(spi, hw->tb.tx_buf, len + 1);
+}
+
+static const struct st_asm330lhh_transfer_function st_asm330lhh_transfer_fn = {
+	.read = st_asm330lhh_spi_read,
+	.write = st_asm330lhh_spi_write,
+};
+
+static int st_asm330lhh_spi_probe(struct spi_device *spi)
+{
+	return st_asm330lhh_probe(&spi->dev, spi->irq,
+				&st_asm330lhh_transfer_fn);
+}
+
+static const struct of_device_id st_asm330lhh_spi_of_match[] = {
+	{
+		.compatible = "st,asm330lhh",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_asm330lhh_spi_of_match);
+
+static const struct spi_device_id st_asm330lhh_spi_id_table[] = {
+	{ ST_ASM330LHH_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(spi, st_asm330lhh_spi_id_table);
+
+static struct spi_driver st_asm330lhh_driver = {
+	.driver = {
+		.name = "st_asm330lhh_spi",
+		.pm = &st_asm330lhh_pm_ops,
+		.of_match_table = of_match_ptr(st_asm330lhh_spi_of_match),
+	},
+	.probe = st_asm330lhh_spi_probe,
+	.id_table = st_asm330lhh_spi_id_table,
+};
+module_spi_driver(st_asm330lhh_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics st_asm330lhh spi driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ST_ASM330LHH_VERSION);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 19aa957..c9263ac 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -347,10 +347,9 @@
 	adc_humidity = be16_to_cpu(tmp);
 	comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
 
-	*val = comp_humidity;
-	*val2 = 1024;
+	*val = comp_humidity * 1000 / 1024;
 
-	return IIO_VAL_FRACTIONAL;
+	return IIO_VAL_INT;
 }
 
 static int bmp280_read_raw(struct iio_dev *indio_dev,
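A small stand-alone sketch of the fixed-point conversion introduced in the hunk above, assuming (as in the Bosch compensation formula) that comp_humidity is %RH in Q22.10 format, i.e. 1/1024 %RH per LSB, so the value is now reported as an integer in milli-%RH instead of a fractional pair:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t comp_humidity = 47445;	/* example Q22.10 value, about 46.33 %RH */
	uint32_t milli_percent = comp_humidity * 1000 / 1024;

	printf("%u m%%RH\n", milli_percent);	/* prints 46333 */
	return 0;
}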
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index fb3fb89..5d5368a 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -34,6 +34,18 @@
 	  libibverbs, libibcm and a hardware driver library from
 	  <http://www.openfabrics.org/git/>.
 
+config INFINIBAND_USER_ACCESS_UCM
+	bool "Userspace CM (UCM, DEPRECATED)"
+	depends on BROKEN
+	depends on INFINIBAND_USER_ACCESS
+	help
+	  The UCM module has known security flaws, which no one is
+	  interested in fixing. The user-space part of this code was
+	  dropped from upstream a long time ago.
+
+	  This option is DEPRECATED and planned to be removed.
+
+
 config INFINIBAND_USER_MEM
 	bool
 	depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index edaae9f..33dc00c 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -4,8 +4,8 @@
 obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_cm.o iw_cm.o \
 					$(infiniband-y)
 obj-$(CONFIG_INFINIBAND_USER_MAD) +=	ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=	ib_uverbs.o ib_ucm.o \
-					$(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
 
 ib_core-y :=			packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
 				device.o fmr_pool.o cache.o netlink.o \
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cbe5324..85d4ef3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -673,6 +673,7 @@
 	dgid = (union ib_gid *) &addr->sib_addr;
 	pkey = ntohs(addr->sib_pkey);
 
+	mutex_lock(&lock);
 	list_for_each_entry(cur_dev, &dev_list, list) {
 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
 			if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -696,18 +697,19 @@
 					cma_dev = cur_dev;
 					sgid = gid;
 					id_priv->id.port_num = p;
+					goto found;
 				}
 			}
 		}
 	}
-
-	if (!cma_dev)
-		return -ENODEV;
+	mutex_unlock(&lock);
+	return -ENODEV;
 
 found:
 	cma_attach_to_dev(id_priv, cma_dev);
-	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
-	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+	mutex_unlock(&lock);
+	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
 	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
 	return 0;
 }
@@ -1409,9 +1411,16 @@
 		       (addr->src_addr.ss_family == AF_IB ||
 			cma_protocol_roce_dev_port(id->device, port_num));
 
-	return !addr->dev_addr.bound_dev_if ||
-	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
-		addr->dev_addr.bound_dev_if == net_dev->ifindex);
+	/*
+	 * Net namespaces must match, and if the listener is listening
+	 * on a specific netdevice then the netdevice must match as well.
+	 */
+	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+	    (!!addr->dev_addr.bound_dev_if ==
+	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+		return true;
+	else
+		return false;
 }
 
 static struct rdma_id_private *cma_find_listener(
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 2395fe2..3e2ab04 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1549,7 +1549,8 @@
 			    mad_reg_req->oui, 3)) {
 			method = &(*vendor_table)->vendor_class[
 						vclass]->method_table[i];
-			BUG_ON(!*method);
+			if (!*method)
+				goto error3;
 			goto check_in_use;
 		}
 	}
@@ -1559,10 +1560,12 @@
 				vclass]->oui[i])) {
 			method = &(*vendor_table)->vendor_class[
 				vclass]->method_table[i];
-			BUG_ON(*method);
 			/* Allocate method table for this OUI */
-			if ((ret = allocate_method_table(method)))
-				goto error3;
+			if (!*method) {
+				ret = allocate_method_table(method);
+				if (ret)
+					goto error3;
+			}
 			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
 			       mad_reg_req->oui, 3);
 			goto check_in_use;
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index dbfd854..1d90a12 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -87,7 +87,7 @@
 	}
 
 	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
-	if (ret < nents) {
+	if (ret < 0 || ret < nents) {
 		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
 		return -EINVAL;
 	}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index a036d70..fa9ef8e 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -124,6 +124,8 @@
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
 						      struct ucma_file *file)
 {
@@ -218,7 +220,7 @@
 		return NULL;
 
 	mutex_lock(&mut);
-	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+	mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
 	mutex_unlock(&mut);
 	if (mc->id < 0)
 		goto error;
@@ -1385,6 +1387,10 @@
 		goto err3;
 	}
 
+	mutex_lock(&mut);
+	idr_replace(&multicast_idr, mc, mc->id);
+	mutex_unlock(&mut);
+
 	mutex_unlock(&file->mut);
 	ucma_put_ctx(ctx);
 	return 0;
@@ -1541,6 +1547,10 @@
 	f = fdget(cmd.fd);
 	if (!f.file)
 		return -ENOENT;
+	if (f.file->f_op != &ucma_fops) {
+		ret = -EINVAL;
+		goto file_put;
+	}
 
 	/* Validate current fd and prevent destruction of id. */
 	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1710,6 +1720,8 @@
 		mutex_lock(&mut);
 		if (!ctx->closing) {
 			mutex_unlock(&mut);
+			ucma_put_ctx(ctx);
+			wait_for_completion(&ctx->comp);
 			/* rdma_destroy_id ensures that no event handlers are
 			 * inflight for that id before releasing it.
 			 */
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index e74aa1d..99cebf3 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -122,16 +122,7 @@
 	umem->address   = addr;
 	umem->page_size = PAGE_SIZE;
 	umem->pid       = get_task_pid(current, PIDTYPE_PID);
-	/*
-	 * We ask for writable memory if any of the following
-	 * access flags are set.  "Local write" and "remote write"
-	 * obviously require write access.  "Remote atomic" can do
-	 * things like fetch and add, which will modify memory, and
-	 * "MW bind" can change permissions by binding a window.
-	 */
-	umem->writable  = !!(access &
-		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
-		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+	umem->writable   = ib_access_writable(access);
 
 	if (access & IB_ACCESS_ON_DEMAND) {
 		put_pid(umem->pid);
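The hunk above replaces the open-coded flag test with ib_access_writable(). A sketch of such a helper, derived only from the access flags the removed comment enumerates, is shown below; the canonical definition lives in the rdma headers and may differ in detail.

static inline bool example_ib_access_writable(int access_flags)
{
	/*
	 * Write access is implied by local/remote write, remote atomics
	 * and memory-window binding, per the removed comment above.
	 */
	return access_flags & (IB_ACCESS_LOCAL_WRITE |
			       IB_ACCESS_REMOTE_WRITE |
			       IB_ACCESS_REMOTE_ATOMIC |
			       IB_ACCESS_MW_BIND);
}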
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4b717cf..6f875bf 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3725,6 +3725,11 @@
 		goto err_uobj;
 	}
 
+	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+		err = -EINVAL;
+		goto err_put;
+	}
+
 	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
 			    sizeof(union ib_flow_spec), GFP_KERNEL);
 	if (!flow_attr) {
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 410408f..0c21535 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -724,7 +724,7 @@
 {
 	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
 		return -ENOMEM;
 
 	mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cc2243f..bb45eb2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1258,6 +1258,12 @@
 
 	t4_set_wq_in_error(&qhp->wq);
 	if (qhp->ibqp.uobject) {
+
+		/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+		if (qhp->wq.flushed)
+			return;
+
+		qhp->wq.flushed = 1;
 		t4_set_cq_in_error(&rchp->cq);
 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d89b874..c2982bb 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -88,6 +88,7 @@
 	unsigned long flags;
 	int write = 1;	/* write sendctrl back */
 	int flush = 0;	/* re-read sendctrl to make sure it is flushed */
+	int i;
 
 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
 
@@ -97,9 +98,13 @@
 		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
 	/* Fall through */
 	case PSC_DATA_VL_ENABLE:
+		mask = 0;
+		for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+			if (!dd->vld[i].mtu)
+				mask |= BIT_ULL(i);
 		/* Disallow sending on VLs not enabled */
-		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-				SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+		mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
 		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
 		break;
 	case PSC_GLOBAL_DISABLE:
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 613074e9..e8e0fa5 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -397,7 +397,7 @@
 
 	lockdep_assert_held(&qp->s_lock);
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 5e6d1ba..de21128 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@
 	int middle = 0;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 97ae24b..1a7ce1d 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -285,7 +285,7 @@
 	u8 sc5;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 77697d6..018a415 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -956,7 +956,7 @@
 			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
 				if (++req->iov_idx == req->data_iovs) {
 					ret = -EFAULT;
-					goto free_txreq;
+					goto free_tx;
 				}
 				iovec = &req->iovs[req->iov_idx];
 				WARN_ON(iovec->offset);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 01a380e..14ddb75 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1511,12 +1511,18 @@
 	struct hfi1_pportdata *ppd;
 	struct hfi1_devdata *dd;
 	u8 sc5;
+	u8 sl;
 
 	/* test the mapping for validity */
 	ibp = to_iport(ibdev, ah_attr->port_num);
 	ppd = ppd_from_ibp(ibp);
-	sc5 = ibp->sl_to_sc[ah_attr->sl];
 	dd = dd_from_ppd(ppd);
+
+	sl = ah_attr->sl;
+	if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+		return -EINVAL;
+
+	sc5 = ibp->sl_to_sc[sl];
 	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
 		return -EINVAL;
 	return 0;
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 094ab82..d8a5bad 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@
 				struct rvt_qp *qp)
 	__must_hold(&qp->s_lock)
 {
-	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+	struct verbs_txreq *tx = NULL;
 
 	write_seqlock(&dev->iowait_lock);
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 5660897..31ded57 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -82,7 +82,7 @@
 	if (unlikely(!tx)) {
 		/* call slow path to get the lock */
 		tx = __get_txreq(dev, qp);
-		if (IS_ERR(tx))
+		if (!tx)
 			return tx;
 	}
 	tx->qp = qp;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 05db7d5..da61ce8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -35,7 +35,7 @@
 
 static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
 {
-	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
+	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
 }
 
 static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e86dd8d..33cf103 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -114,7 +114,10 @@
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 
-	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
+	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
+					   base) ? -ENOMEM : 0;
 }
 
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index ae41623..0d4878e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -131,6 +131,40 @@
 	return err;
 }
 
+static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
+					u64 length, u64 virt_addr,
+					int access_flags)
+{
+	/*
+	 * Force registering the memory as writable if the underlying pages
+	 * are writable.  This is so rereg can change the access permissions
+	 * from readable to writable without having to run through ib_umem_get
+	 * again
+	 */
+	if (!ib_access_writable(access_flags)) {
+		struct vm_area_struct *vma;
+
+		down_read(&current->mm->mmap_sem);
+		/*
+		 * FIXME: Ideally this would iterate over all the vmas that
+		 * cover the memory, but for now it requires a single vma to
+		 * entirely cover the MR to support RO mappings.
+		 */
+		vma = find_vma(current->mm, start);
+		if (vma && vma->vm_end >= start + length &&
+		    vma->vm_start <= start) {
+			if (vma->vm_flags & VM_WRITE)
+				access_flags |= IB_ACCESS_LOCAL_WRITE;
+		} else {
+			access_flags |= IB_ACCESS_LOCAL_WRITE;
+		}
+
+		up_read(&current->mm->mmap_sem);
+	}
+
+	return ib_umem_get(context, start, length, access_flags, 0);
+}
+
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata)
@@ -145,10 +179,8 @@
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	/* Force registering the memory as writable. */
-	/* Used for memory re-registeration. HCA protects the access */
-	mr->umem = ib_umem_get(pd->uobject->context, start, length,
-			       access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+	mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
+				    virt_addr, access_flags);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err_free;
@@ -215,6 +247,9 @@
 	}
 
 	if (flags & IB_MR_REREG_ACCESS) {
+		if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
+			return -EPERM;
+
 		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
 					       convert_access(mr_access_flags));
 
@@ -228,10 +263,9 @@
 
 		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
 		ib_umem_release(mmr->umem);
-		mmr->umem = ib_umem_get(mr->uobject->context, start, length,
-					mr_access_flags |
-					IB_ACCESS_LOCAL_WRITE,
-					0);
+		mmr->umem =
+			mlx4_get_umem_mr(mr->uobject->context, start, length,
+					 virt_addr, mr_access_flags);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
 			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 5c1dbe2..d6e5002 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -268,18 +268,24 @@
 
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-		return ERR_PTR(-EINVAL);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	desc_size = roundup_pow_of_two(desc_size);
 	desc_size = max_t(size_t, 32, desc_size);
-	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-		return ERR_PTR(-EINVAL);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	if (buf_size < desc_size)
-		return ERR_PTR(-EINVAL);
+	if (buf_size < desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 2659430..84349d9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -645,7 +645,7 @@
 	struct ocrdma_stats *pstats = filp->private_data;
 	struct ocrdma_dev *dev = pstats->dev;
 
-	if (count > 32)
+	if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
 		goto err;
 
 	if (copy_from_user(tmp_str, buffer, count))
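The ocrdma hunk above tightens the bounds checks on a debugfs-style write handler. A generic sketch of the same pattern, with illustrative names not taken from ocrdma (needs linux/fs.h and linux/uaccess.h):

static ssize_t example_debugfs_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	char tmp[32];

	/* reject offset writes, empty writes and oversized input */
	if (*ppos != 0 || count == 0 || count > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, buf, count))
		return -EFAULT;

	/* ... parse tmp[0..count-1] here ... */
	return count;
}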
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 6c5e29d..df15b6d 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -273,6 +273,7 @@
 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
 		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
 		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
+			wqe->status = IB_WC_FATAL_ERR;
 			return COMPST_ERROR;
 		}
 		reset_retry_counters(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 46f0628..db6bb02 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -225,9 +225,14 @@
 		goto err1;
 	}
 
+	if (unlikely(qpn == 0)) {
+		pr_warn_once("QP 0 not supported");
+		goto err1;
+	}
+
 	if (qpn != IB_MULTICAST_QPN) {
-		index = (qpn == 0) ? port->qp_smi_index :
-			((qpn == 1) ? port->qp_gsi_index : qpn);
+		index = (qpn == 1) ? port->qp_gsi_index : qpn;
+
 		qp = rxe_pool_get_index(&rxe->qp_pool, index);
 		if (unlikely(!qp)) {
 			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 5b0ca35..47219eb 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -648,6 +648,9 @@
 		} else {
 			goto exit;
 		}
+		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+			rxe_run_task(&qp->comp.task, 1);
 		qp->req.wqe_index = next_index(qp->sq.queue,
 						qp->req.wqe_index);
 		goto next_wqe;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7576166..ad9b486 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1009,12 +1009,14 @@
 
 	skb_queue_head_init(&skqueue);
 
+	netif_tx_lock_bh(p->dev);
 	spin_lock_irq(&priv->lock);
 	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
 	if (p->neigh)
 		while ((skb = __skb_dequeue(&p->neigh->queue)))
 			__skb_queue_tail(&skqueue, skb);
 	spin_unlock_irq(&priv->lock);
+	netif_tx_unlock_bh(p->dev);
 
 	while ((skb = __skb_dequeue(&skqueue))) {
 		skb->dev = p->dev;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 463ea59..6463590 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2639,7 +2639,7 @@
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_rdma_ch *ch;
-	int i;
+	int i, j;
 	u8 status;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2653,8 +2653,8 @@
 
 	for (i = 0; i < target->ch_count; i++) {
 		ch = &target->ch[i];
-		for (i = 0; i < target->req_ring_size; ++i) {
-			struct srp_request *req = &ch->req_ring[i];
+		for (j = 0; j < target->req_ring_size; ++j) {
+			struct srp_request *req = &ch->req_ring[j];
 
 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
 		}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9888c9b..32a26e7 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1701,8 +1701,7 @@
 	int ret;
 
 	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
-		pr_debug("%s-%d: already closed\n", ch->sess_name,
-			 ch->qp->qp_num);
+		pr_debug("%s: already closed\n", ch->sess_name);
 		return false;
 	}
 
diff --git a/drivers/input/misc/vl53l0x/src/vl53l0x_i2c_platform.c b/drivers/input/misc/vl53l0x/src/vl53l0x_i2c_platform.c
index f8633a0..b63f880 100644
--- a/drivers/input/misc/vl53l0x/src/vl53l0x_i2c_platform.c
+++ b/drivers/input/misc/vl53l0x/src/vl53l0x_i2c_platform.c
@@ -147,7 +147,7 @@
 			int32_t count)
 {
 	int32_t status = STATUS_OK;
-	uint8_t *buffer;
+	uint8_t buffer[64];
 
 #ifdef VL_LOG_ENABLE
 	int32_t i = 0;
@@ -166,7 +166,6 @@
 #endif
 	if ((count + 1) > VL_MAX_I2C_XFER_SIZE)
 		return STATUS_FAIL;
-	buffer =  VL_GetLocalBuffer(dev, (count+1));
 	buffer[0] = index;
 	memcpy(&buffer[1], pdata, count);
 	status = VL_I2CWrite(dev, buffer, (count+1));
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 97f6e05..a716482 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1251,6 +1251,8 @@
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
 	{ "ELAN0618", 0 },
+	{ "ELAN061D", 0 },
+	{ "ELAN0622", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 4e77adb..c120afd 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1176,6 +1176,8 @@
 static const char * const middle_button_pnp_ids[] = {
 	"LEN2131", /* ThinkPad P52 w/ NFC */
 	"LEN2132", /* ThinkPad P52 */
+	"LEN2133", /* ThinkPad P72 w/ NFC */
+	"LEN2134", /* ThinkPad P72 */
 	NULL
 };
 
diff --git a/drivers/input/sensors/bmi160/Kconfig b/drivers/input/sensors/bmi160/Kconfig
new file mode 100644
index 0000000..0b1feda
--- /dev/null
+++ b/drivers/input/sensors/bmi160/Kconfig
@@ -0,0 +1,121 @@
+#
+# Kconfig for Bosch sensors driver.
+#
+config BOSCH_DRIVER_LOG_FUNC
+	tristate "Bosch Sensortec driver smart log function support"
+	depends on (I2C || SPI_MASTER) && SYSFS
+	help
+	 If you say yes here, you get support for the smart log function in the Bosch Sensortec driver.
+
+config SENSORS_BMA2X2
+	tristate "BMA2x2 acceleration sensor support"
+	depends on I2C
+	help
+	  If you say yes here, you get support for Bosch Sensortec's
+	  acceleration sensors BMA255/BMA254/BMA355/BMA250E/BMA222E/BMA280.
+
+config SENSORS_BMA2X2_ENABLE_INT1
+	tristate "BMA2X2 acceleration sensor interrupt INT1 support"
+	depends on SENSORS_BMA2X2
+	help
+	 If you say yes here, you get INT1 support for Bosch Sensortec
+	 acceleration sensors BMA255/BMA250E/BMA222E/BMA280.
+	 Selecting this will disable interrupt INT2 support.
+
+config SENSORS_BMA2X2_ENABLE_INT2
+	tristate "BMA2X2 acceleration sensor interrupt INT2 support"
+	depends on SENSORS_BMA2X2 && !SENSORS_BMA2X2_ENABLE_INT1
+	help
+	 If you say yes here, you get INT2 support for Bosch Sensortec
+	 acceleration sensors BMA255/BMA250E/BMA222E/BMA280.
+	 This can only be enabled if interrupt INT1 support is NOT enabled.
+
+config SIG_MOTION
+	tristate "support significant motion sensor function"
+	depends on SENSORS_BMA2X2  && ( SENSORS_BMA2X2_ENABLE_INT1 || SENSORS_BMA2X2_ENABLE_INT2)
+	help
+	 If you say yes here, if you want to support Bosch significant motion sensor function
+
+config DOUBLE_TAP
+	tristate "support double tap sensor function"
+	depends on SENSORS_BMA2X2  && ( SENSORS_BMA2X2_ENABLE_INT1 || SENSORS_BMA2X2_ENABLE_INT2)
+	help
+	 If you say yes here, you get support Bosch double tap sensor function
+
+config SENSORS_BMG
+	tristate "Bosch Gyroscope Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 gyroscope sensor drivers such as BMG160/BMI055/BMI058.
+
+config SENSORS_BMG_FIFO
+	tristate "Bosch Gyroscope FIFO Support"
+	depends on SENSORS_BMG
+	help
+	 If you say yes here, you get support for gyroscope sensor FIFO operations.
+	 Enable this only if the chip supports the FIFO feature.
+
+config SENSORS_BMI058
+	tristate "BMI058 Sensor Support"
+	depends on (SENSORS_BMG || SENSORS_BMA2X2)
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 BMI058 sensor driver.
+
+config SENSORS_YAS537
+	tristate "YAS537 Magnetic Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for the YAMAHA
+	 YAS537 magnetic sensor.
+
+config SENSORS_BMM050
+	tristate "BMM050 Magnetic Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 BMM050 magnetic sensor.
+
+config SENSORS_AKM09911
+	tristate "AKM09911 Mag Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for the AKM09911 sensor.
+
+config SENSORS_AKM09912
+	tristate "AKM09912 Mag Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for the AKM09912 sensor.
+
+config SENSORS_BMA420
+	tristate "BMA420 Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's BMA420 sensor driver.
+
+config SENSORS_BMA421
+	tristate "BMA421 Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's BMA421 sensor driver.
+
+config SENSORS_BMA422
+	tristate "BMA422 Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's BMA422 sensor driver.
+
+config SENSORS_BMA455
+	tristate "BMA455 Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's BMA455 sensor driver.
+
+config BMA4XY_MAG_INTERFACE_SUPPORT
+	tristate "BMA4XY Sensor mag interface support"
+	depends on SENSORS_BMA4XY
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 BMA4XY sensor driver with mag sensor support.
+
+
+
diff --git a/drivers/input/sensors/bmi160/Makefile b/drivers/input/sensors/bmi160/Makefile
new file mode 100644
index 0000000..0a9bb56
--- /dev/null
+++ b/drivers/input/sensors/bmi160/Makefile
@@ -0,0 +1,41 @@
+#
+# Makefile for Bosch sensor driver.
+#
+
+obj-$(CONFIG_BOSCH_DRIVER_LOG_FUNC) += bs_log.o
+obj-y += bstclass.o
+ifeq ($(CONFIG_BOSCH_DRIVER_LOG_FUNC),y)
+	EXTRA_CFLAGS += -DBOSCH_DRIVER_LOG_FUNC
+endif
+
+#obj-y   += bma2x2.o
+
+ifeq ($(CONFIG_SENSORS_BMA2X2_ENABLE_INT1),y)
+	EXTRA_CFLAGS += -DBMA2X2_ENABLE_INT1
+endif
+
+ifeq ($(CONFIG_SENSORS_BMA2X2_ENABLE_INT2),y)
+	EXTRA_CFLAGS += -DBMA2X2_ENABLE_INT2
+endif
+
+#obj-y    += bmg160_driver.o bmg160.o
+EXTRA_CFLAGS += -DBMG_USE_BASIC_I2C_FUNC
+
+obj-y    += bmi160_driver.o bmi160.o
+ifeq ($(CONFIG_BMI160_MAG_INTERFACE_SUPPORT),y)
+		EXTRA_CFLAGS += -DBMI160_MAG_INTERFACE_SUPPORT
+endif
+ifeq ($(CONFIG_SENSORS_BMI160_ENABLE_INT1),y)
+		EXTRA_CFLAGS += -DBMI160_ENABLE_INT1
+endif
+
+ifeq ($(CONFIG_SENSORS_BMI160_ENABLE_INT2),y)
+		EXTRA_CFLAGS += -DBMI160_ENABLE_INT2
+endif
+
+obj-y  += bmi160_i2c.o
+
+EXTRA_CFLAGS += -DBMI_USE_BASIC_I2C_FUNC
+
+obj-$(CONFIG_SENSORS_BMI160_SPI)  += bmi160_spi.o
+
diff --git a/drivers/input/sensors/bmi160/bmi160.c b/drivers/input/sensors/bmi160/bmi160.c
new file mode 100644
index 0000000..071f7e3
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bmi160.c
@@ -0,0 +1,18753 @@
+/*
+* @section LICENSE
+* (C) Copyright 2011~2018 Bosch Sensortec GmbH All Rights Reserved
+*
+* This software program is licensed subject to the GNU General
+* Public License (GPL).Version 2,June 1991,
+* available at http://www.fsf.org/copyleft/gpl.html
+
+*
+* @filename bmi160.c
+* @Date: 2015/04/02
+* @id       836294d
+* @Revision: 2.0.9 $
+*
+* Usage: Sensor Driver for BMI160 sensor
+*
+****************************************************************************
+* \section Disclaimer
+*
+* Common:
+* Bosch Sensortec products are developed for the consumer goods industry.
+* They may only be used within the parameters of the respective valid
+* product data sheet.  Bosch Sensortec products are provided with the
+* express understanding that there is no warranty of fitness for a
+* particular purpose.They are not fit for use in life-sustaining,
+* safety or security sensitive systems or any system or device
+* that may lead to bodily harm or property damage if the system
+* or device malfunctions. In addition,Bosch Sensortec products are
+* not fit for use in products which interact with motor vehicle systems.
+* The resale and or use of products are at the purchasers own risk and
+* his own responsibility. The examination of fitness for the intended use
+* is the sole responsibility of the Purchaser.
+*
+* The purchaser shall indemnify Bosch Sensortec from all third party
+* claims, including any claims for incidental, or consequential damages,
+* arising from any product use not covered by the parameters of
+* the respective valid product data sheet or not approved by
+* Bosch Sensortec and reimburse Bosch Sensortec for all costs in
+* connection with such claims.
+*
+* The purchaser must monitor the market for the purchased products,
+* particularly with regard to product safety and inform Bosch Sensortec
+* without delay of all security relevant incidents.
+*
+* Engineering Samples are marked with an asterisk (*) or (e).
+* Samples may vary from the valid technical specifications of the product
+* series. They are therefore not intended or fit for resale to third
+* parties or for use in end products. Their sole purpose is internal
+* client testing. The testing of an engineering sample may in no way
+* replace the testing of a product series. Bosch Sensortec assumes
+* no liability for the use of engineering samples.
+* By accepting the engineering samples, the Purchaser agrees to indemnify
+* Bosch Sensortec from all claims arising from the use of engineering
+* samples.
+*
+* Special:
+* This software module (hereinafter called "Software") and any information
+* on application-sheets (hereinafter called "Information") is provided
+* free of charge for the sole purpose to support your application work.
+* The Software and Information is subject to the following
+* terms and conditions:
+*
+* The Software is specifically designed for the exclusive use for
+* Bosch Sensortec products by personnel who have special experience
+* and training. Do not use this Software if you do not have the
+* proper experience or training.
+*
+* This Software package is provided `` as is `` and without any expressed
+* or implied warranties,including without limitation, the implied warranties
+* of merchantability and fitness for a particular purpose.
+*
+* Bosch Sensortec and their representatives and agents deny any liability
+* for the functional impairment
+* of this Software in terms of fitness, performance and safety.
+* Bosch Sensortec and their representatives and agents shall not be liable
+* for any direct or indirect damages or injury, except as
+* otherwise stipulated in mandatory applicable law.
+*
+* The Information provided is believed to be accurate and reliable.
+* Bosch Sensortec assumes no responsibility for the consequences of use
+* of such Information nor for any infringement of patents or
+* other rights of third parties which may result from its use.
+* No license is granted by implication or otherwise under any patent or
+* patent rights of Bosch. Specifications mentioned in the Information are
+* subject to change without notice.
+**************************************************************************/
+/*! file <BMI160 >
+    brief <Sensor driver for BMI160> */
+#include "bmi160.h"
+#include <linux/kernel.h>
+
+/* user defined code to be added here ... */
+struct bmi160_t *p_bmi160;
+/* used for reading the mag trim values for compensation*/
+struct trim_data_t mag_trim;
+/* the following variable is used to avoid selecting auto mode
+while the BMM150 mag interface is running in manual mode */
+u8 V_bmm150_maual_auto_condition_u8 = BMI160_INIT_VALUE;
+/* used for reading the AKM compensating data */
+struct bst_akm_sensitivity_data_t akm_asa_data;
+/* Assign the fifo time */
+u32 V_fifo_time_U32 = BMI160_INIT_VALUE;
+
+/* FIFO data read for 1024 bytes of data */
+u8 v_fifo_data_u8[FIFO_FRAME] = {BMI160_INIT_VALUE};
+/* YAMAHA-YAS532*/
+/* value of coeff*/
+static const int yas532_version_ac_coef[] = {YAS532_VERSION_AC_COEF_X,
+YAS532_VERSION_AC_COEF_Y1, YAS532_VERSION_AC_COEF_Y2};
+/* used for reading the yas532 calibration data*/
+struct yas532_t yas532_data;
+/* used for reading the yas537 calibration data*/
+struct yas537_t yas537_data;
+/*!
+ *	@brief
+ *	This function is used for initialize
+ *	bus read and bus write functions
+ *	assign the chip id and device address
+ *	chip id is read in the register 0x00 bit from 0 to 7
+ *
+ *	@param bmi160 : structure pointer
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	While changing the parameter of the bmi160_t
+ *	consider the following point:
+ *	Changing the reference value of the parameter
+ *	will change the local copy or local reference;
+ *	make sure your changes do not
+ *	affect the reference value of the parameter
+ *	(preferably, do not change the reference value of the parameter).
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_init(struct bmi160_t *bmi160)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	u8 v_pmu_data_u8 = BMI160_INIT_VALUE;
+	/* assign bmi160 ptr */
+	p_bmi160 = bmi160;
+	com_rslt =
+	p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+	BMI160_USER_CHIP_ID__REG,
+	&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* read Chip Id */
+	p_bmi160->chip_id = v_data_u8;
+	/* To avoid gyro wakeup it is required to write 0x00 to 0x6C*/
+	com_rslt += bmi160_write_reg(BMI160_USER_PMU_TRIGGER_ADDR,
+	&v_pmu_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	return com_rslt;
+}
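For context, a hedged sketch of the callbacks a host driver typically supplies before calling bmi160_init(). The signatures are taken from how the function pointers are invoked above; the exact member names behind the BMI160_BUS_READ_FUNC / BMI160_BUS_WRITE_FUNC macros and the s8 return type are assumptions and should be checked against bmi160.h.

/* illustrative callbacks; a real host driver forwards these to its
 * I2C or SPI transfer helpers
 */
static s8 example_bus_read(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
{
	return 0;	/* forward to an i2c/spi read, 0 on success */
}

static s8 example_bus_write(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
{
	return 0;	/* forward to an i2c/spi write, 0 on success */
}

static void example_delay_msec(u32 msec)
{
	/* e.g. msleep(msec) in kernel context */
}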
+/*!
+ * @brief
+ *	This API writes the data to
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> The data to write to the register
+ *	@param v_len_u8 -> no. of bytes to write
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_write_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write data from register*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->dev_addr,
+			v_addr_u8, v_data_u8, v_len_u8);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief
+ *	This API reads the data from
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> The data from the register
+ *	@param v_len_u8 -> no of bytes to read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* Read data from register*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			v_addr_u8, v_data_u8, v_len_u8);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read the fatal error
+ *	from the register 0x02 bit 0
+ *	This flag will be reset only by power-on-reset and soft reset
+ *
+ *
+ *  @param v_fatal_err_u8 : The status of fatal error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fatal_err(u8
+*v_fatal_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* reading the fatal error status*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FATAL_ERR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fatal_err_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FATAL_ERR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read the error code
+ *	from register 0x02 bit 1 to 4
+ *
+ *
+ *  @param v_err_code_u8 : The status of error codes
+ *	error_code  |    description
+ *  ------------|---------------
+ *	0x00        |no error
+ *	0x01        |ACC_CONF error (accel ODR and bandwidth not compatible)
+ *	0x02        |GYR_CONF error (Gyroscope ODR and bandwidth not compatible)
+ *	0x03        |Under sampling mode and interrupt uses pre filtered data
+ *	0x04        |reserved
+ *	0x05        |Selected trigger-readout offset in
+ *    -         |MAG_IF greater than selected ODR
+ *	0x06        |FIFO configuration error for header less mode
+ *	0x07        |Under sampling mode and pre filtered data as FIFO source
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_err_code(u8
+*v_err_code_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ERR_CODE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_err_code_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_ERR_CODE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the i2c error code from the
+ *	register 0x02 bit 5.
+ *	This flag indicates an error detected in the I2C master.
+ *
+ *  @param v_i2c_err_code_u8 : The status of i2c fail error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_fail_err(u8
+*v_i2c_err_code_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_I2C_FAIL_ERR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_err_code_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_I2C_FAIL_ERR);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the dropped command error
+ *	from the register 0x02 bit 6
+ *
+ *
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_drop_cmd_err(u8
+*v_drop_cmd_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_DROP_CMD_ERR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_drop_cmd_err_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_DROP_CMD_ERR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the magnetometer data ready error
+ *	(data ready interrupt not active).
+ *	It reads from the error register 0x02 bit 7
+ *
+ *
+ *
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_dada_rdy_err(
+u8 *v_mag_data_rdy_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_MAG_DADA_RDY_ERR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_data_rdy_err_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_DADA_RDY_ERR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the error status
+ *	from the error register 0x02 bit 0 to 7
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *  @param v_fatal_er_u8r : The status of fatal error
+ *  @param v_err_code_u8 : The status of error code
+ *  @param v_i2c_fail_err_u8 : The status of I2C fail error
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_error_status(u8 *v_fatal_er_u8r,
+u8 *v_err_code_u8, u8 *v_i2c_fail_err_u8,
+u8 *v_drop_cmd_err_u8, u8 *v_mag_data_rdy_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the error codes*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_ERR_STAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			/* fatal error*/
+			*v_fatal_er_u8r =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FATAL_ERR);
+			/* user error*/
+			*v_err_code_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_ERR_CODE);
+			/* i2c fail error*/
+			*v_i2c_fail_err_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_I2C_FAIL_ERR);
+			/* drop command error*/
+			*v_drop_cmd_err_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_DROP_CMD_ERR);
+			/* mag data ready error*/
+			*v_mag_data_rdy_err_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_DADA_RDY_ERR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the magnetometer power mode from
+ *	PMU status register 0x03 bit 0 and 1
+ *
+ *  @param v_mag_power_mode_stat_u8 : The value of mag power mode
+ *	mag_powermode    |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   LOW POWER       |   0x02
+ *
+ *
+ * @note The power mode of the mag is set by the 0x7E command register
+ * @note using the function "bmi160_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x18    | MAG_MODE_SUSPEND
+ *   0x19    | MAG_MODE_NORMAL
+ *   0x1A    | MAG_MODE_LOWPOWER
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_power_mode_stat(u8
+*v_mag_power_mode_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_POWER_MODE_STAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_power_mode_stat_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_POWER_MODE_STAT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the gyroscope power mode from
+ *	PMU status register 0x03 bit 2 and 3
+ *
+ *  @param v_gyro_power_mode_stat_u8 :	The value of gyro power mode
+ *	gyro_powermode   |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   FAST POWER UP   |   0x03
+ *
+ * @note The power mode of the gyro is set by the 0x7E command register
+ * @note using the function "bmi160_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x14    | GYRO_MODE_SUSPEND
+ *   0x15    | GYRO_MODE_NORMAL
+ *   0x17    | GYRO_MODE_FASTSTARTUP
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_power_mode_stat(u8
+*v_gyro_power_mode_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_GYRO_POWER_MODE_STAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_power_mode_stat_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_GYRO_POWER_MODE_STAT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the accelerometer power mode from
+ *	PMU status register 0x03 bit 4 and 5
+ *
+ *
+ *  @param v_accel_power_mode_stat_u8 :	The value of accel power mode
+ *	accel_powermode  |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *  LOW POWER        |   0x02
+ *
+ * @note The power mode of the accel is set by the 0x7E command register
+ * @note using the function "bmi160_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x11    | ACCEL_MODE_NORMAL
+ *   0x12    | ACCEL_LOWPOWER
+ *   0x10    | ACCEL_SUSPEND
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_power_mode_stat(u8
+*v_accel_power_mode_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_POWER_MODE_STAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_power_mode_stat_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_ACCEL_POWER_MODE_STAT);
+		}
+	return com_rslt;
+}
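+/*
+ * Illustrative usage sketch (kept out of the build with #if 0, not part
+ * of the driver API): reading back all three PMU status fields after the
+ * power modes have been programmed through bmi160_set_command_register().
+ * The helper name and local variables below are examples only.
+ */
+#if 0
+static BMI160_RETURN_FUNCTION_TYPE example_read_pmu_status(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	u8 mag_pmu = BMI160_INIT_VALUE;
+	u8 gyro_pmu = BMI160_INIT_VALUE;
+	u8 accel_pmu = BMI160_INIT_VALUE;
+
+	/* each getter returns 0 on success and a negative value on error */
+	com_rslt = bmi160_get_mag_power_mode_stat(&mag_pmu);
+	com_rslt += bmi160_get_gyro_power_mode_stat(&gyro_pmu);
+	com_rslt += bmi160_get_accel_power_mode_stat(&accel_pmu);
+	/* e.g. accel_pmu == 0x01 indicates the accel is in NORMAL mode */
+	return com_rslt;
+}
+#endif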
+/*!
+ *	@brief This API switch mag interface to normal mode
+ *	and confirm whether the mode switching completed successfully
+ *
+ *	@return results of bus communication function and current MAG_PMU result
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_interface_normal(void)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	/* retry counter for confirming the switch of mag to normal mode */
+	u8 v_try_times_u8 = BMI160_MAG_NOAMRL_SWITCH_TIMES;
+	u8 v_mag_pum_status_u8 = BMI160_INIT_VALUE;
+
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt = bmi160_set_command_register(MAG_MODE_NORMAL);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	while (v_try_times_u8) {
+		com_rslt = bmi160_get_mag_power_mode_stat(&v_mag_pum_status_u8);
+		if (v_mag_pum_status_u8 == MAG_INTERFACE_PMU_ENABLE)
+			break;
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		v_try_times_u8--;
+	}
+	if (v_mag_pum_status_u8 == MAG_INTERFACE_PMU_ENABLE)
+		com_rslt += SUCCESS;
+	else
+		com_rslt += E_BMI160_COMM_RES;
+
+	return com_rslt;
+}
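+/*
+ * Illustrative usage sketch (disabled with #if 0): bringing the mag
+ * interface up before any mag data read. The retry loop already lives in
+ * bmi160_set_mag_interface_normal(); the caller only checks the combined
+ * result and, optionally, the PMU status. The helper name is an example.
+ */
+#if 0
+static BMI160_RETURN_FUNCTION_TYPE example_mag_interface_bringup(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 mag_pmu = BMI160_INIT_VALUE;
+
+	com_rslt = bmi160_set_mag_interface_normal();
+	if (com_rslt != SUCCESS)
+		return com_rslt;
+	/* optional cross-check: 0x01 means the mag interface is NORMAL */
+	com_rslt = bmi160_get_mag_power_mode_stat(&mag_pmu);
+	return com_rslt;
+}
+#endif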
+/*!
+ *	@brief This API reads magnetometer data X values
+ *	from the register 0x04 and 0x05
+ *	@brief The mag sensor data read from the auxiliary mag
+ *
+ *  @param v_mag_x_s16 : The value of mag x
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_x(s16 *v_mag_x_s16,
+u8 v_sensor_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the mag X LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[BMI160_MAG_X_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_DATA_MAG_X_LSB__REG,
+			v_data_u8, BMI160_MAG_X_DATA_LENGTH);
+			/* X axis*/
+			v_data_u8[BMI160_MAG_X_LSB_BYTE] =
+			BMI160_GET_BITSLICE(v_data_u8[BMI160_MAG_X_LSB_BYTE],
+			BMI160_USER_DATA_MAG_X_LSB);
+			*v_mag_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_MAG_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[BMI160_MAG_X_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_0_MAG_X_LSB__REG,
+			v_data_u8, BMI160_MAG_X_DATA_LENGTH);
+			*v_mag_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_MAG_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_MAG_X_LSB_BYTE]));
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
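+/*
+ * Worked sketch (disabled with #if 0) of the 13-bit BMM150 X assembly
+ * performed above, assuming the usual BMM150 layout with the X value in
+ * bits 7..3 of the LSB byte. The helper and the numeric example are
+ * illustrative only.
+ */
+#if 0
+static s16 example_assemble_bmm150_x(u8 lsb_byte, u8 msb_byte)
+{
+	/* keep the upper five bits of the LSB byte, right aligned */
+	u8 lsb_slice = lsb_byte >> 3;
+
+	/* sign-extend the MSB and make room for the five LSB bits */
+	return (s16)((((s32)((s8)msb_byte)) << 5) | lsb_slice);
+	/* e.g. lsb_byte = 0xE8, msb_byte = 0xFF -> (-1 << 5) | 0x1D = -3 */
+}
+#endif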
+/*!
+ *	@brief This API reads magnetometer data Y values
+ *	from the register 0x06 and 0x07
+ *	@brief The mag sensor data read from the auxiliary mag
+ *
+ *  @param v_mag_y_s16 : The value of mag y
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_y(s16 *v_mag_y_s16,
+u8 v_sensor_select_u8)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_OUT_OF_RANGE;
+	/* Array contains the mag Y LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[BMI160_MAG_Y_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_DATA_MAG_Y_LSB__REG,
+			v_data_u8, BMI160_MAG_Y_DATA_LENGTH);
+			/*Y-axis lsb value shifting*/
+			v_data_u8[BMI160_MAG_Y_LSB_BYTE] =
+			BMI160_GET_BITSLICE(v_data_u8[BMI160_MAG_Y_LSB_BYTE],
+			BMI160_USER_DATA_MAG_Y_LSB);
+			*v_mag_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_MAG_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[BMI160_MAG_Y_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_DATA_2_MAG_Y_LSB__REG,
+			v_data_u8, BMI160_MAG_Y_DATA_LENGTH);
+			*v_mag_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_MAG_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_MAG_Y_LSB_BYTE]));
+		break;
+		default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data Z values
+ *	from the register 0x08 and 0x09
+ *	@brief The mag sensor data read from the auxiliary mag
+ *
+ *  @param v_mag_z_s16 : The value of mag z
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_z(s16 *v_mag_z_s16,
+u8 v_sensor_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the mag Z LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[BMI160_MAG_Z_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_DATA_MAG_Z_LSB__REG,
+			v_data_u8, BMI160_MAG_Z_DATA_LENGTH);
+			/*Z-axis lsb value shifting*/
+			v_data_u8[BMI160_MAG_Z_LSB_BYTE] =
+			BMI160_GET_BITSLICE(v_data_u8[BMI160_MAG_Z_LSB_BYTE],
+			BMI160_USER_DATA_MAG_Z_LSB);
+			*v_mag_z_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_MAG_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_07_BITS) |
+			(v_data_u8[BMI160_MAG_Z_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_DATA_4_MAG_Z_LSB__REG,
+			v_data_u8, BMI160_MAG_Z_DATA_LENGTH);
+			*v_mag_z_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_MAG_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (
+			v_data_u8[BMI160_MAG_Z_LSB_BYTE]));
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data RHALL values
+ *	from the register 0x0A and 0x0B
+ *
+ *
+ *  @param v_mag_r_s16 : The value of BMM150 r data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_r(s16 *v_mag_r_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the mag R LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[BMI160_MAG_R_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_6_RHALL_LSB__REG,
+			v_data_u8, BMI160_MAG_R_DATA_LENGTH);
+			/*R-axis lsb value shifting*/
+			v_data_u8[BMI160_MAG_R_LSB_BYTE] =
+			BMI160_GET_BITSLICE(v_data_u8[BMI160_MAG_R_LSB_BYTE],
+			BMI160_USER_DATA_MAG_R_LSB);
+			*v_mag_r_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_MAG_R_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS) |
+			(v_data_u8[BMI160_MAG_R_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data X,Y,Z values
+ *	from the register 0x04 to 0x09
+ *
+ *	@brief The mag sensor data read from the auxiliary mag
+ *
+ *  @param mag : The value of mag xyz data
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_xyz(
+struct bmi160_mag_t *mag, u8 v_sensor_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the mag XYZ LSB and MSB data
+		v_data_u8[0] - X-LSB
+		v_data_u8[1] - X-MSB
+		v_data_u8[2] - Y-LSB
+		v_data_u8[3] - Y-MSB
+		v_data_u8[4] - Z-LSB
+		v_data_u8[5] - Z-MSB
+		*/
+	u8 v_data_u8[BMI160_MAG_XYZ_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_DATA_MAG_X_LSB__REG,
+			v_data_u8, BMI160_MAG_XYZ_DATA_LENGTH);
+			/*X-axis lsb value shifting*/
+			v_data_u8[BMI160_DATA_FRAME_MAG_X_LSB_BYTE] =
+			BMI160_GET_BITSLICE(
+			v_data_u8[BMI160_DATA_FRAME_MAG_X_LSB_BYTE],
+			BMI160_USER_DATA_MAG_X_LSB);
+			/* Data X */
+			mag->x = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[BMI160_DATA_FRAME_MAG_X_LSB_BYTE]));
+			/* Data Y */
+			/*Y-axis lsb value shifting*/
+			v_data_u8[BMI160_DATA_FRAME_MAG_Y_LSB_BYTE] =
+			BMI160_GET_BITSLICE(
+			v_data_u8[BMI160_DATA_FRAME_MAG_Y_LSB_BYTE],
+			BMI160_USER_DATA_MAG_Y_LSB);
+			mag->y = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[BMI160_DATA_FRAME_MAG_Y_LSB_BYTE]));
+
+			/* Data Z */
+			/*Z-axis lsb value shifting*/
+			v_data_u8[BMI160_DATA_FRAME_MAG_Z_LSB_BYTE]
+			= BMI160_GET_BITSLICE(
+			v_data_u8[BMI160_DATA_FRAME_MAG_Z_LSB_BYTE],
+			BMI160_USER_DATA_MAG_Z_LSB);
+			mag->z = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_07_BITS) |
+			(v_data_u8[BMI160_DATA_FRAME_MAG_Z_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_0_MAG_X_LSB__REG,
+			v_data_u8, BMI160_MAG_XYZ_DATA_LENGTH);
+			/* Data X */
+			mag->x = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_DATA_FRAME_MAG_X_LSB_BYTE]));
+			/* Data Y */
+			mag->y  = ((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_DATA_FRAME_MAG_Y_LSB_BYTE]));
+			/* Data Z */
+			mag->z = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_DATA_FRAME_MAG_Z_LSB_BYTE]));
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
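+/*
+ * Illustrative usage sketch (disabled with #if 0): reading one raw mag
+ * sample from a BMM150 on the secondary interface, after the interface
+ * has been switched to normal mode. BST_BMM selects the BMM150 data
+ * layout handled above; the helper name is an example only.
+ */
+#if 0
+static BMI160_RETURN_FUNCTION_TYPE example_read_bmm150_xyz(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	struct bmi160_mag_t mag_data;
+
+	com_rslt = bmi160_read_mag_xyz(&mag_data, BST_BMM);
+	/* on success mag_data.x/.y/.z hold the raw values assembled above */
+	return com_rslt;
+}
+#endif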
+/*!
+ *	@brief This API reads magnetometer data X,Y,Z,r
+ *	values from the register 0x04 to 0x0B
+ *
+ *	@brief The mag sensor data read from the auxiliary mag
+ *
+ *  @param mag : The value of mag-BMM150 xyzr data
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_xyzr(
+struct bmi160_mag_xyzr_t *mag)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8[BMI160_MAG_XYZR_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_MAG_X_LSB__REG,
+			v_data_u8, BMI160_MAG_XYZR_DATA_LENGTH);
+
+			/* Data X */
+			/*X-axis lsb value shifting*/
+			v_data_u8[BMI160_DATA_FRAME_MAG_X_LSB_BYTE]
+			= BMI160_GET_BITSLICE(
+			v_data_u8[BMI160_DATA_FRAME_MAG_X_LSB_BYTE],
+			BMI160_USER_DATA_MAG_X_LSB);
+			mag->x = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_05_BITS)
+			| (v_data_u8[BMI160_DATA_FRAME_MAG_X_LSB_BYTE]));
+			/* Data Y */
+			/*Y-axis lsb value shifting*/
+			v_data_u8[BMI160_DATA_FRAME_MAG_Y_LSB_BYTE]
+			= BMI160_GET_BITSLICE(
+			v_data_u8[BMI160_DATA_FRAME_MAG_Y_LSB_BYTE],
+			BMI160_USER_DATA_MAG_Y_LSB);
+			mag->y = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_05_BITS)
+			| (v_data_u8[
+			BMI160_DATA_FRAME_MAG_Y_LSB_BYTE]));
+
+			/* Data Z */
+			/*Z-axis lsb value shifting*/
+			v_data_u8[BMI160_DATA_FRAME_MAG_Z_LSB_BYTE]
+			= BMI160_GET_BITSLICE(
+			v_data_u8[BMI160_DATA_FRAME_MAG_Z_LSB_BYTE],
+			BMI160_USER_DATA_MAG_Z_LSB);
+			mag->z = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_07_BITS)
+			| (v_data_u8[BMI160_DATA_FRAME_MAG_Z_LSB_BYTE]));
+
+			/* RHall */
+			/*R-axis lsb value shifting*/
+			v_data_u8[BMI160_DATA_FRAME_MAG_R_LSB_BYTE]
+			= BMI160_GET_BITSLICE(
+			v_data_u8[BMI160_DATA_FRAME_MAG_R_LSB_BYTE],
+			BMI160_USER_DATA_MAG_R_LSB);
+			mag->r = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_MAG_R_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS)
+			| (v_data_u8[BMI160_DATA_FRAME_MAG_R_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data X values
+ *	from the register 0x0C and 0x0D
+ *
+ *
+ *
+ *
+ *  @param v_gyro_x_s16 : The value of gyro x data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_x(s16 *v_gyro_x_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the gyro X LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[MSB_ONE] - MSB*/
+	u8 v_data_u8[BMI160_GYRO_X_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_8_GYRO_X_LSB__REG,
+			v_data_u8, BMI160_GYRO_DATA_LENGTH);
+
+			*v_gyro_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_GYRO_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_GYRO_X_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data Y values
+ *	from the register 0x0E and 0x0F
+ *
+ *
+ *
+ *
+ *  @param v_gyro_y_s16 : The value of gyro y data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_y(s16 *v_gyro_y_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the gyro Y LSB and MSB data
+		v_data_u8[LSB_ZERO] - LSB
+		v_data_u8[MSB_ONE] - MSB*/
+	u8 v_data_u8[BMI160_GYRO_Y_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro y data*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_10_GYRO_Y_LSB__REG,
+			v_data_u8, BMI160_GYRO_DATA_LENGTH);
+
+			*v_gyro_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_GYRO_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_GYRO_Y_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data Z values
+ *	from the register 0x10 and 0x11
+ *
+ *
+ *
+ *
+ *  @param v_gyro_z_s16 : The value of gyro z data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_z(s16 *v_gyro_z_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the gyro Z LSB and MSB data
+		v_data_u8[LSB_ZERO] - LSB
+		v_data_u8[MSB_ONE] - MSB*/
+	u8 v_data_u8[BMI160_GYRO_Z_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro z data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_12_GYRO_Z_LSB__REG,
+			v_data_u8, BMI160_GYRO_DATA_LENGTH);
+
+			*v_gyro_z_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_GYRO_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_GYRO_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data X,Y,Z values
+ *	from the register 0x0C to 0x11
+ *
+ *
+ *
+ *
+ *  @param gyro : The value of gyro xyz
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_xyz(struct bmi160_gyro_t *gyro)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the gyro XYZ LSB and MSB data
+		v_data_u8[0] - X-LSB
+		v_data_u8[1] - X-MSB
+		v_data_u8[2] - Y-LSB
+		v_data_u8[3] - Y-MSB
+		v_data_u8[4] - Z-LSB
+		v_data_u8[5] - Z-MSB
+		*/
+	u8 v_data_u8[BMI160_GYRO_XYZ_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the gyro xyz data*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_8_GYRO_X_LSB__REG,
+			v_data_u8, BMI160_GYRO_XYZ_DATA_LENGTH);
+
+			/* Data X */
+			gyro->x = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_GYRO_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_DATA_FRAME_GYRO_X_LSB_BYTE]));
+			/* Data Y */
+			gyro->y = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_GYRO_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_DATA_FRAME_GYRO_Y_LSB_BYTE]));
+
+			/* Data Z */
+			gyro->z = (s16)
+			((((s32)((s8)v_data_u8[
+			BMI160_DATA_FRAME_GYRO_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_DATA_FRAME_GYRO_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
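+/*
+ * Illustrative conversion sketch (disabled with #if 0): raw gyro counts
+ * to milli-degrees-per-second. The factor assumes the range was set to
+ * +/-2000 dps via bmi160_set_gyro_range() (about 16.4 LSB per dps);
+ * other ranges need a different factor.
+ */
+#if 0
+static s32 example_gyro_raw_to_mdps(s16 raw)
+{
+	/* +/-2000 dps over 16 signed bits: 2000000 mdps / 32768 ~= 61 */
+	return (s32)raw * 61;
+}
+#endif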
+/*!
+ *	@brief This API reads accelerometer data X values
+ *	from the register 0x12 and 0x13
+ *
+ *
+ *
+ *
+ *  @param v_accel_x_s16 : The value of accel x
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_x(s16 *v_accel_x_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the accel X LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[BMI160_ACCEL_X_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_14_ACCEL_X_LSB__REG,
+			v_data_u8, BMI160_ACCEL_DATA_LENGTH);
+
+			*v_accel_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_ACCEL_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_ACCEL_X_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads accelerometer data Y values
+ *	from the register 0x14 and 0x15
+ *
+ *
+ *
+ *
+ *  @param v_accel_y_s16 : The value of accel y
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_y(s16 *v_accel_y_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the accel Y LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[BMI160_ACCEL_Y_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_16_ACCEL_Y_LSB__REG,
+			v_data_u8, BMI160_ACCEL_DATA_LENGTH);
+
+			*v_accel_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[BMI160_ACCEL_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[BMI160_ACCEL_Y_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads accelerometer data Z values
+ *	from the register 0x16 and 0x17
+ *
+ *
+ *
+ *
+ *  @param v_accel_z_s16 : The value of accel z
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_z(s16 *v_accel_z_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the accel Z LSB and MSB data
+		a_data_u8r[LSB_ZERO] - LSB
+		a_data_u8r[MSB_ONE] - MSB*/
+	u8 a_data_u8r[BMI160_ACCEL_Z_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_18_ACCEL_Z_LSB__REG,
+			a_data_u8r, BMI160_ACCEL_DATA_LENGTH);
+
+			*v_accel_z_s16 = (s16)
+			((((s32)((s8)a_data_u8r[BMI160_ACCEL_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[BMI160_ACCEL_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads accelerometer data X,Y,Z values
+ *	from the register 0x12 to 0x17
+ *
+ *
+ *
+ *
+ *  @param accel :The value of accel xyz
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_xyz(
+struct bmi160_accel_t *accel)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the accel XYZ LSB and MSB data
+	a_data_u8r[0] - X-LSB
+	a_data_u8r[1] - X-MSB
+	a_data_u8r[2] - Y-LSB
+	a_data_u8r[3] - Y-MSB
+	a_data_u8r[4] - Z-LSB
+	a_data_u8r[5] - Z-MSB
+	*/
+	u8 a_data_u8r[BMI160_ACCEL_XYZ_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_DATA_14_ACCEL_X_LSB__REG,
+			a_data_u8r, BMI160_ACCEL_XYZ_DATA_LENGTH);
+
+			/* Data X */
+			accel->x = (s16)
+			((((s32)((s8)a_data_u8r[
+			BMI160_DATA_FRAME_ACCEL_X_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[BMI160_DATA_FRAME_ACCEL_X_LSB_BYTE]));
+			/* Data Y */
+			accel->y = (s16)
+			((((s32)((s8)a_data_u8r[
+			BMI160_DATA_FRAME_ACCEL_Y_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[BMI160_DATA_FRAME_ACCEL_Y_LSB_BYTE]));
+
+			/* Data Z */
+			accel->z = (s16)
+			((((s32)((s8)a_data_u8r[
+			BMI160_DATA_FRAME_ACCEL_Z_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[BMI160_DATA_FRAME_ACCEL_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
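+/*
+ * Illustrative conversion sketch (disabled with #if 0): raw accel counts
+ * to milli-g. The divisor assumes the range was left at +/-2g via
+ * bmi160_set_accel_range() (16384 LSB per g); other ranges need a
+ * different divisor.
+ */
+#if 0
+static s32 example_accel_raw_to_mg(s16 raw)
+{
+	/* +/-2g over 16 signed bits: 32768 / 2 = 16384 LSB per g */
+	return ((s32)raw * 1000) / 16384;
+}
+#endif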
+/*!
+ *	@brief This API reads sensor_time from the register
+ *	0x18 to 0x1A
+ *
+ *
+ *  @param v_sensor_time_u32 : The value of sensor time
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_sensor_time(u32 *v_sensor_time_u32)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the 24-bit sensor time read as three bytes
+	a_data_u8r[0] - sensor time LSB
+	a_data_u8r[1] - sensor time XLSB (middle byte)
+	a_data_u8r[2] - sensor time MSB
+	*/
+	u8 a_data_u8r[BMI160_SENSOR_TIME_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_SENSORTIME_0_SENSOR_TIME_LSB__REG,
+			a_data_u8r, BMI160_SENSOR_TIME_LENGTH);
+
+			*v_sensor_time_u32 = (u32)
+			((((u32)a_data_u8r[BMI160_SENSOR_TIME_MSB_BYTE])
+			<< BMI160_SHIFT_BIT_POSITION_BY_16_BITS)
+			|(((u32)a_data_u8r[BMI160_SENSOR_TIME_XLSB_BYTE])
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[BMI160_SENSOR_TIME_LSB_BYTE]));
+		}
+	return com_rslt;
+}
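+/*
+ * Illustrative sketch (disabled with #if 0): converting the 24-bit
+ * sensor time counter to microseconds. The 39 us tick is the nominal
+ * sensortime resolution commonly quoted for the BMI160; treat it as an
+ * assumption and check the datasheet revision in use.
+ */
+#if 0
+static u32 example_sensor_time_to_us(void)
+{
+	u32 ticks = BMI160_INIT_VALUE;
+
+	if (bmi160_get_sensor_time(&ticks) != SUCCESS)
+		return 0;
+	/* one tick is nominally 39 us; the counter wraps at 2^24 ticks */
+	return ticks * 39;
+}
+#endif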
+/*!
+ *	@brief This API reads the Gyroscope self test
+ *	status from the register 0x1B bit 1
+ *
+ *
+ *  @param v_gyro_selftest_u8 : The value of gyro self test status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | Gyroscope self test is running or failed
+ *   1       | Gyroscope self test completed successfully
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_selftest(u8
+*v_gyro_selftest_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STAT_GYRO_SELFTEST_OK__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_selftest_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STAT_GYRO_SELFTEST_OK);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of
+ *	mag manual interface operation from the register 0x1B bit 2
+ *
+ *
+ *
+ *  @param v_mag_manual_stat_u8 : The value of mag manual operation status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | No manual magnetometer interface operation is ongoing
+ *   1       | Manual magnetometer interface operation is ongoing
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_manual_operation_stat(u8
+*v_mag_manual_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read manual operation*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STAT_MAG_MANUAL_OPERATION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_manual_stat_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STAT_MAG_MANUAL_OPERATION);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the fast offset compensation
+ *	status from the register 0x1B bit 3
+ *
+ *
+ *  @param v_foc_rdy_u8 : The status of fast offset compensation
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_rdy(u8
+*v_foc_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the FOC status*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STAT_FOC_RDY__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_rdy_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STAT_FOC_RDY);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API Reads the nvm_rdy status from the
+ *	register 0x1B bit 4
+ *
+ *
+ *  @param v_nvm_rdy_u8 : The value of NVM ready status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | NVM write operation in progress
+ *   1       | NVM is ready to accept a new write trigger
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_nvm_rdy(u8
+*v_nvm_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the nvm ready status*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STAT_NVM_RDY__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nvm_rdy_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STAT_NVM_RDY);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of mag data ready
+ *	from the register 0x1B bit 5
+ *	The status is reset when one mag data register is read out
+ *
+ *  @param v_data_rdy_u8 : The value of mag data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_data_rdy_mag(u8
+*v_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STAT_DATA_RDY_MAG__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STAT_DATA_RDY_MAG);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of gyro data ready from the
+ *	register 0x1B bit 6
+ *	The status is reset when the gyro data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of gyro data ready
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_data_rdy(u8
+*v_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STAT_DATA_RDY_GYRO__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STAT_DATA_RDY_GYRO);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of accel data ready from the
+ *	register 0x1B bit 7
+ *	The status is reset when the accel data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of accel data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_data_rdy(u8
+*v_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/*reads the status of accel data ready*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STAT_DATA_RDY_ACCEL__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STAT_DATA_RDY_ACCEL);
+		}
+	return com_rslt;
+}
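+/*
+ * Illustrative polling sketch (disabled with #if 0): waiting for the
+ * accel data ready flag before reading a sample, mirroring the retry
+ * pattern of bmi160_set_mag_interface_normal(). The retry count and the
+ * helper name are examples only.
+ */
+#if 0
+static BMI160_RETURN_FUNCTION_TYPE example_read_accel_when_ready(
+struct bmi160_accel_t *accel)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 drdy = BMI160_INIT_VALUE;
+	u8 tries = 10;
+
+	while (tries--) {
+		com_rslt = bmi160_get_accel_data_rdy(&drdy);
+		if (com_rslt == SUCCESS && drdy)
+			return bmi160_read_accel_xyz(accel);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	return E_BMI160_COMM_RES;
+}
+#endif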
+/*!
+ *	@brief This API reads the step detector interrupt status
+ *	from the register 0x1C bit 0
+ *	This flag is associated with the step detector interrupt.
+ *	It is set when the step detector interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_step_intr_u8 : The status of step detector interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_step_intr(u8
+*v_step_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_STEP_INTR__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_step_intr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_STEP_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the
+ *	significant motion interrupt status
+ *	from the register 0x1C bit 1
+ *	This flag is associated with the significant motion interrupt.
+ *	It is set when the significant motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_significant_intr_u8 : The status of significant
+ *	motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_significant_intr(u8
+*v_significant_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_significant_intr_u8  = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the any motion interrupt status
+ *	from the register 0x1C bit 2
+ *	This flag is associated with the any-motion interrupt.
+ *	It is set when the any-motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *  @param v_any_motion_intr_u8 : The status of any-motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_any_motion_intr(u8
+*v_any_motion_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_ANY_MOTION__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_intr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_ANY_MOTION);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the power mode trigger interrupt status
+ *	from the register 0x1C bit 3
+ *	This flag is associated with the PMU trigger interrupt.
+ *	It is set when the PMU trigger interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_pmu_trigger_intr_u8 : The status of power mode trigger interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_pmu_trigger_intr(u8
+*v_pmu_trigger_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_PMU_TRIGGER__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_pmu_trigger_intr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_PMU_TRIGGER);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the double tap status
+ *	from the register 0x1C bit 4
+ *	This flag is associated with the double tap interrupt.
+ *	It is set when the double tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_double_tap_intr_u8 : The status of double tap interrupt
+ *
+ *	@note Double tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_double_tap()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat2_tap_first_x()
+ *	@note bmi160_get_stat2_tap_first_y()
+ *	@note bmi160_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note bmi160_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note bmi160_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note bmi160_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note bmi160_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_double_tap_intr(u8
+*v_double_tap_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_DOUBLE_TAP_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_double_tap_intr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_DOUBLE_TAP_INTR);
+		}
+	return com_rslt;
+}
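+/*
+ * Illustrative sketch (disabled with #if 0): reacting to a double tap by
+ * checking the status bit and then the triggering axis through the
+ * tap-first helpers referenced in the notes above. Variable names are
+ * examples only.
+ */
+#if 0
+static void example_handle_double_tap(void)
+{
+	u8 tap = BMI160_INIT_VALUE;
+	u8 first_x = BMI160_INIT_VALUE;
+	u8 first_y = BMI160_INIT_VALUE;
+	u8 first_z = BMI160_INIT_VALUE;
+
+	if (bmi160_get_stat0_double_tap_intr(&tap) != SUCCESS || !tap)
+		return;
+	bmi160_get_stat2_tap_first_x(&first_x);
+	bmi160_get_stat2_tap_first_y(&first_y);
+	bmi160_get_stat2_tap_first_z(&first_z);
+	/* the first_x/first_y/first_z flags report which axis triggered */
+}
+#endif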
+/*!
+ *	@brief This API reads the single tap status
+ *	from the register 0x1C bit 5
+ *	This flag is associated with the single tap interrupt.
+ *	It is set when the single tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_single_tap_intr_u8 : The status of single tap interrupt
+ *
+ *	@note Single tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_single_tap()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat2_tap_first_x()
+ *	@note bmi160_get_stat2_tap_first_y()
+ *	@note bmi160_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note bmi160_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note bmi160_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note bmi160_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note bmi160_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_single_tap_intr(u8
+*v_single_tap_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_SINGLE_TAP_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_single_tap_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_SINGLE_TAP_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the orient status
+ *	from the register 0x1C bit 6
+ *	This flag is associated with the orient interrupt.
+ *	It is set when the orient interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_orient_intr_u8 : The status of orient interrupt
+ *
+ *	@note For orient interrupt configuration use the following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat0_orient_intr()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat3_orient_xy()
+ *	@note bmi160_get_stat3_orient_z()
+ *	@note bmi160_set_intr_orient_axes_enable()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_orient()
+ *	@note INTERRUPT OUTPUT
+ *	@note bmi160_set_intr_orient_ud_enable()
+ *	@note THETA
+ *	@note bmi160_set_intr_orient_theta()
+ *	@note HYSTERESIS
+ *	@note bmi160_set_intr_orient_hyst()
+ *	@note BLOCKING
+ *	@note bmi160_set_intr_orient_blocking()
+ *	@note MODE
+ *	@note bmi160_set_intr_orient_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_orient_intr(u8
+*v_orient_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_ORIENT__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_ORIENT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the flat interrupt status
+ *	from the register 0x1C bit 7
+ *	This flag is associated with the flat interrupt.
+ *	It is set when the flat interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_flat_intr_u8 : The status of flat interrupt
+ *
+ *	@note For flat configuration use the following functions
+ *	@note STATS
+ *	@note bmi160_get_stat0_flat_intr()
+ *	@note bmi160_get_stat3_flat()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_flat()
+ *	@note THETA
+ *	@note bmi160_set_intr_flat_theta()
+ *	@note HOLD TIME
+ *	@note bmi160_set_intr_flat_hold()
+ *	@note HYSTERESIS
+ *	@note bmi160_set_intr_flat_hyst()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_flat_intr(u8
+*v_flat_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_0_FLAT__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_0_FLAT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g interrupt status
+ *	from the register 0x1D bit 2
+ *	This flag is associated with the high_g interrupt.
+ *	It is set when the high_g interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_high_g_intr_u8 : The status of high_g interrupt
+ *
+ *	@note High_g interrupt configured by following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat1_high_g_intr()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat3_high_g_first_x()
+ *	@note bmi160_get_stat3_high_g_first_y()
+ *	@note bmi160_get_stat3_high_g_first_z()
+ *	@note SIGN MAPPING
+ *	@note bmi160_get_stat3_high_g_first_sign()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_high_g()
+  *	@note HYSTERESIS
+ *	@note bmi160_set_intr_high_g_hyst()
+ *	@note DURATION
+ *	@note bmi160_set_intr_high_g_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_high_g_thres()
+ *	@note SOURCE
+ *	@note bmi160_set_intr_low_high_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_high_g_intr(u8
+*v_high_g_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_1_HIGH_G_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_1_HIGH_G_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the low g interrupt status
+ *	from the register 0x1D bit 3
+ *	This flag is associated with the low_g interrupt.
+ *	It is set when the low_g interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_low_g_intr_u8 : The status of low_g interrupt
+ *
+ *	@note Low_g interrupt configured by following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat1_low_g_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_low_g()
+ *	@note SOURCE
+ *	@note bmi160_set_intr_low_high_source()
+ *	@note DURATION
+ *	@note bmi160_set_intr_low_g_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_low_g_thres()
+ *	@note HYSTERESIS
+ *	@note bmi160_set_intr_low_g_hyst()
+ *	@note MODE
+ *	@note bmi160_set_intr_low_g_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_low_g_intr(u8
+*v_low_g_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_1_LOW_G_INTR__REG, &v_data_u8,
+			 BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_1_LOW_G_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads data ready interrupt status
+ *	from the register 0x1D bit 4
+ *	This flag is associated with the data ready interrupt.
+ *	It is set when the data ready interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_data_rdy_intr_u8 : The status of data ready interrupt
+ *
+ *	@note Data ready interrupt configured by following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat1_data_rdy_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_data_rdy()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_data_rdy_intr(u8
+*v_data_rdy_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_1_DATA_RDY_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_intr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_1_DATA_RDY_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the FIFO full interrupt status
+ *	from the register 0x1D bit 5
+ *	This flag is associated with the FIFO full interrupt.
+ *	It is set when the FIFO full interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will
+ *	be permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_full_intr_u8 : The status of fifo full interrupt
+ *
+ *	@note The FIFO full interrupt can be configured by the following function
+ *	@note bmi160_set_intr_fifo_full()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_fifo_full_intr(u8
+*v_fifo_full_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_1_FIFO_FULL_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_full_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_1_FIFO_FULL_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the FIFO watermark interrupt status
+ *	from the register 0x1D bit 6
+ *	This flag is associated with the FIFO watermark interrupt.
+ *	It is set when the FIFO watermark interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_wm_intr_u8 : The status of fifo water mark interrupt
+ *
+ *	@note The FIFO watermark interrupt can be configured by the following function
+ *	@note bmi160_set_intr_fifo_wm()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_fifo_wm_intr(u8
+*v_fifo_wm_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_1_FIFO_WM_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_wm_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_1_FIFO_WM_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the no motion interrupt status
+ *	from the register 0x1D bit 7
+ *	This flag is associated with the no motion interrupt.
+ *	It is set when the no motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_nomotion_intr_u8 : The status of no motion interrupt
+ *
+ *	@note No motion interrupt can be configured by the following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat1_nomotion_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_nomotion()
+ *	@note DURATION
+ *	@note bmi160_set_intr_slow_no_motion_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_slow_no_motion_thres()
+ *	@note SLOW/NO MOTION SELECT
+ *	@note bmi160_set_intr_slow_no_motion_select()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_nomotion_intr(u8
+*v_nomotion_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the no motion interrupt*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_1_NOMOTION_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nomotion_intr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_1_NOMOTION_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *@brief This API reads the status of any motion first x
+ *	from the register 0x1E bit 0
+ *
+ *
+ *@param v_anymotion_first_x_u8 : The status of any motion first x interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_first_x(u8
+*v_anymotion_first_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the any motion first x interrupt*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_anymotion_first_x_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of any motion first y interrupt
+ *	from the register 0x1E bit 1
+ *
+ *
+ *
+ *@param v_any_motion_first_y_u8 : The status of any motion first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_first_y(u8
+*v_any_motion_first_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the any motion first y interrupt*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_first_y_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of any motion first z interrupt
+ *	from the register 0x1E bit 2
+ *
+ *
+ *
+ *
+ *@param v_any_motion_first_z_u8 : The status of any motion first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_first_z(u8
+*v_any_motion_first_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the any motion first z interrupt*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_first_z_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the any motion sign status from the
+ *	register 0x1E bit 3
+ *
+ *
+ *
+ *
+ *  @param v_anymotion_sign_u8 : The status of any motion sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_sign(u8
+*v_anymotion_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read any motion sign interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_SIGN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_anymotion_sign_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_ANY_MOTION_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the any motion tap first x status from the
+ *	register 0x1E bit 4
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_x_u8 :The status of any motion tap first x
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_first_x(u8
+*v_tap_first_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap first x interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_TAP_FIRST_X__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_first_x_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_TAP_FIRST_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the tap first y interrupt status from the
+ *	register 0x1E bit 5
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_y_u8 :The status of tap first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_first_y(u8
+*v_tap_first_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap first y interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_TAP_FIRST_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_first_y_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_TAP_FIRST_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the tap first z interrupt status  from the
+ *	register 0x1E bit 6
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_z_u8 :The status of tap first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_first_z(u8
+*v_tap_first_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap first z interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_TAP_FIRST_Z__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_first_z_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_TAP_FIRST_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the tap sign status from the
+ *	register 0x1E bit 7
+ *
+ *
+ *
+ *
+ *  @param v_tap_sign_u8 : The status of tap sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_sign(u8
+*v_tap_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap_sign interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_2_TAP_SIGN__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_sign_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_2_TAP_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g first x status from the
+ *	register 0x1F bit 0
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_x_u8 :The status of high_g first x
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_first_x(u8
+*v_high_g_first_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read highg_x interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_X__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_first_x_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g first y status from the
+ *	register 0x1F bit 1
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_y_u8 : The status of high_g first y
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_first_y(u8
+*v_high_g_first_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read highg_y interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_first_y_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g first z status from the
+ *	register 0x1F bit 2
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_z_u8 : The status of high_g first z
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_first_z(u8
+*v_high_g_first_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read highg_z interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Z__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_first_z_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g sign status from the
+ *	register 0x1F bit 3
+ *
+ *
+ *
+ *
+ *  @param v_high_g_sign_u8 :The status of high sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_sign(u8
+*v_high_g_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read highg_sign interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_3_HIGH_G_SIGN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_sign_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_3_HIGH_G_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of orient_xy plane
+ *	from the register 0x1F bit 4 and 5
+ *
+ *
+ *  @param v_orient_xy_u8 :The status of orient_xy plane
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | portrait upright
+ *   0x01     | portrait upside down
+ *   0x02     | landscape left
+ *   0x03     | landscape right
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_orient_xy(u8
+*v_orient_xy_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read orient plane xy interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_3_ORIENT_XY__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_xy_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_3_ORIENT_XY);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of orient z plane
+ *	from the register 0x1F bit 6
+ *
+ *
+ *  @param v_orient_z_u8 :The status of orient z
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | upward looking
+ *   0x01     | downward looking
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_orient_z(u8
+*v_orient_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read orient z plane interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_3_ORIENT_Z__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_z_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_3_ORIENT_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the flat status from the register
+ *	0x1F bit 7
+ *
+ *
+ *  @param v_flat_u8 : The status of flat interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | non flat
+ *   0x01     | flat position
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_flat(u8
+*v_flat_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read flat interrupt status */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_INTR_STAT_3_FLAT__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_STAT_3_FLAT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the temperature of the sensor
+ *	from the registers 0x20 (LSB) and 0x21 (MSB), bit 0 to 7
+ *
+ *
+ *
+ *  @param v_temp_s16 : The value of temperature
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_temp(s16
+*v_temp_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the temperature LSB and MSB data
+	v_data_u8[0] - LSB
+	v_data_u8[1] - MSB*/
+	u8 v_data_u8[BMI160_TEMP_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read temperature data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_TEMP_LSB_VALUE__REG, v_data_u8,
+			BMI160_TEMP_DATA_LENGTH);
+			*v_temp_s16 =
+			(s16)(((s32)((s8) (v_data_u8[BMI160_TEMP_MSB_BYTE]) <<
+			BMI160_SHIFT_BIT_POSITION_BY_08_BITS))
+			| v_data_u8[BMI160_TEMP_LSB_BYTE]);
+		}
+	return com_rslt;
+}
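+/*
+ * Editor's note: an illustrative conversion sketch, not part of the driver.
+ * The scaling (raw value 0x0000 = 23 degC, 1/512 degC per LSB) is taken
+ * from the BMI160 datasheet and should be treated as an assumption here;
+ * the comment above only documents the register layout.
+ *
+ *	s16 raw_temp = 0;
+ *	s32 temp_milli_degc = 0;
+ *
+ *	if (bmi160_get_temp(&raw_temp) == SUCCESS)
+ *		temp_milli_degc = 23000 + ((s32)raw_temp * 1000) / 512;
+ */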
+/*!
+ *	@brief This API reads the fifo byte counter of the sensor
+ *	from the registers 0x22 and 0x23, bit 0 to 7 and bit 0 to 2
+ *	@brief this byte counter is updated each time a complete frame
+ *	was read or written
+ *
+ *
+ *  @param v_fifo_length_u32 : The value of fifo byte counter
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_fifo_length(u32 *v_fifo_length_u32)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array contains the fifo length data
+	a_data_u8r[0] - fifo length LSB
+	a_data_u8r[1] - fifo length MSB*/
+	u8 a_data_u8r[BMI160_FIFO_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read fifo length*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_BYTE_COUNTER_LSB__REG, a_data_u8r,
+			 BMI160_FIFO_DATA_LENGTH);
+
+			a_data_u8r[BMI160_FIFO_LENGTH_MSB_BYTE] =
+			BMI160_GET_BITSLICE(
+			a_data_u8r[BMI160_FIFO_LENGTH_MSB_BYTE],
+			BMI160_USER_FIFO_BYTE_COUNTER_MSB);
+
+			*v_fifo_length_u32 =
+			(u32)(((u32)((u8) (
+			a_data_u8r[BMI160_FIFO_LENGTH_MSB_BYTE]) <<
+			BMI160_SHIFT_BIT_POSITION_BY_08_BITS))
+			| a_data_u8r[BMI160_FIFO_LENGTH_LSB_BYTE]);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the fifo data of the sensor
+ *	from the register 0x24
+ *	@brief Data format depends on the setting of register FIFO_CONFIG
+ *
+ *
+ *
+ *  @param v_fifodata_u8 : Pointer holding the fifo data
+ *  @param v_fifo_length_u16 : The value of fifo length, maximum
+ *	1024
+ *
+ *	@note The number of bytes to read can be obtained with bmi160_fifo_length()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_fifo_data(
+u8 *v_fifodata_u8, u16 v_fifo_length_u16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read fifo data*/
+			com_rslt =
+			p_bmi160->BMI160_BURST_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_DATA__REG,
+			v_fifodata_u8, v_fifo_length_u16);
+
+		}
+	return com_rslt;
+}
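+/*
+ * Editor's note: a usage sketch, not part of the driver, combining
+ * bmi160_fifo_length() and bmi160_fifo_data(). The 1024-byte buffer size
+ * follows the "maximum 1024" note in the parameter description above;
+ * frame parsing depends on FIFO_CONFIG and is omitted.
+ *
+ *	static u8 fifo_buf[1024];
+ *	u32 fifo_len = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE rc = E_BMI160_COMM_RES;
+ *
+ *	if ((bmi160_fifo_length(&fifo_len) == SUCCESS) && (fifo_len > 0)) {
+ *		if (fifo_len > sizeof(fifo_buf))
+ *			fifo_len = sizeof(fifo_buf);
+ *		rc = bmi160_fifo_data(fifo_buf, (u16)fifo_len);
+ *	}
+ */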
+/*!
+ *	@brief This API is used to get the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	BMI160_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | BMI160_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_output_data_rate(
+u8 *v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel output data rate*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_data_rate_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	BMI160_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | BMI160_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_output_data_rate(
+u8 v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* accel output data rate selection */
+		if ((v_output_data_rate_u8 != BMI160_INIT_VALUE) &&
+		(v_output_data_rate_u8 <= BMI160_MAX_ACCEL_OUTPUT_DATA_RATE)) {
+			/* write accel output data rate */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE,
+				v_output_data_rate_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
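+/*
+ * Editor's note: illustrative only. Selecting the 100 Hz accel output data
+ * rate with the macro name listed in the table above (value 8); a value
+ * that fails the range check in the function above is rejected with
+ * E_BMI160_OUT_OF_RANGE.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_accel_output_data_rate(
+ *		BMI160_ACCEL_OUTPUT_DATA_RATE_100HZ);
+ */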
+/*!
+ *	@brief This API is used to get the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on the under sampling parameter
+ *	@note the under sampling parameter can be set by the function
+ *	bmi160_set_accel_under_sampling_parameter()
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_bw(u8 *v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel bandwidth */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_CONFIG_ACCEL_BW__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_bw_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_ACCEL_CONFIG_ACCEL_BW);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on the under sampling parameter
+ *	@note the under sampling parameter can be set by the function
+ *	bmi160_set_accel_under_sampling_parameter()
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_bw(u8 v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* select accel bandwidth*/
+		if (v_bw_u8 <= BMI160_MAX_ACCEL_BW) {
+			/* write accel bandwidth*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_CONFIG_ACCEL_BW__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_ACCEL_CONFIG_ACCEL_BW,
+				v_bw_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_ACCEL_CONFIG_ACCEL_BW__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
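+/*
+ * Editor's note: an illustrative sketch, not part of the driver. With
+ * undersampling disabled (acc_us = 0) the bandwidth field selects the
+ * filter configuration, so 0x02 selects normal mode per the table above.
+ * The raw value is used because no dedicated macro for the filter modes
+ * appears in this section.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_accel_bw(0x02);
+ */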
+/*!
+ *	@brief This API is used to get the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_under_sampling_parameter(
+u8 *v_accel_under_sampling_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel under sampling parameter */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_under_sampling_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_under_sampling_parameter(
+u8 v_accel_under_sampling_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	if (v_accel_under_sampling_u8 <= BMI160_MAX_UNDER_SAMPLING) {
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+		BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			/* write the accel under sampling parameter */
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING,
+			v_accel_under_sampling_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
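+/*
+ * Editor's note: an illustrative low-power configuration sketch, not part
+ * of the driver. It enables undersampling and then programs the bandwidth
+ * field, which in this mode selects the averaging depth (0x03 = average
+ * 8 samples per the table above). BMI160_ENABLE is the value named in the
+ * parameter table.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_accel_under_sampling_parameter(BMI160_ENABLE);
+ *	if (rc == SUCCESS)
+ *		rc = bmi160_set_accel_bw(0x03);
+ */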
+/*!
+ *	@brief This API is used to get the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | BMI160_ACCEL_RANGE_2G
+ *   0x05    | BMI160_ACCEL_RANGE_4G
+ *   0x08    | BMI160_ACCEL_RANGE_8G
+ *   0x0C    | BMI160_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_range(
+u8 *v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel range*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_RANGE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_range_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_ACCEL_RANGE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | BMI160_ACCEL_RANGE_2G
+ *   0x05    | BMI160_ACCEL_RANGE_4G
+ *   0x08    | BMI160_ACCEL_RANGE_8G
+ *   0x0C    | BMI160_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_range(u8 v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if ((v_range_u8 == BMI160_ACCEL_RANGE0) ||
+			(v_range_u8 == BMI160_ACCEL_RANGE1) ||
+			(v_range_u8 == BMI160_ACCEL_RANGE3) ||
+			(v_range_u8 == BMI160_ACCEL_RANGE4)) {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_RANGE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8  = BMI160_SET_BITSLICE(
+				v_data_u8, BMI160_USER_ACCEL_RANGE,
+				v_range_u8);
+				/* write the accel range*/
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_ACCEL_RANGE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
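+/*
+ * Editor's note: illustrative only. Selecting the +/-4g accel range with
+ * the macro from the table above (value 0x05); the function above accepts
+ * only the four range codes listed there and returns E_BMI160_OUT_OF_RANGE
+ * for anything else.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_accel_range(BMI160_ACCEL_RANGE_4G);
+ */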
+/*!
+ *	@brief This API is used to get the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | BMI160_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | BMI160_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | BMI160_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | BMI160_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | BMI160_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | BMI160_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | BMI160_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | BMI160_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_output_data_rate(
+u8 *v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the gyro output data rate*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_data_rate_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | BMI160_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | BMI160_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | BMI160_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | BMI160_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | BMI160_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | BMI160_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | BMI160_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | BMI160_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_output_data_rate(
+u8 v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* select the gyro output data rate*/
+		if ((v_output_data_rate_u8 <  BMI160_OUTPUT_DATA_RATE6) &&
+		(v_output_data_rate_u8 != BMI160_INIT_VALUE)
+		&& (v_output_data_rate_u8 !=  BMI160_OUTPUT_DATA_RATE1)
+		&& (v_output_data_rate_u8 !=  BMI160_OUTPUT_DATA_RATE2)
+		&& (v_output_data_rate_u8 !=  BMI160_OUTPUT_DATA_RATE3)
+		&& (v_output_data_rate_u8 !=  BMI160_OUTPUT_DATA_RATE4)
+		&& (v_output_data_rate_u8 !=  BMI160_OUTPUT_DATA_RATE5)
+		&& (v_output_data_rate_u8 !=  BMI160_OUTPUT_DATA_RATE6)
+		&& (v_output_data_rate_u8 !=  BMI160_OUTPUT_DATA_RATE7)) {
+			/* write the gyro output data rate */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE,
+				v_output_data_rate_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
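+/*
+ * Editor's note: illustrative only. Requesting the 100 Hz gyro output data
+ * rate with the macro from the table above (value 0x08); whether a given
+ * code is accepted depends on the range check in the function above, which
+ * otherwise returns E_BMI160_OUT_OF_RANGE.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_gyro_output_data_rate(
+ *		BMI160_GYRO_OUTPUT_DATA_RATE_100HZ);
+ */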
+/*!
+ *	@brief This API is used to get the
+ *	bandwidth of gyro from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | BMI160_GYRO_OSR4_MODE
+ *   0x01     | BMI160_GYRO_OSR2_MODE
+ *   0x02     | BMI160_GYRO_NORMAL_MODE
+ *   0x03     | BMI160_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_bw(u8 *v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro bandwidth*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_GYRO_CONFIG_BW__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_bw_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_GYRO_CONFIG_BW);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	bandwidth of gyro from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | BMI160_GYRO_OSR4_MODE
+ *   0x01     | BMI160_GYRO_OSR2_MODE
+ *   0x02     | BMI160_GYRO_NORMAL_MODE
+ *   0x03     | BMI160_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_bw(u8 v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_bw_u8 <= BMI160_MAX_GYRO_BW) {
+			/* write the gyro bandwidth*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_GYRO_CONFIG_BW__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_CONFIG_BW, v_bw_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_GYRO_CONFIG_BW__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | BMI160_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | BMI160_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | BMI160_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | BMI160_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | BMI160_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_range(u8 *v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the gyro range */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_GYRO_RANGE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_range_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_GYRO_RANGE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | BMI160_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | BMI160_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | BMI160_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | BMI160_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | BMI160_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_range(u8 v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_range_u8 <= BMI160_MAX_GYRO_RANGE) {
+			/* write the gyro range value */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_GYRO_RANGE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_RANGE,
+				v_range_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_GYRO_RANGE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
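+/*
+ * Editor's note: illustrative only. Selecting the 500 deg/s gyro range with
+ * the macro from the table above (value 0x02); codes above
+ * BMI160_MAX_GYRO_RANGE are rejected by the function above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_gyro_range(BMI160_GYRO_RANGE_500_DEG_SEC);
+ */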
+/*!
+ *	@brief This API is used to get the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rat_u8e : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |BMI160_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |BMI160_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |BMI160_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |BMI160_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |BMI160_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |BMI160_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |BMI160_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |BMI160_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |BMI160_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |BMI160_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |BMI160_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |BMI160_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_output_data_rate(
+u8 *v_output_data_rat_u8e)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the mag data output rate*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_data_rat_u8e = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rat_u8e : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |BMI160_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |BMI160_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |BMI160_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |BMI160_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |BMI160_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |BMI160_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |BMI160_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |BMI160_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |BMI160_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |BMI160_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |BMI160_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |BMI160_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_output_data_rate(
+u8 v_output_data_rat_u8e)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* select the mag data output rate*/
+		if ((v_output_data_rat_u8e
+		<= BMI160_MAX_ACCEL_OUTPUT_DATA_RATE)
+		&& (v_output_data_rat_u8e
+		!= BMI160_OUTPUT_DATA_RATE0)
+		&& (v_output_data_rat_u8e
+		!=  BMI160_OUTPUT_DATA_RATE6)
+		&& (v_output_data_rat_u8e
+		!=  BMI160_OUTPUT_DATA_RATE7)) {
+			/* write the mag data output rate*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE,
+				v_output_data_rat_u8e);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
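+/*
+ * Editor's note: illustrative only. Requesting a 12.5 Hz magnetometer
+ * output data rate with the macro from the table above (value 0x05),
+ * assuming this code passes the range check in the function above;
+ * rejected codes yield E_BMI160_OUT_OF_RANGE.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_mag_output_data_rate(
+ *		BMI160_MAG_OUTPUT_DATA_RATE_12_5HZ);
+ */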
+ /*!
+ *	@brief This API is used to read Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_down_gyro(
+u8 *v_fifo_down_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the gyro fifo down*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_DOWN_GYRO__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_down_gyro_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_DOWN_GYRO);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to set Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_down_gyro(
+u8 v_fifo_down_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the gyro fifo down*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_DOWN_GYRO__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(
+				v_data_u8,
+				BMI160_USER_FIFO_DOWN_GYRO,
+				v_fifo_down_gyro_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_DOWN_GYRO__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
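+/*
+ * Editor's note: illustrative only. The field written here is the exponent
+ * n of the 2**n downsampling factor described above, so a value of 2 keeps
+ * every fourth gyro sample in the FIFO. The raw value is used because no
+ * dedicated macro for the exponent appears in this section.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = bmi160_set_fifo_down_gyro(2);
+ */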
+/*!
+ *	@brief This API is used to read gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_fifo_filter_data(
+u8 *v_gyro_fifo_filter_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the gyro fifo filter data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_FILTER_GYRO__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_fifo_filter_data_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_FILTER_GYRO);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_fifo_filter_data(
+u8 v_gyro_fifo_filter_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_gyro_fifo_filter_data_u8
+		<= BMI160_MAX_VALUE_FIFO_FILTER) {
+			/* write the gyro fifo filter data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_FILTER_GYRO__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(
+				v_data_u8,
+				BMI160_USER_FIFO_FILTER_GYRO,
+				v_gyro_fifo_filter_data_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_FILTER_GYRO__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_down_accel(
+u8 *v_fifo_down_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel fifo down data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_DOWN_ACCEL__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_down_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_DOWN_ACCEL);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to set Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_down_accel(
+u8 v_fifo_down_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the accel fifo down data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_DOWN_ACCEL__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_DOWN_ACCEL, v_fifo_down_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_DOWN_ACCEL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_fifo_filter_data(
+u8 *v_accel_fifo_filter_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel fifo filter data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_FILTER_ACCEL__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_fifo_filter_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_FILTER_ACCEL);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_fifo_filter_data(
+u8 v_accel_fifo_filter_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_accel_fifo_filter_u8 <= BMI160_MAX_VALUE_FIFO_FILTER) {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_FILTER_ACCEL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				/* write accel fifo filter data */
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_FILTER_ACCEL,
+				v_accel_fifo_filter_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_FILTER_ACCEL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
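+/*
+ * Usage sketch (illustrative only, values shown are examples): once the
+ * device structure behind p_bmi160 has been set up by the driver's init
+ * path, the accel FIFO pre-processing could be selected as below.  The
+ * down-sampling code 0x02 is an arbitrary example value.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_fifo_down_accel(0x02);
+ *	rslt += bmi160_set_accel_fifo_filter_data(0x01);
+ *	if (rslt != SUCCESS)
+ *		return rslt;
+ */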
+/*!
+ *	@brief This API is used to read the FIFO watermark level
+ *	from the register 0x46 bit 0 to 7. An interrupt is triggered
+ *	when the FIFO fill level reaches the watermark.
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_wm(
+u8 *v_fifo_wm_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the fifo water mark level*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_WM__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_wm_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_WM);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the FIFO watermark level
+ *	from the register 0x46 bit 0 to 7. An interrupt is triggered
+ *	when the FIFO fill level reaches the watermark.
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_wm(
+u8 v_fifo_wm_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the fifo water mark level*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_WM__REG,
+			&v_fifo_wm_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
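+/*
+ * Usage sketch (illustrative only, values shown are examples): programming
+ * the FIFO watermark and reading it back.  The threshold 0x20 is arbitrary;
+ * consult the datasheet for the unit of the fifo_water_mark field before
+ * choosing a real value.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *	u8 wm = BMI160_INIT_VALUE;
+ *
+ *	rslt = bmi160_set_fifo_wm(0x20);
+ *	rslt += bmi160_get_fifo_wm(&wm);
+ */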
+/*!
+ *	@brief This API reads whether a sensortime frame is appended
+ *	after the last valid data frame, from the register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_time_enable(
+u8 *v_fifo_time_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the fifo sensor time*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_TIME_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_time_enable_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_TIME_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets whether a sensortime frame is appended
+ *	after the last valid data frame, from the register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_time_enable(
+u8 v_fifo_time_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_fifo_time_enable_u8 <= BMI160_MAX_VALUE_FIFO_TIME) {
+			/* write the fifo sensor time*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_TIME_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_TIME_ENABLE,
+				v_fifo_time_enable_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_TIME_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_tag_intr2_enable(
+u8 *v_fifo_tag_intr2_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the fifo tag interrupt2*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_TAG_INTR2_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_tag_intr2_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_TAG_INTR2_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_tag_intr2_enable(
+u8 v_fifo_tag_intr2_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_fifo_tag_intr2_u8 <= BMI160_MAX_VALUE_FIFO_INTR) {
+			/* write the fifo tag interrupt2*/
+			com_rslt = bmi160_set_input_enable(1,
+			v_fifo_tag_intr2_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_TAG_INTR2_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_TAG_INTR2_ENABLE,
+				v_fifo_tag_intr2_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_TAG_INTR2_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API gets FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_tag_intr1_enable(
+u8 *v_fifo_tag_intr1_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read fifo tag interrupt*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_TAG_INTR1_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_tag_intr1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_TAG_INTR1_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_tag_intr1_enable(
+u8 v_fifo_tag_intr1_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_fifo_tag_intr1_u8 <= BMI160_MAX_VALUE_FIFO_INTR) {
+			/* write the fifo tag interrupt*/
+			com_rslt = bmi160_set_input_enable(BMI160_INIT_VALUE,
+			v_fifo_tag_intr1_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_TAG_INTR1_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_TAG_INTR1_ENABLE,
+				v_fifo_tag_intr1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_TAG_INTR1_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_header_enable(
+u8 *v_fifo_header_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read fifo header */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_HEADER_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_header_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_HEADER_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API set FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_header_enable(
+u8 v_fifo_header_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_fifo_header_u8 <= BMI160_MAX_VALUE_FIFO_HEADER) {
+			/* write the fifo header */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_HEADER_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_HEADER_ENABLE,
+				v_fifo_header_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_HEADER_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
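+/*
+ * Usage sketch (illustrative only): selecting headered FIFO frames and
+ * appending a sensortime frame after the last valid data frame, using the
+ * helpers defined above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_fifo_header_enable(BMI160_ENABLE);
+ *	rslt += bmi160_set_fifo_time_enable(0x01);
+ */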
+/*!
+ *	@brief This API is used to read whether magnetometer data
+ *	(all 3 axes) is stored in the FIFO, from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_mag_enable(
+u8 *v_fifo_mag_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the fifo mag enable*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_MAG_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_mag_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_MAG_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set whether magnetometer data
+ *	(all 3 axes) is stored in the FIFO, from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_mag_enable(
+u8 v_fifo_mag_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			if (v_fifo_mag_u8 <= BMI160_MAX_VALUE_FIFO_MAG) {
+				/* write the fifo mag enable*/
+				com_rslt =
+				p_bmi160->BMI160_BUS_READ_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_FIFO_MAG_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+				if (com_rslt == SUCCESS) {
+					v_data_u8 =
+					BMI160_SET_BITSLICE(v_data_u8,
+					BMI160_USER_FIFO_MAG_ENABLE,
+					v_fifo_mag_u8);
+					com_rslt +=
+					p_bmi160->BMI160_BUS_WRITE_FUNC
+					(p_bmi160->dev_addr,
+					BMI160_USER_FIFO_MAG_ENABLE__REG,
+					&v_data_u8,
+					BMI160_GEN_READ_WRITE_DATA_LENGTH);
+				}
+			} else {
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read whether accel data
+ *	(all 3 axes) is stored in the FIFO, from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_accel_enable(
+u8 *v_fifo_accel_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel fifo enable*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_ACCEL_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_accel_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_ACCEL_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set whether accel data
+ *	(all 3 axes) is stored in the FIFO, from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_accel_enable(
+u8 v_fifo_accel_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_fifo_accel_u8 <= BMI160_MAX_VALUE_FIFO_ACCEL) {
+			/* write the fifo accel enable */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_ACCEL_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_ACCEL_ENABLE, v_fifo_accel_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_ACCEL_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read whether gyro data
+ *	(all 3 axes) is stored in the FIFO, from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_gyro_enable(
+u8 *v_fifo_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read fifo gyro enable */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_GYRO_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_gyro_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FIFO_GYRO_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set whether gyro data
+ *	(all 3 axes) is stored in the FIFO, from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_gyro_enable(
+u8 v_fifo_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_fifo_gyro_u8 <= BMI160_MAX_VALUE_FIFO_GYRO) {
+			/* write fifo gyro enable*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_FIFO_GYRO_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FIFO_GYRO_ENABLE, v_fifo_gyro_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FIFO_GYRO_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
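+/*
+ * Usage sketch (illustrative only): storing both accel and gyro samples in
+ * the FIFO while leaving magnetometer data out.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_fifo_accel_enable(0x01);
+ *	rslt += bmi160_set_fifo_gyro_enable(0x01);
+ *	rslt += bmi160_set_fifo_mag_enable(0x00);
+ */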
+/*!
+ *	@brief This API is used to read
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_device_addr(
+u8 *v_i2c_device_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the mag I2C device address*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_I2C_DEVICE_ADDR__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_device_addr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_I2C_DEVICE_ADDR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_i2c_device_addr(
+u8 v_i2c_device_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the mag I2C device address*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_I2C_DEVICE_ADDR__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_I2C_DEVICE_ADDR,
+				v_i2c_device_addr_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_I2C_DEVICE_ADDR__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
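+/*
+ * Usage sketch (illustrative only): pointing the secondary interface at an
+ * auxiliary magnetometer.  The 7-bit address 0x10 is only a placeholder;
+ * use the address of the magnetometer actually wired to the interface.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_i2c_device_addr(0x10);
+ */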
+/*!
+ *	@brief This API is used to read
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The value of the mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_burst(
+u8 *v_mag_burst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read mag burst mode length*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_BURST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_burst_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_BURST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The value of the mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_burst(
+u8 v_mag_burst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write mag burst mode length*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_BURST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_MAG_BURST, v_mag_burst_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_MAG_BURST__REG, &v_data_u8,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
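+/*
+ * Usage sketch (illustrative only): selecting the burst read length used on
+ * the secondary interface.  The 2-bit field encodes the 1/2/6/8 byte
+ * lengths; 0x03 is used here only as an example register code, check the
+ * datasheet for the exact code-to-length mapping.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_mag_burst(0x03);
+ */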
+/*!
+ *	@brief This API is used to read
+ *	trigger-readout offset in units of 2.5 ms. If set to zero,
+ *	the offset is maximum, i.e. after readout a trigger
+ *	is issued immediately. from the register 0x4C bit 2 to 5
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_offset(
+u8 *v_mag_offset_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_OFFSET__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_offset_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_OFFSET);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	trigger-readout offset in units of 2.5 ms. If set to zero,
+ *	the offset is maximum, i.e. after readout a trigger
+ *	is issued immediately. from the register 0x4C bit 2 to 5
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_offset(
+u8 v_mag_offset_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+		BMI160_USER_MAG_OFFSET__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_OFFSET, v_mag_offset_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_OFFSET__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API is used to read the mag manual enable bit,
+ *	which enables register access on MAG_IF[2] or MAG_IF[3] writes,
+ *	from the register 0x4C bit 7.
+ *	When enabled, the DATA registers are not updated with
+ *	magnetometer values. Accessing the magnetometer requires
+ *	the magnetometer to be in normal mode in PMU_STATUS.
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_manual_enable(
+u8 *v_mag_manual_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read mag manual */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_MANUAL_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_manual_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_MANUAL_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the mag manual enable bit,
+ *	which enables register access on MAG_IF[2] or MAG_IF[3] writes,
+ *	from the register 0x4C bit 7.
+ *	When enabled, the DATA registers are not updated with
+ *	magnetometer values. Accessing the magnetometer requires
+ *	the magnetometer to be in normal mode in PMU_STATUS.
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_manual_enable(
+u8 v_mag_manual_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* write the mag manual*/
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+		BMI160_USER_MAG_MANUAL_ENABLE__REG, &v_data_u8,
+		BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		if (com_rslt == SUCCESS) {
+			/* set the bit of mag manual enable*/
+			v_data_u8 =
+			BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_MAG_MANUAL_ENABLE, v_mag_manual_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_MAG_MANUAL_ENABLE__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		if (com_rslt == SUCCESS)
+			p_bmi160->mag_manual_enable = v_mag_manual_u8;
+		else
+			p_bmi160->mag_manual_enable = E_BMI160_COMM_RES;
+	}
+return com_rslt;
+}
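+/*
+ * Usage sketch (illustrative only): switching the secondary interface into
+ * manual mode before touching individual magnetometer registers, and back
+ * to data mode afterwards.  While manual mode is active the DATA registers
+ * stop being updated with magnetometer values.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_mag_manual_enable(BMI160_ENABLE);
+ *	... access the magnetometer registers here ...
+ *	rslt += bmi160_set_mag_manual_enable(BMI160_DISABLE);
+ */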
+/*!
+ *	@brief This API is used to read the magnetometer read address
+ *	from the register 0x4D bit 0 to 7
+ *	@brief It is used to provide the read address of the auxiliary mag
+ *
+ *
+ *
+ *
+ *  @param  v_mag_read_addr_u8 : The value of address need to be read
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_read_addr(
+u8 *v_mag_read_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the written address*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_READ_ADDR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_read_addr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_READ_ADDR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	the magnetometer read address in the register 0x4D bit 0 to 7
+ *	@brief The mag read address selects the auxiliary mag register to read
+ *
+ *
+ *
+ *  @param v_mag_read_addr_u8:
+ *	The address of the auxiliary mag register to read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_read_addr(
+u8 v_mag_read_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the mag read address*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_READ_ADDR__REG, &v_mag_read_addr_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief The mag write address selects the auxiliary mag register to write
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The address of the auxiliary mag register to write
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_write_addr(
+u8 *v_mag_write_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the address of last written */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_WRITE_ADDR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_write_addr_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_WRITE_ADDR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief The mag write address selects the auxiliary mag register to write
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The address of the auxiliary mag register to write
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_write_addr(
+u8 v_mag_write_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the data of mag address to write data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_WRITE_ADDR__REG, &v_mag_write_addr_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read the magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_write_data(
+u8 *v_mag_write_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_WRITE_DATA__REG, &v_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_write_data_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_WRITE_DATA);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_write_data(
+u8 v_mag_write_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt =
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_WRITE_DATA__REG, &v_mag_write_data_u8,
+			BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
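+/*
+ * Usage sketch (illustrative only): one possible manual write to an
+ * auxiliary mag register.  The target register 0x4B and data value 0x01
+ * are placeholders, and the ordering shown (data first, then the target
+ * address, which starts the transfer on the secondary interface) should be
+ * checked against the datasheet.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_mag_manual_enable(BMI160_ENABLE);
+ *	rslt += bmi160_set_mag_write_data(0x01);
+ *	rslt += bmi160_set_mag_write_addr(0x4B);
+ *	rslt += bmi160_set_mag_manual_enable(BMI160_DISABLE);
+ */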
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_ANY_MOTION_X_ENABLE
+ *       1         | BMI160_ANY_MOTION_Y_ENABLE
+ *       2         | BMI160_ANY_MOTION_Z_ENABLE
+ *       3         | BMI160_DOUBLE_TAP_ENABLE
+ *       4         | BMI160_SINGLE_TAP_ENABLE
+ *       5         | BMI160_ORIENT_ENABLE
+ *       6         | BMI160_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_enable_0(
+u8 v_enable_u8, u8 *v_intr_enable_zero_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* select interrupt to read*/
+		switch (v_enable_u8) {
+		case BMI160_ANY_MOTION_X_ENABLE:
+			/* read the any motion interrupt x data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE);
+		break;
+		case BMI160_ANY_MOTION_Y_ENABLE:
+			/* read the any motion interrupt y data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE);
+		break;
+		case BMI160_ANY_MOTION_Z_ENABLE:
+			/* read the any motion interrupt z data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE);
+		break;
+		case BMI160_DOUBLE_TAP_ENABLE:
+			/* read the double tap interrupt data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE);
+		break;
+		case BMI160_SINGLE_TAP_ENABLE:
+			/* read the single tap interrupt data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE);
+		break;
+		case BMI160_ORIENT_ENABLE:
+			/* read the orient interrupt data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE);
+		break;
+		case BMI160_FLAT_ENABLE:
+			/* read the flat interrupt data */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE);
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_ANY_MOTION_X_ENABLE
+ *       1         | BMI160_ANY_MOTION_Y_ENABLE
+ *       2         | BMI160_ANY_MOTION_Z_ENABLE
+ *       3         | BMI160_DOUBLE_TAP_ENABLE
+ *       4         | BMI160_SINGLE_TAP_ENABLE
+ *       5         | BMI160_ORIENT_ENABLE
+ *       6         | BMI160_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_enable_0(
+u8 v_enable_u8, u8 v_intr_enable_zero_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_enable_u8) {
+	case BMI160_ANY_MOTION_X_ENABLE:
+		/* write any motion x*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_ANY_MOTION_Y_ENABLE:
+		/* write any motion y*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_ANY_MOTION_Z_ENABLE:
+		/* write any motion z*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_DOUBLE_TAP_ENABLE:
+		/* write double tap*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_SINGLE_TAP_ENABLE:
+		/* write single tap */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_ORIENT_ENABLE:
+		/* write orient interrupt*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_FLAT_ENABLE:
+		/* write flat interrupt*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
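+/*
+ * Usage sketch (illustrative only): enabling the any-motion interrupt on
+ * all three accel axes through the INT_EN_0 helpers defined above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	rslt = bmi160_set_intr_enable_0(BMI160_ANY_MOTION_X_ENABLE,
+ *	BMI160_ENABLE);
+ *	rslt += bmi160_set_intr_enable_0(BMI160_ANY_MOTION_Y_ENABLE,
+ *	BMI160_ENABLE);
+ *	rslt += bmi160_set_intr_enable_0(BMI160_ANY_MOTION_Z_ENABLE,
+ *	BMI160_ENABLE);
+ */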
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It reads the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo watermark enable bits.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_HIGH_G_X_ENABLE
+ *       1         | BMI160_HIGH_G_Y_ENABLE
+ *       2         | BMI160_HIGH_G_Z_ENABLE
+ *       3         | BMI160_LOW_G_ENABLE
+ *       4         | BMI160_DATA_RDY_ENABLE
+ *       5         | BMI160_FIFO_FULL_ENABLE
+ *       6         | BMI160_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_enable_1(
+u8 v_enable_u8, u8 *v_intr_enable_1_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_enable_u8) {
+		case BMI160_HIGH_G_X_ENABLE:
+			/* read high_g_x interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE);
+			break;
+		case BMI160_HIGH_G_Y_ENABLE:
+			/* read high_g_y interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE);
+			break;
+		case BMI160_HIGH_G_Z_ENABLE:
+			/* read high_g_z interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE);
+			break;
+		case BMI160_LOW_G_ENABLE:
+			/* read low_g interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE);
+			break;
+		case BMI160_DATA_RDY_ENABLE:
+			/* read data ready interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE);
+			break;
+		case BMI160_FIFO_FULL_ENABLE:
+			/* read fifo full interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE);
+			break;
+		case BMI160_FIFO_WM_ENABLE:
+			/* read fifo water mark interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It sets the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo watermark enable bits.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_HIGH_G_X_ENABLE
+ *       1         | BMI160_HIGH_G_Y_ENABLE
+ *       2         | BMI160_HIGH_G_Z_ENABLE
+ *       3         | BMI160_LOW_G_ENABLE
+ *       4         | BMI160_DATA_RDY_ENABLE
+ *       5         | BMI160_FIFO_FULL_ENABLE
+ *       6         | BMI160_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_enable_1(
+u8 v_enable_u8, u8 v_intr_enable_1_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_enable_u8) {
+		case BMI160_HIGH_G_X_ENABLE:
+			/* write high_g_x interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_HIGH_G_Y_ENABLE:
+			/* write high_g_y interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_HIGH_G_Z_ENABLE:
+			/* write high_g_z interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_LOW_G_ENABLE:
+			/* write low_g interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_DATA_RDY_ENABLE:
+			/* write data ready interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_FIFO_FULL_ENABLE:
+			/* write fifo full interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_FIFO_WM_ENABLE:
+			/* write fifo water mark interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  This API is used to read
+ *	the interrupt enable byte2 from register 0x52, bits 0 to 2
+ *	@brief It reads the no-motion x, y and z interrupt enables
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_NOMOTION_X_ENABLE
+ *       1         | BMI160_NOMOTION_Y_ENABLE
+ *       2         | BMI160_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_enable_2(
+u8 v_enable_u8, u8 *v_intr_enable_2_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_enable_u8) {
+		case BMI160_NOMOTION_X_ENABLE:
+			/* read no motion x */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_2_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE);
+			break;
+		case BMI160_NOMOTION_Y_ENABLE:
+			/* read no motion y */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_2_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE);
+			break;
+		case BMI160_NOMOTION_Z_ENABLE:
+			/* read no motion z */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_2_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  This API is used to set
+ *	the interrupt enable byte2 in register 0x52, bits 0 to 2
+ *	@brief It writes the no-motion x, y and z interrupt enables
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_NOMOTION_X_ENABLE
+ *       1         | BMI160_NOMOTION_Y_ENABLE
+ *       2         | BMI160_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_enable_2(
+u8 v_enable_u8, u8 v_intr_enable_2_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_enable_u8) {
+	case BMI160_NOMOTION_X_ENABLE:
+		/* write no motion x */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr,
+		BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE,
+			v_intr_enable_2_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_NOMOTION_Y_ENABLE:
+		/* write no motion y */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr,
+		BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE,
+			v_intr_enable_2_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_NOMOTION_Z_ENABLE:
+		/* write no motion z */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr,
+		BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE,
+			v_intr_enable_2_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
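+/*
+ * Usage sketch (editor's illustration, not part of the Bosch driver):
+ * assuming p_bmi160 and its bus read/write hooks have been initialised
+ * earlier in the driver bring-up, the no-motion interrupt could be
+ * enabled on all three axes with the setter above.  Only macros that
+ * appear in this file are used.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_enable_2(BMI160_NOMOTION_X_ENABLE,
+ *		BMI160_ENABLE);
+ *	rslt += bmi160_set_intr_enable_2(BMI160_NOMOTION_Y_ENABLE,
+ *		BMI160_ENABLE);
+ *	rslt += bmi160_set_intr_enable_2(BMI160_NOMOTION_Z_ENABLE,
+ *		BMI160_ENABLE);
+ *	if (rslt != SUCCESS)
+ *		the bus transfer failed and should be reported by the caller
+ */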
+ /*!
+ *	@brief This API is used to read the
+ *	step detector interrupt enable from
+ *	register 0x52, bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_step_detector_enable(
+u8 *v_step_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the step detector interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_step_intr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to set the
+ *	step detector interrupt enable in
+ *	register 0x52, bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_detector_enable(
+u8 v_step_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr,
+		BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE,
+			v_step_intr_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr,
+			BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+	return com_rslt;
+}
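+/*
+ * Illustrative only (editor's sketch; the device structure is assumed to
+ * be initialised beforehand): the step detector interrupt enable can be
+ * written and then read back with the pair of APIs above.
+ *
+ *	u8 v_step_en_u8 = BMI160_INIT_VALUE;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_step_detector_enable(BMI160_ENABLE);
+ *	rslt += bmi160_get_step_detector_enable(&v_step_en_u8);
+ *	expect rslt == SUCCESS and v_step_en_u8 == BMI160_ENABLE
+ */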
+/*!
+ *	@brief  API used to get the configured trigger condition of the
+ *	interrupt1 and interrupt2 pins from register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_EDGE_CTRL
+ *       1         | BMI160_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_EDGE
+ *  0x00     |  BMI160_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_edge_ctrl(
+u8 v_channel_u8, u8 *v_intr_edge_ctrl_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_EDGE_CTRL:
+			/* read the edge trigger interrupt1*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_EDGE_CTRL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_edge_ctrl_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR1_EDGE_CTRL);
+			break;
+		case BMI160_INTR2_EDGE_CTRL:
+			/* read the edge trigger interrupt2*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_EDGE_CTRL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_edge_ctrl_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR2_EDGE_CTRL);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  API used to set the trigger condition of the
+ *	interrupt1 and interrupt2 pins in register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_EDGE_CTRL
+ *       1         | BMI160_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_EDGE
+ *  0x00     |  BMI160_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_edge_ctrl(
+u8 v_channel_u8, u8 v_intr_edge_ctrl_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_EDGE_CTRL:
+			/* write the edge trigger interrupt1*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_EDGE_CTRL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR1_EDGE_CTRL,
+				v_intr_edge_ctrl_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR1_EDGE_CTRL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case BMI160_INTR2_EDGE_CTRL:
+			/* write the edge trigger interrupt2*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_EDGE_CTRL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR2_EDGE_CTRL,
+				v_intr_edge_ctrl_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR2_EDGE_CTRL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
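+/*
+ * Example sketch (editor's addition, not vendor code; initialisation
+ * assumed): switching the INT1 pin to edge-triggered behaviour uses the
+ * channel and value macros documented above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_edge_ctrl(BMI160_INTR1_EDGE_CTRL, BMI160_EDGE);
+ *	if (rslt != SUCCESS)
+ *		the register 0x53 update did not complete
+ */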
+/*!
+ *	@brief  API used to get the configured level condition of the
+ *	interrupt1 and interrupt2 pins from register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_LEVEL
+ *       1         | BMI160_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_LEVEL_HIGH
+ *  0x00     |  BMI160_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_level(
+u8 v_channel_u8, u8 *v_intr_level_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_LEVEL:
+			/* read the interrupt1 level*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_LEVEL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_level_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR1_LEVEL);
+			break;
+		case BMI160_INTR2_LEVEL:
+			/* read the interrupt2 level*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_LEVEL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_level_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR2_LEVEL);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  API used to set the level condition of the
+ *	interrupt1 and interrupt2 pins in register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_LEVEL
+ *       1         | BMI160_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_LEVEL_HIGH
+ *  0x00     |  BMI160_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_level(
+u8 v_channel_u8, u8 v_intr_level_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_LEVEL:
+			/* write the interrupt1 level*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_LEVEL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR1_LEVEL, v_intr_level_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR1_LEVEL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case BMI160_INTR2_LEVEL:
+			/* write the interrupt2 level*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_LEVEL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR2_LEVEL, v_intr_level_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR2_LEVEL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
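+/*
+ * Editor's illustrative sketch (assumes p_bmi160 is initialised): making
+ * the INT1 pin active high and reading the current setting back.
+ *
+ *	u8 v_level_u8 = BMI160_INIT_VALUE;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_level(BMI160_INTR1_LEVEL, BMI160_LEVEL_HIGH);
+ *	rslt += bmi160_get_intr_level(BMI160_INTR1_LEVEL, &v_level_u8);
+ */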
+/*!
+ *	@brief  API used to get the configured output type of interrupt1
+ *	and interrupt2 from register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   output type selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_OUTPUT_TYPE
+ *       1         | BMI160_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_OPEN_DRAIN
+ *  0x00     |  BMI160_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_output_type(
+u8 v_channel_u8, u8 *v_intr_output_type_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_OUTPUT_TYPE:
+			/* read the output type of interrupt1*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_OUTPUT_TYPE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_output_type_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR1_OUTPUT_TYPE);
+			break;
+		case BMI160_INTR2_OUTPUT_TYPE:
+			/* read the output type of interrupt2*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_OUTPUT_TYPE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_output_type_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR2_OUTPUT_TYPE);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  API used to set the output type of interrupt1
+ *	and interrupt2 in register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   output type selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_OUTPUT_TYPE
+ *       1         | BMI160_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_OPEN_DRAIN
+ *  0x00     |  BMI160_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_output_type(
+u8 v_channel_u8, u8 v_intr_output_type_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_OUTPUT_TYPE:
+			/* write the output type of interrupt1*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_OUTPUT_TYPE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR1_OUTPUT_TYPE,
+				v_intr_output_type_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR1_OUTPUT_TYPE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case BMI160_INTR2_OUTPUT_TYPE:
+			/* write the output type of interrupt2*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_OUTPUT_TYPE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR2_OUTPUT_TYPE,
+				v_intr_output_type_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR2_OUTPUT_TYPE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
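+/*
+ * Usage sketch (editor's addition; device initialisation assumed):
+ * selecting a push-pull driver for INT1 and open-drain for INT2 with the
+ * setter above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_output_type(BMI160_INTR1_OUTPUT_TYPE,
+ *		BMI160_PUSH_PULL);
+ *	rslt += bmi160_set_intr_output_type(BMI160_INTR2_OUTPUT_TYPE,
+ *		BMI160_OPEN_DRAIN);
+ */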
+ /*!
+ *	@brief API used to get the output enable for the interrupt1
+ *	and interrupt2 pins from register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   output enable selection
+ *  ---------------|---------------------------
+ *       0         | BMI160_INTR1_OUTPUT_ENABLE
+ *       1         | BMI160_INTR2_OUTPUT_ENABLE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_output_enable(
+u8 v_channel_u8, u8 *v_output_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_OUTPUT_ENABLE:
+			/* read the output enable of interrupt1*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_OUTPUT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_enable_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR1_OUTPUT_ENABLE);
+			break;
+		case BMI160_INTR2_OUTPUT_ENABLE:
+			/* read the output enable of interrupt2*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_OUTPUT_EN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_enable_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR2_OUTPUT_EN);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief API used to set the output enable for the interrupt1
+ *	and interrupt2 pins in register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   output enable selection
+ *  ---------------|---------------------------
+ *       0         | BMI160_INTR1_OUTPUT_ENABLE
+ *       1         | BMI160_INTR2_OUTPUT_ENABLE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_output_enable(
+u8 v_channel_u8, u8 v_output_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_OUTPUT_ENABLE:
+			/* write the output enable of interrupt1*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_OUTPUT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR1_OUTPUT_ENABLE,
+				v_output_enable_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR1_OUTPUT_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_INTR2_OUTPUT_ENABLE:
+			/* write the output enable of interrupt2*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_OUTPUT_EN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR2_OUTPUT_EN,
+				v_output_enable_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR2_OUTPUT_EN__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
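+/*
+ * Illustrative sketch only (editor's addition): enabling the INT1 output
+ * driver in register 0x53.  The value 0x01 ("output enabled") is taken
+ * from the datasheet; the exact value macro to pass here is an assumption,
+ * so the raw value is shown instead.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_output_enable(BMI160_INTR1_OUTPUT_ENABLE, 0x01);
+ */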
+/*!
+*	@brief This API is used to get the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    BMI160_LATCH_DUR_NONE              |      0x00
+*    BMI160_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    BMI160_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    BMI160_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    BMI160_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    BMI160_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    BMI160_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    BMI160_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    BMI160_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    BMI160_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    BMI160_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    BMI160_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    BMI160_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    BMI160_LATCH_DUR_1_28_SEC          |      0x0D
+*    BMI160_LATCH_DUR_2_56_SEC          |      0x0E
+*    BMI160_LATCHED                     |      0x0F
+*
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_latch_intr(
+u8 *v_latch_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the latch duration value */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_LATCH__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_latch_intr_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LATCH);
+		}
+	return com_rslt;
+}
+/*!
+*	@brief This API is used to set the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    BMI160_LATCH_DUR_NONE              |      0x00
+*    BMI160_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    BMI160_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    BMI160_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    BMI160_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    BMI160_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    BMI160_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    BMI160_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    BMI160_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    BMI160_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    BMI160_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    BMI160_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    BMI160_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    BMI160_LATCH_DUR_1_28_SEC          |      0x0D
+*    BMI160_LATCH_DUR_2_56_SEC          |      0x0E
+*    BMI160_LATCHED                     |      0x0F
+*
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_latch_intr(u8 v_latch_intr_u8)
+{
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_latch_intr_u8 <= BMI160_MAX_LATCH_INTR) {
+			/* write the latch duration value */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_LATCH__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_LATCH, v_latch_intr_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr, BMI160_USER_INTR_LATCH__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
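+/*
+ * Editor's usage sketch (device initialisation assumed): latching the
+ * interrupt status for 40 ms and reading the duration back.  Values above
+ * BMI160_MAX_LATCH_INTR are rejected by the setter above with
+ * E_BMI160_OUT_OF_RANGE.
+ *
+ *	u8 v_latch_u8 = BMI160_INIT_VALUE;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_latch_intr(BMI160_LATCH_DUR_40_MILLI_SEC);
+ *	rslt += bmi160_get_latch_intr(&v_latch_u8);
+ */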
+/*!
+ *	@brief API used to get input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_INPUT_ENABLE
+ *       1         | BMI160_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_input_enable(
+u8 v_channel_u8, u8 *v_input_en_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read input enable of interrupt1 and interrupt2*/
+		case BMI160_INTR1_INPUT_ENABLE:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_INPUT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_input_en_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR1_INPUT_ENABLE);
+			break;
+		case BMI160_INTR2_INPUT_ENABLE:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_INPUT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_input_en_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR2_INPUT_ENABLE);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief API used to set input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_INPUT_ENABLE
+ *       1         | BMI160_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_input_enable(
+u8 v_channel_u8, u8 v_input_en_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write input enable of interrupt1 and interrupt2*/
+	case BMI160_INTR1_INPUT_ENABLE:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR1_INPUT_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR1_INPUT_ENABLE, v_input_en_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR1_INPUT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case BMI160_INTR2_INPUT_ENABLE:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR2_INPUT_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR2_INPUT_ENABLE, v_input_en_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR2_INPUT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
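+/*
+ * Sketch for illustration (editor's addition, not vendor code;
+ * initialisation assumed): turning the INT2 pad into an input using the
+ * macros from the table above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_input_enable(BMI160_INTR2_INPUT_ENABLE,
+ *		BMI160_INPUT);
+ */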
+ /*!
+ *	@brief reads the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_LOW_G
+ *       1         | BMI160_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g(
+u8 v_channel_u8, u8 *v_intr_low_g_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the low_g interrupt */
+		case BMI160_INTR1_MAP_LOW_G:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_low_g_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_LOW_G);
+			break;
+		case BMI160_INTR2_MAP_LOW_G:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_low_g_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_LOW_G);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief set the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_LOW_G
+ *       1         | BMI160_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g(
+u8 v_channel_u8, u8 v_intr_low_g_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+u8 v_step_cnt_stat_u8 = BMI160_INIT_VALUE;
+u8 v_step_det_stat_u8 = BMI160_INIT_VALUE;
+
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	/* check the step detector interrupt enable status*/
+	com_rslt = bmi160_get_step_detector_enable(&v_step_det_stat_u8);
+	/* disable the step detector interrupt */
+	if (v_step_det_stat_u8 != BMI160_INIT_VALUE)
+		com_rslt += bmi160_set_step_detector_enable(BMI160_INIT_VALUE);
+	/* check the step counter interrupt enable status*/
+	com_rslt += bmi160_get_step_counter_enable(&v_step_cnt_stat_u8);
+	/* disable the step counter interrupt */
+	if (v_step_cnt_stat_u8 != BMI160_INIT_VALUE)
+			com_rslt += bmi160_set_step_counter_enable(
+			BMI160_INIT_VALUE);
+	switch (v_channel_u8) {
+	/* write the low_g interrupt*/
+	case BMI160_INTR1_MAP_LOW_G:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_LOW_G, v_intr_low_g_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_INTR2_MAP_LOW_G:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_LOW_G, v_intr_low_g_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
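+/*
+ * Editor's illustrative sketch (initialisation assumed): routing the
+ * low-g interrupt to the INT1 pin.  Note that, as implemented above, the
+ * setter first disables any enabled step detector/step counter interrupt
+ * before touching the mapping register.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_low_g(BMI160_INTR1_MAP_LOW_G, BMI160_ENABLE);
+ */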
+/*!
+ *	@brief Reads the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_HIGH_G
+ *       1         | BMI160_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g(
+u8 v_channel_u8, u8 *v_intr_high_g_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* read the high_g interrupt*/
+		switch (v_channel_u8) {
+		case BMI160_INTR1_MAP_HIGH_G:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_HIGH_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_high_g_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_HIGH_G);
+		break;
+		case BMI160_INTR2_MAP_HIGH_G:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_HIGH_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_high_g_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_HIGH_G);
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_HIGH_G
+ *       1         | BMI160_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g(
+u8 v_channel_u8, u8 v_intr_high_g_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the high_g interrupt*/
+	case BMI160_INTR1_MAP_HIGH_G:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_0_INTR1_HIGH_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_HIGH_G, v_intr_high_g_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_HIGH_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case BMI160_INTR2_MAP_HIGH_G:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_2_INTR2_HIGH_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_HIGH_G, v_intr_high_g_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_HIGH_G__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
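+/*
+ * Usage sketch (editor's addition, assumes a configured p_bmi160):
+ * mapping the high-g interrupt to INT2 and confirming the mapping with
+ * the getter above.
+ *
+ *	u8 v_map_u8 = BMI160_INIT_VALUE;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_high_g(BMI160_INTR2_MAP_HIGH_G, BMI160_ENABLE);
+ *	rslt += bmi160_get_intr_high_g(BMI160_INTR2_MAP_HIGH_G, &v_map_u8);
+ */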
+/*!
+ *	@brief Reads the Any motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ANY_MOTION
+ *       1         | BMI160_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_any_motion(
+u8 v_channel_u8, u8 *v_intr_any_motion_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the any motion interrupt */
+		case BMI160_INTR1_MAP_ANY_MOTION:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_any_motion_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION);
+		break;
+		case BMI160_INTR2_MAP_ANY_MOTION:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_any_motion_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION);
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the Any motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ANY_MOTION
+ *       1         | BMI160_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_any_motion(
+u8 v_channel_u8, u8 v_intr_any_motion_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+u8 sig_mot_stat = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	/* read the status of significant motion interrupt */
+	com_rslt = bmi160_get_intr_significant_motion_select(&sig_mot_stat);
+	/* disable the significant motion interrupt */
+	if (sig_mot_stat != BMI160_INIT_VALUE)
+		com_rslt += bmi160_set_intr_significant_motion_select(
+		BMI160_INIT_VALUE);
+	switch (v_channel_u8) {
+	/* write the any motion interrupt */
+	case BMI160_INTR1_MAP_ANY_MOTION:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION,
+			v_intr_any_motion_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case BMI160_INTR2_MAP_ANY_MOTION:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION,
+			v_intr_any_motion_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
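+/*
+ * Illustrative sketch only (editor's addition): mapping the any-motion
+ * interrupt to INT1.  As coded above, the setter clears the significant
+ * motion selection first, since that select bit chooses between the
+ * any-motion and significant motion behaviour.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_any_motion(BMI160_INTR1_MAP_ANY_MOTION,
+ *		BMI160_ENABLE);
+ */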
+/*!
+ *	@brief Reads the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_NOMO
+ *       1         | BMI160_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_nomotion(
+u8 v_channel_u8, u8 *v_intr_nomotion_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the no motion interrupt*/
+		case BMI160_INTR1_MAP_NOMO:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_NOMOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_nomotion_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_NOMOTION);
+			break;
+		case BMI160_INTR2_MAP_NOMO:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_NOMOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_nomotion_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_NOMOTION);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_NOMO
+ *       1         | BMI160_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_nomotion(
+u8 v_channel_u8, u8 v_intr_nomotion_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the no motion interrupt*/
+	case BMI160_INTR1_MAP_NOMO:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_0_INTR1_NOMOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_NOMOTION,
+			v_intr_nomotion_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_NOMOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_INTR2_MAP_NOMO:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_2_INTR2_NOMOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_NOMOTION,
+			v_intr_nomotion_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_NOMOTION__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
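+/*
+ * Editor's sketch (not part of the vendor sources; initialisation
+ * assumed): one way to wire up no-motion detection is to enable an axis
+ * interrupt and then map the result to a pin, e.g. INT2.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_enable_2(BMI160_NOMOTION_X_ENABLE,
+ *		BMI160_ENABLE);
+ *	rslt += bmi160_set_intr_nomotion(BMI160_INTR2_MAP_NOMO,
+ *		BMI160_ENABLE);
+ */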
+/*!
+ *	@brief Reads the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DOUBLE_TAP
+ *       1         | BMI160_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_double_tap(
+u8 v_channel_u8, u8 *v_intr_double_tap_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case BMI160_INTR1_MAP_DOUBLE_TAP:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_double_tap_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP);
+			break;
+		case BMI160_INTR2_MAP_DOUBLE_TAP:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_double_tap_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DOUBLE_TAP
+ *       1         | BMI160_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_double_tap(
+u8 v_channel_u8, u8 v_intr_double_tap_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* set the double tap interrupt */
+	case BMI160_INTR1_MAP_DOUBLE_TAP:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP,
+			v_intr_double_tap_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_INTR2_MAP_DOUBLE_TAP:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP,
+			v_intr_double_tap_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
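+/*
+ * Usage sketch (editor's illustration; assumes the tap engine itself is
+ * enabled elsewhere and p_bmi160 is initialised): routing the double tap
+ * interrupt to the INT1 pin.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_double_tap(BMI160_INTR1_MAP_DOUBLE_TAP,
+ *		BMI160_ENABLE);
+ */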
+/*!
+ *	@brief Reads the Single Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_SINGLE_TAP
+ *       1         | BMI160_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap  enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_single_tap(
+u8 v_channel_u8, u8 *v_intr_single_tap_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* reads the single tap interrupt*/
+		case BMI160_INTR1_MAP_SINGLE_TAP:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_single_tap_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP);
+			break;
+		case BMI160_INTR2_MAP_SINGLE_TAP:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_single_tap_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Writes the Single Tap interrupt
+ *	mapping for interrupt1 and interrupt2
+ *	to the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_SINGLE_TAP
+ *       1         | BMI160_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_single_tap(
+u8 v_channel_u8, u8 v_intr_single_tap_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the single tap interrupt */
+	case BMI160_INTR1_MAP_SINGLE_TAP:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP,
+			v_intr_single_tap_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_INTR2_MAP_SINGLE_TAP:
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP,
+			v_intr_single_tap_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
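+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): combines the tap mapping setters above to route both
+ * tap engines to the INT1 pin. The wrapper name and the
+ * BMI160_EXAMPLE_SKETCHES guard are hypothetical; the channel macros and
+ * BMI160_ENABLE follow the value tables documented above.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_map_tap_to_intr1(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	/* route the double tap engine to INT1 (register 0x55 bit 4) */
+	com_rslt = bmi160_set_intr_double_tap(
+	BMI160_INTR1_MAP_DOUBLE_TAP, BMI160_ENABLE);
+	/* route the single tap engine to INT1 (register 0x55 bit 5) */
+	com_rslt += bmi160_set_intr_single_tap(
+	BMI160_INTR1_MAP_SINGLE_TAP, BMI160_ENABLE);
+	return com_rslt;
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */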
+/*!
+ *	@brief Reads the Orient interrupt
+ *	mapping for interrupt1 and interrupt2
+ *	from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ORIENT
+ *       1         | BMI160_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_u8 : The value of orient enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient(
+u8 v_channel_u8, u8 *v_intr_orient_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the orientation interrupt*/
+		case BMI160_INTR1_MAP_ORIENT:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_ORIENT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_orient_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_ORIENT);
+			break;
+		case BMI160_INTR2_MAP_ORIENT:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_ORIENT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_orient_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_ORIENT);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Writes the Orient interrupt
+ *	mapping for interrupt1 and interrupt2
+ *	to the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ORIENT
+ *       1         | BMI160_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_u8 : The value of orient enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient(
+u8 v_channel_u8, u8 v_intr_orient_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the orientation interrupt*/
+	case BMI160_INTR1_MAP_ORIENT:
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_0_INTR1_ORIENT__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_ORIENT, v_intr_orient_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_ORIENT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case BMI160_INTR2_MAP_ORIENT:
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_2_INTR2_ORIENT__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_ORIENT, v_intr_orient_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_ORIENT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief Reads the Flat interrupt
+ *	mapping for interrupt1 and interrupt2
+ *	from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FLAT
+ *       1         | BMI160_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat(
+u8 v_channel_u8, u8 *v_intr_flat_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the flat interrupt*/
+		case BMI160_INTR1_MAP_FLAT:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_FLAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_flat_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_0_INTR1_FLAT);
+			break;
+		case BMI160_INTR2_MAP_FLAT:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_FLAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_flat_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_2_INTR2_FLAT);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief Writes the Flat interrupt
+ *	mapping for interrupt1 and interrupt2
+ *	to the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FLAT
+ *       1         | BMI160_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat(
+u8 v_channel_u8, u8 v_intr_flat_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* write the flat interrupt */
+		case BMI160_INTR1_MAP_FLAT:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_0_INTR1_FLAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_MAP_0_INTR1_FLAT,
+				v_intr_flat_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_MAP_0_INTR1_FLAT__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case BMI160_INTR2_MAP_FLAT:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_2_INTR2_FLAT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_MAP_2_INTR2_FLAT,
+				v_intr_flat_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_MAP_2_INTR2_FLAT__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
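+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): maps the orientation and flat engines to the INT2 pin
+ * using the setters above. The wrapper name and the BMI160_EXAMPLE_SKETCHES
+ * guard are hypothetical; the channel macros and BMI160_ENABLE follow the
+ * value tables documented above.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_map_orient_flat_to_intr2(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	/* orientation interrupt on INT2 (register 0x57 bit 6) */
+	com_rslt = bmi160_set_intr_orient(
+	BMI160_INTR2_MAP_ORIENT, BMI160_ENABLE);
+	/* flat interrupt on INT2 (register 0x57 bit 7) */
+	com_rslt += bmi160_set_intr_flat(
+	BMI160_INTR2_MAP_FLAT, BMI160_ENABLE);
+	return com_rslt;
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */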
+/*!
+ *	@brief Reads PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_PMUTRIG
+ *       1         | BMI160_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_pmu_trig(
+u8 v_channel_u8, u8 *v_intr_pmu_trig_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the pmu trigger interrupt*/
+		case BMI160_INTR1_MAP_PMUTRIG:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_pmu_trig_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG);
+			break;
+		case BMI160_INTR2_MAP_PMUTRIG:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_pmu_trig_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_PMUTRIG
+ *       1         | BMI160_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | trigger enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_pmu_trig(
+u8 v_channel_u8, u8 v_intr_pmu_trig_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the pmu trigger interrupt */
+	case BMI160_INTR1_MAP_PMUTRIG:
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG,
+			v_intr_pmu_trig_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case BMI160_INTR2_MAP_PMUTRIG:
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG,
+			v_intr_pmu_trig_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
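+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): routes the PMU trigger interrupt to INT1 via the
+ * setter above. The wrapper name and the BMI160_EXAMPLE_SKETCHES guard are
+ * hypothetical; BMI160_INTR1_MAP_PMUTRIG and BMI160_ENABLE follow the value
+ * tables documented above.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_map_pmu_trig_to_intr1(void)
+{
+	/* PMU trigger on INT1 (register 0x56 bit 0) */
+	return bmi160_set_intr_pmu_trig(
+	BMI160_INTR1_MAP_PMUTRIG, BMI160_ENABLE);
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */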
+/*!
+ *	@brief Reads FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_FULL
+ *       1         | BMI160_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_fifo_full(
+u8 v_channel_u8, u8 *v_intr_fifo_full_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the fifo full interrupt */
+		case BMI160_INTR1_MAP_FIFO_FULL:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_full_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL);
+		break;
+		case BMI160_INTR2_MAP_FIFO_FULL:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_full_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL);
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_FULL
+ *       1         | BMI160_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_fifo_full(
+u8 v_channel_u8, u8 v_intr_fifo_full_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* write the fifo full interrupt */
+		case BMI160_INTR1_MAP_FIFO_FULL:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL,
+				v_intr_fifo_full_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case BMI160_INTR2_MAP_FIFO_FULL:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL,
+				v_intr_fifo_full_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
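+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): routes the FIFO full interrupt to the INT2 pin via
+ * the setter above. The wrapper name and the BMI160_EXAMPLE_SKETCHES guard
+ * are hypothetical; BMI160_INTR2_MAP_FIFO_FULL and BMI160_ENABLE follow the
+ * value tables documented above.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_map_fifo_full_to_intr2(void)
+{
+	/* FIFO full on INT2 (register 0x56 bit 1) */
+	return bmi160_set_intr_fifo_full(
+	BMI160_INTR2_MAP_FIFO_FULL, BMI160_ENABLE);
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */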
+/*!
+ *	@brief Reads FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_WM
+ *       1         | BMI160_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_fifo_wm(
+u8 v_channel_u8, u8 *v_intr_fifo_wm_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the fifo water mark interrupt */
+		case BMI160_INTR1_MAP_FIFO_WM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_wm_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM);
+			break;
+		case BMI160_INTR2_MAP_FIFO_WM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_wm_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_WM
+ *       1         | BMI160_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_fifo_wm(
+u8 v_channel_u8, u8 v_intr_fifo_wm_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* write the fifo water mark interrupt */
+		case BMI160_INTR1_MAP_FIFO_WM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM,
+				v_intr_fifo_wm_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case BMI160_INTR2_MAP_FIFO_WM:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM,
+				v_intr_fifo_wm_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+				dev_addr,
+				BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
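+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): maps the FIFO watermark interrupt to INT1 and reads
+ * the mapping back with the getter above. The wrapper name and the
+ * BMI160_EXAMPLE_SKETCHES guard are hypothetical; the channel macro and
+ * BMI160_ENABLE follow the value tables documented above.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_map_fifo_wm_to_intr1(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_mapped_u8 = BMI160_INIT_VALUE;
+
+	/* FIFO watermark on INT1 (register 0x56 bit 6) */
+	com_rslt = bmi160_set_intr_fifo_wm(
+	BMI160_INTR1_MAP_FIFO_WM, BMI160_ENABLE);
+	/* read the mapping back to confirm the write */
+	com_rslt += bmi160_get_intr_fifo_wm(
+	BMI160_INTR1_MAP_FIFO_WM, &v_mapped_u8);
+	return com_rslt;
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */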
+/*!
+ *	@brief Reads Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DATA_RDY
+ *       1         | BMI160_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_data_rdy(
+u8 v_channel_u8, u8 *v_intr_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/*Read Data Ready interrupt*/
+		case BMI160_INTR1_MAP_DATA_RDY:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_data_rdy_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY);
+			break;
+		case BMI160_INTR2_MAP_DATA_RDY:
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_data_rdy_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY);
+			break;
+		default:
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DATA_RDY
+ *       1         | BMI160_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_data_rdy(
+u8 v_channel_u8, u8 v_intr_data_rdy_u8)
+{
+/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/*Write Data Ready interrupt*/
+	case BMI160_INTR1_MAP_DATA_RDY:
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY,
+			v_intr_data_rdy_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case BMI160_INTR2_MAP_DATA_RDY:
+		com_rslt =
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->
+		dev_addr, BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY,
+			v_intr_data_rdy_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC(p_bmi160->
+			dev_addr, BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
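+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): routes the data ready interrupt to the INT1 pin via
+ * the setter above. The wrapper name and the BMI160_EXAMPLE_SKETCHES guard
+ * are hypothetical; BMI160_INTR1_MAP_DATA_RDY and BMI160_ENABLE follow the
+ * value tables documented above.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_map_data_rdy_to_intr1(void)
+{
+	/* data ready on INT1 (register 0x56 bit 7) */
+	return bmi160_set_intr_data_rdy(
+	BMI160_INTR1_MAP_DATA_RDY, BMI160_ENABLE);
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */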
+ /*!
+ *	@brief This API reads the data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_source(u8 *v_tap_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the tap source interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_source_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_source(
+u8 v_tap_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_tap_source_u8 <= BMI160_MAX_VALUE_SOURCE_INTR) {
+			/* write the tap source interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE,
+				v_tap_source_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the data source for the
+ *	interrupt engine for the low-g and high-g interrupts
+ *	from the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the low/high-g source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_high_source(
+u8 *v_low_high_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the high_low_g source interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_high_source_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the data source for the
+ *	interrupt engine for the low-g and high-g interrupts
+ *	to the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the low/high-g source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_high_source(
+u8 v_low_high_source_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	if (v_low_high_source_u8 <= BMI160_MAX_VALUE_SOURCE_INTR) {
+		/* write the high_low_g source interrupt */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE,
+			v_low_high_source_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief This API reads the data source for the
+ *	interrupt engine for the no-motion and any-motion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_motion_source(
+u8 *v_motion_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the any/no motion interrupt  */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_motion_source_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the data source for the
+ *	interrupt engine for the no-motion and any-motion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_motion_source(
+u8 v_motion_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_motion_source_u8 <= BMI160_MAX_VALUE_SOURCE_INTR) {
+			/* write the any/no motion interrupt  */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE,
+				v_motion_source_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
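+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): selects pre-filtered data for all three
+ * interrupt-engine data sources configured above. The wrapper name and the
+ * BMI160_EXAMPLE_SKETCHES guard are hypothetical; the literal 0x00
+ * ("filtered data") follows the value tables documented above.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_use_filtered_intr_sources(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	/* tap engine: filtered data (register 0x58 bit 3) */
+	com_rslt = bmi160_set_intr_tap_source(0x00);
+	/* low-g/high-g engine: filtered data (register 0x58 bit 7) */
+	com_rslt += bmi160_set_intr_low_high_source(0x00);
+	/* any/no-motion engine: filtered data (register 0x59 bit 7) */
+	com_rslt += bmi160_set_intr_motion_source(0x00);
+	return com_rslt;
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */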
+ /*!
+ *	@brief This API is used to read the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note The low_g duration sets the trigger delay according to
+ *	"(v_low_g_durn_u8 * 2.5)ms" in a range from 2.5ms to 640ms.
+ *	The default value corresponds to a delay of 20ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_durn(
+u8 *v_low_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the low_g interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_durn_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_0_INTR_LOW_DURN);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note The low_g duration sets the trigger delay according to
+ *	"(v_low_g_durn_u8 * 2.5)ms" in a range from 2.5ms to 640ms.
+ *	The default value corresponds to a delay of 20ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_durn(u8 v_low_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the low_g interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_WRITE_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__REG,
+			&v_low_g_durn_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note The low_g interrupt trigger threshold is
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	and 3.91mg for v_low_g_thres_u8 = 0.
+ *	The threshold range is from 3.91mg to 2000mg.
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_thres(
+u8 *v_low_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read low_g threshold */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_thres_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_1_INTR_LOW_THRES);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note The low_g interrupt trigger threshold is
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	and 3.91mg for v_low_g_thres_u8 = 0.
+ *	The threshold range is from 3.91mg to 2000mg.
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_thres(
+u8 v_low_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write low_g threshold */
+			com_rslt = p_bmi160->BMI160_BUS_WRITE_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__REG,
+			&v_low_g_thres_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API Reads Low-g interrupt hysteresis
+ *	from the register 0x5C bit 0 to 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_hyst(
+u8 *v_low_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read low_g hysteresis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_hyst_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the Low-g interrupt hysteresis
+ *	to the register 0x5C bits 0 and 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_hyst(
+u8 v_low_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write low_g hysteresis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST,
+				v_low_hyst_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads Low-g interrupt mode
+ *	from the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_mode(u8 *v_low_g_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/*read Low-g interrupt mode*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_mode_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the Low-g interrupt mode
+ *	to the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_mode(
+u8 v_low_g_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_low_g_mode_u8 <= BMI160_MAX_VALUE_LOW_G_MODE) {
+			/*write Low-g interrupt mode*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE,
+				v_low_g_mode_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
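+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): configures a low-g detection of roughly 375mg
+ * threshold, 125mg hysteresis and 50ms duration in single-axis mode, using
+ * the conversion factors noted above (2.5ms/LSB duration, 7.81mg/LSB
+ * threshold, 125mg/LSB hysteresis). The wrapper name, the register values
+ * chosen here and the BMI160_EXAMPLE_SKETCHES guard are hypothetical.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_config_low_g(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	/* 50ms / 2.5ms per LSB = 20 */
+	com_rslt = bmi160_set_intr_low_g_durn(20);
+	/* ~375mg / 7.81mg per LSB = 48 */
+	com_rslt += bmi160_set_intr_low_g_thres(48);
+	/* 125mg / 125mg per LSB = 1 */
+	com_rslt += bmi160_set_intr_low_g_hyst(1);
+	/* 0 = single-axis mode (see table above) */
+	com_rslt += bmi160_set_intr_low_g_mode(0);
+	return com_rslt;
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */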
+/*!
+ *	@brief This API reads High-g interrupt hysteresis
+ *	from the register 0x5C bit 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g_hyst(
+u8 *v_high_g_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read high_g hysteresis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_hyst_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the High-g interrupt hysteresis
+ *	to the register 0x5C bits 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g_hyst(
+u8 v_high_g_hyst_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* write high_g hysteresis*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+		p_bmi160->dev_addr,
+		BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST,
+			v_high_g_hyst_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API is used to read Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	v_high_g_durn_u8 * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g_durn(
+u8 *v_high_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read high_g duration*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_durn_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	v_high_g_durn_u8 * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g_durn(
+u8 v_high_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write high_g duration*/
+			com_rslt = p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__REG,
+			&v_high_g_durn_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the high-g interrupt from the register 0x5E bits 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : Pointer holding the value of Threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g_thres(
+u8 *v_high_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_thres_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES);
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the high-g interrupt from the register 0x5E bits 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : The value of the high_g threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g_thres(
+u8 v_high_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		com_rslt = p_bmi160->BMI160_BUS_WRITE_FUNC(
+		p_bmi160->dev_addr,
+		BMI160_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__REG,
+		&v_high_g_thres_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	}
+	return com_rslt;
+}
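+/*
+ * Illustrative usage sketch (compiled out by default, not part of the
+ * original Bosch API): configures a high-g event of roughly 1.5g threshold,
+ * 125mg hysteresis and 10ms delay, assuming the accel is in the 2g range so
+ * that the 7.81mg/LSB threshold and 125mg/LSB hysteresis scaling from the
+ * notes above applies. The wrapper name, the register values chosen here and
+ * the BMI160_EXAMPLE_SKETCHES guard are hypothetical.
+ */
+#ifdef BMI160_EXAMPLE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_config_high_g(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	/* 125mg / 125mg per LSB = 1 (2g range) */
+	com_rslt = bmi160_set_intr_high_g_hyst(1);
+	/* 10ms / 2.5ms per LSB = 4 */
+	com_rslt += bmi160_set_intr_high_g_durn(4);
+	/* ~1500mg / 7.81mg per LSB = 192 (2g range) */
+	com_rslt += bmi160_set_intr_high_g_thres(192);
+	return com_rslt;
+}
+#endif /* BMI160_EXAMPLE_SKETCHES */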
+/*!
+ *	@brief This API reads any motion duration
+ *	from the register 0x5F bit 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration can be calculated by "v_any_motion_durn_u8 + 1"
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_any_motion_durn(
+u8 *v_any_motion_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* read any motion duration*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		*v_any_motion_durn_u8 = BMI160_GET_BITSLICE
+		(v_data_u8,
+		BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN);
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the any motion duration
+ *	to the register 0x5F bits 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration can be calculated by "v_any_motion_durn_u8 + 1"
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_any_motion_durn(
+u8 v_any_motion_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* write any motion duration*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN,
+			v_any_motion_durn_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	@note v_slow_no_motion_u8(5:4)=0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4)=0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5)=0b1 ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_slow_no_motion_durn(
+u8 *v_slow_no_motion_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* read slow no motion duration*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		*v_slow_no_motion_u8 = BMI160_GET_BITSLICE
+		(v_data_u8,
+		BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API write Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	@note v_slow_no_motion_u8(5:4)=0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4)=0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5)=0b1 ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_slow_no_motion_durn(
+u8 v_slow_no_motion_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	/* write slow no motion duration*/
+	com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+	(p_bmi160->dev_addr,
+	BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG,
+	&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data_u8 = BMI160_SET_BITSLICE
+		(v_data_u8,
+		BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN,
+		v_slow_no_motion_u8);
+		com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief This API is used to read threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_any_motion_thres(
+u8 *v_any_motion_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read any motion threshold*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_thres_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_any_motion_thres(
+u8 v_any_motion_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* write any motion threshold*/
+		com_rslt = p_bmi160->BMI160_BUS_WRITE_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__REG,
+		&v_any_motion_thres_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	}
+	return com_rslt;
+}
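+/*
+ * Illustrative usage sketch (not part of the original driver sources):
+ * it programs the two any-motion parameters defined above.  The chosen
+ * numbers (2 consecutive samples, roughly 78 mg assuming a +/-2g accel
+ * range) are example values, not defaults mandated by the device.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += bmi160_set_intr_any_motion_durn(1);	// (1 + 1) samples
+ *	rc += bmi160_set_intr_any_motion_thres(20);	// 20 * 3.91 mg at 2g
+ *	if (rc != SUCCESS)
+ *		;	// at least one register access failed
+ */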
+ /*!
+ *	@brief This API is used to read threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_slow_no_motion_thres(
+u8 *v_slow_no_motion_thres_u8)
+{
+BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* read slow no motion threshold*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		*v_slow_no_motion_thres_u8 =
+		BMI160_GET_BITSLICE(v_data_u8,
+		BMI160_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_slow_no_motion_thres(
+u8 v_slow_no_motion_thres_u8)
+{
+BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* write slow no motion threshold*/
+		com_rslt = p_bmi160->BMI160_BUS_WRITE_FUNC(
+		p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__REG,
+		&v_slow_no_motion_thres_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to read
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_slow_no_motion_select(
+u8 *v_intr_slow_no_motion_select_u8)
+{
+BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* read slow no motion select*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+		p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		*v_intr_slow_no_motion_select_u8 =
+		BMI160_GET_BITSLICE(v_data_u8,
+		BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_slow_no_motion_select(
+u8 v_intr_slow_no_motion_select_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+} else {
+if (v_intr_slow_no_motion_select_u8 <= BMI160_MAX_VALUE_NO_MOTION) {
+	/* write slow no motion select*/
+	com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+	(p_bmi160->dev_addr,
+	BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG,
+	&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+		BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT,
+		v_intr_slow_no_motion_select_u8);
+		com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	}
+} else {
+com_rslt = E_BMI160_OUT_OF_RANGE;
+}
+}
+return com_rslt;
+}
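+/*
+ * Illustrative usage sketch (not part of the original driver sources):
+ * it selects no-motion mode and programs its duration and threshold
+ * using the encodings documented above.  The duration code 0x05
+ * ((5 + 1) * 1.28 s) and the 10 LSB threshold are example values only.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += bmi160_set_intr_slow_no_motion_select(0x01);	// NO_MOTION
+ *	rc += bmi160_set_intr_slow_no_motion_durn(0x05);	// 7.68 s
+ *	rc += bmi160_set_intr_slow_no_motion_thres(10);	// 10 * 3.91 mg at 2g
+ *	if (rc != SUCCESS)
+ *		;	// propagate the bus error
+ */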
+ /*!
+ *	@brief This API is used to select
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_significant_motion_select(
+u8 *v_intr_significant_motion_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the significant or any motion interrupt*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_significant_motion_select_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write the selection of
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_significant_motion_select(
+u8 v_intr_significant_motion_select_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	if (v_intr_significant_motion_select_u8 <=
+	BMI160_MAX_VALUE_SIGNIFICANT_MOTION) {
+		/* write the significant or any motion interrupt*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT,
+			v_intr_significant_motion_select_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to read
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_significant_motion_skip(
+u8 *v_int_sig_mot_skip_u8)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read significant skip time*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_int_sig_mot_skip_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_significant_motion_skip(
+u8 v_int_sig_mot_skip_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_int_sig_mot_skip_u8 <= BMI160_MAX_UNDER_SIG_MOTION) {
+			/* write significant skip time*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP,
+				v_int_sig_mot_skip_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to read
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 second
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_significant_motion_proof(
+u8 *v_significant_motion_proof_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read significant proof time */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_significant_motion_proof_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 second
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_significant_motion_proof(
+u8 v_significant_motion_proof_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_significant_motion_proof_u8
+		<= BMI160_MAX_UNDER_SIG_MOTION) {
+			/* write significant proof time */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF,
+				v_significant_motion_proof_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
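+/*
+ * Illustrative usage sketch (not part of the original driver sources):
+ * it switches the shared motion engine to significant-motion mode and
+ * picks a skip and proof time from the tables above.  The 3 s skip /
+ * 0.5 s proof combination is just one example configuration.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += bmi160_set_intr_significant_motion_select(0x01);
+ *	rc += bmi160_set_intr_significant_motion_skip(0x01);	// 3 s skip
+ *	rc += bmi160_set_intr_significant_motion_proof(0x01);	// 0.5 s proof
+ *	if (rc != SUCCESS)
+ *		;	// one of the read-modify-write cycles failed
+ */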
+/*!
+ *	@brief This API is used to get the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_DURN_50MS
+ *  0x01     | BMI160_TAP_DURN_100MS
+ *  0x02     | BMI160_TAP_DURN_150MS
+ *  0x03     | BMI160_TAP_DURN_200MS
+ *  0x04     | BMI160_TAP_DURN_250MS
+ *  0x05     | BMI160_TAP_DURN_375MS
+ *  0x06     | BMI160_TAP_DURN_500MS
+ *  0x07     | BMI160_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_durn(
+u8 *v_tap_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap duration*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_DURN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_durn_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_DURN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_DURN_50MS
+ *  0x01     | BMI160_TAP_DURN_100MS
+ *  0x02     | BMI160_TAP_DURN_150MS
+ *  0x03     | BMI160_TAP_DURN_200MS
+ *  0x04     | BMI160_TAP_DURN_250MS
+ *  0x05     | BMI160_TAP_DURN_375MS
+ *  0x06     | BMI160_TAP_DURN_500MS
+ *  0x07     | BMI160_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_durn(
+u8 v_tap_durn_u8)
+{
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_tap_durn_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_tap_durn_u8 <= BMI160_MAX_TAP_TURN) {
+			switch (v_tap_durn_u8) {
+			case BMI160_TAP_DURN_50MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_50MS;
+				break;
+			case BMI160_TAP_DURN_100MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_100MS;
+				break;
+			case BMI160_TAP_DURN_150MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_150MS;
+				break;
+			case BMI160_TAP_DURN_200MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_200MS;
+				break;
+			case BMI160_TAP_DURN_250MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_250MS;
+				break;
+			case BMI160_TAP_DURN_375MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_375MS;
+				break;
+			case BMI160_TAP_DURN_500MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_500MS;
+				break;
+			case BMI160_TAP_DURN_700MS:
+				v_data_tap_durn_u8 = BMI160_TAP_DURN_700MS;
+				break;
+			default:
+				break;
+			}
+			/* write tap duration*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_DURN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_TAP_0_INTR_TAP_DURN,
+				v_data_tap_durn_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_TAP_0_INTR_TAP_DURN__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_SHOCK_50MS
+ *  0x01     | BMI160_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_shock(
+u8 *v_tap_shock_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap shock duration*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_shock_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_SHOCK_50MS
+ *  0x01     | BMI160_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_shock(u8 v_tap_shock_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_tap_shock_u8 <= BMI160_MAX_VALUE_TAP_SHOCK) {
+			/* write tap shock duration*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK,
+				v_tap_shock_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_QUIET_30MS
+ *  0x01     | BMI160_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_quiet(
+u8 *v_tap_quiet_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap quiet duration*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_quiet_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_QUIET_30MS
+ *  0x01     | BMI160_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_quiet(u8 v_tap_quiet_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_tap_quiet_u8 <= BMI160_MAX_VALUE_TAP_QUIET) {
+			/* write tap quiet duration*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET,
+				v_tap_quiet_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_thres(
+u8 *v_tap_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read tap threshold*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_1_INTR_TAP_THRES__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_thres_u8 = BMI160_GET_BITSLICE
+			(v_data_u8,
+			BMI160_USER_INTR_TAP_1_INTR_TAP_THRES);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_thres(
+u8 v_tap_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write tap threshold*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_TAP_1_INTR_TAP_THRES__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_TAP_1_INTR_TAP_THRES,
+				v_tap_thres_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_TAP_1_INTR_TAP_THRES__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
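+/*
+ * Illustrative usage sketch (not part of the original driver sources):
+ * it configures a tap-detection profile from the four tap parameters
+ * above.  BMI160_TAP_DURN_200MS is one of the duration codes handled by
+ * the switch in bmi160_set_intr_tap_durn(); the 500 mg threshold assumes
+ * a +/-2g accel range and is only an example.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += bmi160_set_intr_tap_durn(BMI160_TAP_DURN_200MS);
+ *	rc += bmi160_set_intr_tap_shock(0x00);	// 50 ms shock window
+ *	rc += bmi160_set_intr_tap_quiet(0x00);	// 30 ms quiet window
+ *	rc += bmi160_set_intr_tap_thres(7);	// (7 + 1) * 62.5 mg = 500 mg at 2g
+ *	if (rc != SUCCESS)
+ *		;	// bus error while programming 0x63/0x64
+ */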
+ /*!
+ *	@brief This API read the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_mode(
+u8 *v_orient_mode_u8)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read orientation threshold*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mode_u8 = BMI160_GET_BITSLICE
+			(v_data_u8,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_mode(
+u8 v_orient_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_orient_mode_u8 <= BMI160_MAX_ORIENT_MODE) {
+			/* write orientation threshold*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE,
+				v_orient_mode_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read the orient blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_blocking_u8 : The value of orient blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_blocking(
+u8 *v_orient_blocking_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read orient blocking mode*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_blocking_u8 = BMI160_GET_BITSLICE
+			(v_data_u8,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write the orient blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_blocking_u8 : The value of orient blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_blocking(
+u8 v_orient_blocking_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	if (v_orient_blocking_u8 <= BMI160_MAX_ORIENT_BLOCKING) {
+		/* write orient blocking mode*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING,
+			v_orient_blocking_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief This API read Orient interrupt
+ *	hysteresis, from the register 0x65 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_hyst_u8 : The value of orient hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_hyst(
+u8 *v_orient_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read orient hysteresis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_hyst_u8 = BMI160_GET_BITSLICE
+			(v_data_u8,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write Orient interrupt
+ *	hysteresis, from the register 0x65 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_hyst_u8 : The value of orient hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_hyst(
+u8 v_orient_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write orient hysteresis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST,
+				v_orient_hyst_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read Orient
+ *	blocking angle (0 to 44.8 degrees) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_theta(
+u8 *v_orient_theta_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read Orient blocking angle*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_theta_u8 = BMI160_GET_BITSLICE
+			(v_data_u8,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write Orient
+ *	blocking angle (0 to 44.8 degrees) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_theta(
+u8 v_orient_theta_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	if (v_orient_theta_u8 <= BMI160_MAX_ORIENT_THETA) {
+		/* write Orient blocking angle*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA,
+			v_orient_theta_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief This API read orient change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_ud_u8 : The value of orient change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_ud_enable(
+u8 *v_orient_ud_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read orient up/down enable*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_ud_u8 = BMI160_GET_BITSLICE
+			(v_data_u8,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write orient change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_ud_u8 : The value of orient change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_ud_enable(
+u8 v_orient_ud_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	if (v_orient_ud_u8 <= BMI160_MAX_VALUE_ORIENT_UD) {
+		/* write orient up/down enable */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE,
+			v_orient_ud_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief This API read orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_axes_u8 : The value of orient axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_axes_enable(
+u8 *v_orient_axes_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read orientation axes changes  */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_axes_u8 = BMI160_GET_BITSLICE
+			(v_data_u8,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_axes_u8 : The value of orient axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_axes_enable(
+u8 v_orient_axes_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+	if (v_orient_axes_u8 <= BMI160_MAX_VALUE_ORIENT_AXES) {
+		/*write orientation axes changes  */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX,
+			v_orient_axes_u8);
+			com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
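+/*
+ * Illustrative usage sketch (not part of the original driver sources):
+ * it programs the whole orientation-interrupt block (registers 0x65 and
+ * 0x66) with one symmetrical-mode configuration; every numeric value
+ * below is an arbitrary example, not a recommended setting.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += bmi160_set_intr_orient_mode(0x00);	// symmetrical
+ *	rc += bmi160_set_intr_orient_blocking(0x03);	// strictest blocking
+ *	rc += bmi160_set_intr_orient_hyst(0x01);	// 1 * 62.5 mg hysteresis
+ *	rc += bmi160_set_intr_orient_theta(0x08);	// blocking angle code
+ *	rc += bmi160_set_intr_orient_ud_enable(0x01);	// report up/down changes
+ *	rc += bmi160_set_intr_orient_axes_enable(0x00);	// keep x/y/z mapping
+ *	if (rc != SUCCESS)
+ *		;	// at least one register access failed
+ */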
+ /*!
+ *	@brief This API read Flat angle (0 to 44.8 degrees) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat_theta(
+u8 *v_flat_theta_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read Flat angle*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_theta_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write Flat angle (0 to 44.8 degrees) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat_theta(
+u8 v_flat_theta_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_flat_theta_u8 <= BMI160_MAX_FLAT_THETA) {
+			/* write Flat angle */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA,
+				v_flat_theta_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read Flat interrupt hold time;
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat_hold(
+u8 *v_flat_hold_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read flat hold time*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_hold_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write Flat interrupt hold time;
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat_hold(
+u8 v_flat_hold_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_flat_hold_u8 <= BMI160_MAX_FLAT_HOLD) {
+			/* write flat hold time*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD,
+				v_flat_hold_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat_hyst(
+u8 *v_flat_hyst_u8)
+{
+	/* variable used to return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the flat hysteresis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_hyst_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat_hyst(
+u8 v_flat_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_flat_hyst_u8 <= BMI160_MAX_FLAT_HYST) {
+			/* read the flat hysteresis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST,
+				v_flat_hyst_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
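+/*
+ * Illustrative usage sketch, assuming p_bmi160 has already been bound to
+ * a device (bus read/write and delay hooks installed): configuring the
+ * flat interrupt with the two setters above. The raw values follow the
+ * tables above; error handling is left to the caller.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_intr_flat_hold(0x01);	// 512ms hold time
+ *	rslt += bmi160_set_intr_flat_hyst(0x04);	// hysteresis, range 0x00..0x0F
+ *	if (rslt != SUCCESS)
+ *		;	// bus error or out-of-range value
+ */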
+ /*!
+ *	@brief This API read accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_accel_z(u8 *v_foc_accel_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel offset compensation for z axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Z__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_accel_z_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FOC_ACCEL_Z);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_accel_z(
+u8 v_foc_accel_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write the accel offset compensation for z axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Z__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FOC_ACCEL_Z,
+				v_foc_accel_z_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_Z__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_accel_y(u8 *v_foc_accel_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the accel offset compensation for y axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_accel_y_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FOC_ACCEL_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_accel_y(u8 v_foc_accel_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_foc_accel_y_u8 <= BMI160_MAX_ACCEL_FOC) {
+			/* write the accel offset compensation for y axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FOC_ACCEL_Y,
+				v_foc_accel_y_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_Y__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for x-axis
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_accel_x(u8 *v_foc_accel_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* read the accel offset compensation for x axis*/
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+		p_bmi160->dev_addr,
+		BMI160_USER_FOC_ACCEL_X__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		*v_foc_accel_x_u8 = BMI160_GET_BITSLICE(v_data_u8,
+		BMI160_USER_FOC_ACCEL_X);
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for x-axis
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_accel_x(u8 v_foc_accel_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_foc_accel_x_u8 <= BMI160_MAX_ACCEL_FOC) {
+			/* write the accel offset compensation for x axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_X__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FOC_ACCEL_X,
+				v_foc_accel_x_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_X__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
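+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: programming the FOC target values for all three accel axes
+ * with the per-axis setters above, for a part resting flat
+ * (X = 0g, Y = 0g, Z = +1g).
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_foc_accel_x(0x03);	// target 0g
+ *	rslt += bmi160_set_foc_accel_y(0x03);	// target 0g
+ *	rslt += bmi160_set_foc_accel_z(0x01);	// target +1g
+ */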
+/*!
+ *	@brief This API writes accel fast offset compensation
+ *	from the register 0x69 bit 0 to 5
+ *	It writes each axis individually:
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_u8: The value of accel offset compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_axis_u8: The value of accel offset axis selection
+ *	value    | axis
+ * ----------|-------------------
+ *  0        | FOC_X_AXIS
+ *  1        | FOC_Y_AXIS
+ *  2        | FOC_Z_AXIS
+ *
+ *	@param v_accel_offset_s8: The accel offset value
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_foc_trigger(u8 v_axis_u8,
+u8 v_foc_accel_u8, s8 *v_accel_offset_s8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+s8 v_status_s8 = SUCCESS;
+u8 v_timeout_u8 = BMI160_INIT_VALUE;
+s8 v_foc_accel_offset_x_s8  = BMI160_INIT_VALUE;
+s8 v_foc_accel_offset_y_s8 =  BMI160_INIT_VALUE;
+s8 v_foc_accel_offset_z_s8 =  BMI160_INIT_VALUE;
+u8 focstatus = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+} else {
+	v_status_s8 = bmi160_set_accel_offset_enable(
+	ACCEL_OFFSET_ENABLE);
+	if (v_status_s8 == SUCCESS) {
+		switch (v_axis_u8) {
+		case FOC_X_AXIS:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_X__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FOC_ACCEL_X,
+				v_foc_accel_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_X__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the
+			FOC need to write
+			0x03 in the register 0x7e*/
+			com_rslt +=
+			bmi160_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt +=
+			bmi160_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != BMI160_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != BMI160_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				BMI160_MAXIMUM_TIMEOUT)) {
+					p_bmi160->delay_msec(
+					BMI160_DELAY_SETTLING_TIME);
+					com_rslt = bmi160_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+				(focstatus == BMI160_FOC_STAT_HIGH)) {
+				com_rslt +=
+				bmi160_get_accel_offset_compensation_xaxis(
+				&v_foc_accel_offset_x_s8);
+				*v_accel_offset_s8 =
+				v_foc_accel_offset_x_s8;
+			}
+		break;
+		case FOC_Y_AXIS:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FOC_ACCEL_Y,
+				v_foc_accel_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_Y__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC
+			need to write 0x03
+			in the register 0x7e*/
+			com_rslt +=
+			bmi160_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt +=
+			bmi160_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != BMI160_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != BMI160_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				BMI160_MAXIMUM_TIMEOUT)) {
+					p_bmi160->delay_msec(
+					BMI160_DELAY_SETTLING_TIME);
+					com_rslt = bmi160_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == BMI160_FOC_STAT_HIGH)) {
+				com_rslt +=
+				bmi160_get_accel_offset_compensation_yaxis(
+				&v_foc_accel_offset_y_s8);
+				*v_accel_offset_s8 =
+				v_foc_accel_offset_y_s8;
+			}
+		break;
+		case FOC_Z_AXIS:
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Z__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FOC_ACCEL_Z,
+				v_foc_accel_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_Z__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC need to write
+			0x03 in the register 0x7e*/
+			com_rslt +=
+			bmi160_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt +=
+			bmi160_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != BMI160_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != BMI160_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				BMI160_MAXIMUM_TIMEOUT)) {
+					p_bmi160->delay_msec(
+					BMI160_DELAY_SETTLING_TIME);
+					com_rslt = bmi160_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == BMI160_FOC_STAT_HIGH)) {
+				com_rslt +=
+				bmi160_get_accel_offset_compensation_zaxis(
+				&v_foc_accel_offset_z_s8);
+				*v_accel_offset_s8 =
+				v_foc_accel_offset_z_s8;
+			}
+		break;
+		default:
+		break;
+		}
+	} else {
+	com_rslt =  ERROR;
+	}
+}
+return com_rslt;
+}
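+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: triggering FOC for a single axis with the routine above.
+ * FOC_Z_AXIS and the +1g target (0x01) follow the tables above; the
+ * compensated offset comes back through the pointer argument.
+ *
+ *	s8 z_offset = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_accel_foc_trigger(FOC_Z_AXIS, 0x01, &z_offset);
+ */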
+/*!
+ *	@brief This API write fast accel offset compensation
+ *	it writes all axes together to the register 0x69 bit 0 to 5
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_x_u8: The value of accel offset x compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_y_u8: The value of accel offset y compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_z_u8: The value of accel offset z compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_accel_off_x_s8: The value of accel offset x axis
+ *  @param  v_accel_off_y_s8: The value of accel offset y axis
+ *  @param  v_accel_off_z_s8: The value of accel offset z axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_accel_foc_trigger_xyz(u8 v_foc_accel_x_u8,
+u8 v_foc_accel_y_u8, u8 v_foc_accel_z_u8, s8 *v_accel_off_x_s8,
+s8 *v_accel_off_y_s8, s8 *v_accel_off_z_s8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 focx = BMI160_INIT_VALUE;
+u8 focy = BMI160_INIT_VALUE;
+u8 focz = BMI160_INIT_VALUE;
+s8 v_foc_accel_offset_x_s8 = BMI160_INIT_VALUE;
+s8 v_foc_accel_offset_y_s8 = BMI160_INIT_VALUE;
+s8 v_foc_accel_offset_z_s8 = BMI160_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+u8 v_timeout_u8 = BMI160_INIT_VALUE;
+u8 focstatus = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		v_status_s8 = bmi160_set_accel_offset_enable(
+		ACCEL_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			/* foc x axis*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_X__REG,
+			&focx, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				focx = BMI160_SET_BITSLICE(focx,
+				BMI160_USER_FOC_ACCEL_X,
+				v_foc_accel_x_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_X__REG,
+				&focx, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* foc y axis*/
+			com_rslt +=
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Y__REG,
+			&focy, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				focy = BMI160_SET_BITSLICE(focy,
+				BMI160_USER_FOC_ACCEL_Y,
+				v_foc_accel_y_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_Y__REG,
+				&focy, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* foc z axis*/
+			com_rslt +=
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_ACCEL_Z__REG,
+			&focz, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				focz = BMI160_SET_BITSLICE(focz,
+				BMI160_USER_FOC_ACCEL_Z,
+				v_foc_accel_z_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_FOC_ACCEL_Z__REG,
+				&focz, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC need to
+			write 0x03 in the register 0x7e*/
+			com_rslt += bmi160_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt += bmi160_get_foc_rdy(
+			&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != BMI160_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != BMI160_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				BMI160_MAXIMUM_TIMEOUT)) {
+					p_bmi160->delay_msec(
+					BMI160_DELAY_SETTLING_TIME);
+					com_rslt = bmi160_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == BMI160_FOC_STAT_HIGH)) {
+				com_rslt +=
+				bmi160_get_accel_offset_compensation_xaxis(
+				&v_foc_accel_offset_x_s8);
+				*v_accel_off_x_s8 =
+				v_foc_accel_offset_x_s8;
+				com_rslt +=
+				bmi160_get_accel_offset_compensation_yaxis(
+				&v_foc_accel_offset_y_s8);
+				*v_accel_off_y_s8 =
+				v_foc_accel_offset_y_s8;
+				com_rslt +=
+				bmi160_get_accel_offset_compensation_zaxis(
+				&v_foc_accel_offset_z_s8);
+				*v_accel_off_z_s8 =
+				v_foc_accel_offset_z_s8;
+			}
+		} else {
+		com_rslt =  ERROR;
+		}
+	}
+return com_rslt;
+}
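+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device and the part is resting flat: running FOC on all three accel
+ * axes at once with the routine above (X/Y target 0g, Z target +1g).
+ *
+ *	s8 off_x = 0, off_y = 0, off_z = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_accel_foc_trigger_xyz(0x03, 0x03, 0x01,
+ *	&off_x, &off_y, &off_z);
+ */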
+/*!
+ *	@brief This API read gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_gyro_enable(
+u8 *v_foc_gyro_u8)
+{
+	/* used for return the status of bus communication */
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the gyro fast offset enable*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_FOC_GYRO_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_gyro_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_FOC_GYRO_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *	@param v_gyro_off_x_s16 : The value of gyro fast offset x axis data
+ *	@param v_gyro_off_y_s16 : The value of gyro fast offset y axis data
+ *	@param v_gyro_off_z_s16 : The value of gyro fast offset z axis data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_gyro_enable(
+u8 v_foc_gyro_u8, s16 *v_gyro_off_x_s16,
+s16 *v_gyro_off_y_s16, s16 *v_gyro_off_z_s16)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+u8 v_timeout_u8 = BMI160_INIT_VALUE;
+s16 offsetx = BMI160_INIT_VALUE;
+s16 offsety = BMI160_INIT_VALUE;
+s16 offsetz = BMI160_INIT_VALUE;
+u8 focstatus = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		v_status_s8 = bmi160_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_FOC_GYRO_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_FOC_GYRO_ENABLE,
+				v_foc_gyro_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_FOC_GYRO_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC need to write 0x03
+			in the register 0x7e*/
+			com_rslt += bmi160_set_command_register
+			(START_FOC_ACCEL_GYRO);
+
+			com_rslt += bmi160_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != BMI160_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != BMI160_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				BMI160_MAXIMUM_TIMEOUT)) {
+					p_bmi160->delay_msec(
+					BMI160_DELAY_SETTLING_TIME);
+					com_rslt = bmi160_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == BMI160_FOC_STAT_HIGH)) {
+				com_rslt +=
+				bmi160_get_gyro_offset_compensation_xaxis
+				(&offsetx);
+				*v_gyro_off_x_s16 = offsetx;
+
+				com_rslt +=
+				bmi160_get_gyro_offset_compensation_yaxis
+				(&offsety);
+				*v_gyro_off_y_s16 = offsety;
+
+				com_rslt +=
+				bmi160_get_gyro_offset_compensation_zaxis(
+				&offsetz);
+				*v_gyro_off_z_s16 = offsetz;
+			}
+		} else {
+		com_rslt = ERROR;
+		}
+	}
+return com_rslt;
+}
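+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device and the part is stationary: running gyro fast offset
+ * compensation with the routine above. The compensated offsets are
+ * returned through the three pointers.
+ *
+ *	s16 gyro_off_x = 0, gyro_off_y = 0, gyro_off_z = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_foc_gyro_enable(0x01,
+ *	&gyro_off_x, &gyro_off_y, &gyro_off_z);
+ */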
+ /*!
+ *	@brief This API read NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_nvm_prog_enable(
+u8 *v_nvm_prog_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read NVM program*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_CONFIG_NVM_PROG_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nvm_prog_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_CONFIG_NVM_PROG_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_nvm_prog_enable(
+u8 v_nvm_prog_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_nvm_prog_u8 <= BMI160_MAX_VALUE_NVM_PROG) {
+			/* write the NVM program*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_CONFIG_NVM_PROG_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_CONFIG_NVM_PROG_ENABLE,
+				v_nvm_prog_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_CONFIG_NVM_PROG_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
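+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: enabling NVM programming with the setter above and reading the
+ * bit back. This only arms the NVM write path; the actual NVM write is
+ * issued separately and is not shown here.
+ *
+ *	u8 nvm_enabled = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_nvm_prog_enable(0x01);	// ENABLE
+ *	rslt += bmi160_get_nvm_prog_enable(&nvm_enabled);
+ */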
+/*!
+ * @brief This API read to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_spi3(
+u8 *v_spi3_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read SPI mode*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_SPI3__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_spi3_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_IF_CONFIG_SPI3);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API write to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_spi3(
+u8 v_spi3_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_spi3_u8 <= BMI160_MAX_VALUE_SPI3) {
+			/* write SPI mode*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_SPI3__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_IF_CONFIG_SPI3,
+				v_spi3_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_IF_CONFIG_SPI3__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
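+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: switching the primary/OIS interface to SPI 3-wire mode with
+ * the setter above and reading the setting back.
+ *
+ *	u8 spi_mode = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_spi3(0x01);	// SPI 3-wire mode
+ *	rslt += bmi160_get_spi3(&spi_mode);
+ */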
+/*!
+ *	@brief This API read I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog timeout after 1 ms
+ *   1     |  I2C watchdog timeout after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_wdt_select(
+u8 *v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read I2C watch dog timer */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_I2C_WDT_SELECT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_wdt_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_IF_CONFIG_I2C_WDT_SELECT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog timeout after 1 ms
+ *   1     |  I2C watchdog timeout after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_i2c_wdt_select(
+u8 v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_i2c_wdt_u8 <= BMI160_MAX_VALUE_I2C_WDT) {
+			/* write I2C watch dog timer */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_I2C_WDT_SELECT__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_IF_CONFIG_I2C_WDT_SELECT,
+				v_i2c_wdt_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_IF_CONFIG_I2C_WDT_SELECT__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_wdt_enable(
+u8 *v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read i2c watch dog enable */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_wdt_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_i2c_wdt_enable(
+u8 v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_i2c_wdt_u8 <= BMI160_MAX_VALUE_I2C_WDT) {
+			/* write i2c watch dog enable */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE,
+				v_i2c_wdt_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
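+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: enabling the I2C watchdog with the 50 ms timeout, using the
+ * timer-select and enable setters above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_i2c_wdt_select(0x01);	// timeout after 50 ms
+ *	rslt += bmi160_set_i2c_wdt_enable(0x01);	// ENABLE
+ */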
+/*!
+ * @brief This API read I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_if_mode(
+u8 *v_if_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read if mode*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_IF_MODE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_if_mode_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_IF_CONFIG_IF_MODE);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API write I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_if_mode(
+u8 v_if_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_if_mode_u8 <= BMI160_MAX_IF_MODE) {
+			/* write if mode*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_IF_CONFIG_IF_MODE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_IF_CONFIG_IF_MODE,
+				v_if_mode_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_IF_CONFIG_IF_MODE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
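+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: selecting the interface configuration that turns on the
+ * secondary magnetometer interface (value 0x02 from the table above).
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_if_mode(0x02);	// primary: autoconfig / secondary: mag
+ */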
+/*!
+ *	@brief This API read gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | anymotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | anymotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | anymotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_sleep_trigger(
+u8 *v_gyro_sleep_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro sleep trigger */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_SLEEP_TRIGGER__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_sleep_trigger_u8 =
+			BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_GYRO_SLEEP_TRIGGER);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | anymotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | anymotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | anymotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_sleep_trigger(
+u8 v_gyro_sleep_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_gyro_sleep_trigger_u8 <= BMI160_MAX_GYRO_SLEEP_TIGGER) {
+			/* write gyro sleep trigger */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_SLEEP_TRIGGER__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_SLEEP_TRIGGER,
+				v_gyro_sleep_trigger_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_GYRO_SLEEP_TRIGGER__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_wakeup_trigger(
+u8 *v_gyro_wakeup_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro wakeup trigger */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_WAKEUP_TRIGGER__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_wakeup_trigger_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_GYRO_WAKEUP_TRIGGER);
+	  }
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_wakeup_trigger(
+u8 v_gyro_wakeup_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_gyro_wakeup_trigger_u8
+		<= BMI160_MAX_GYRO_WAKEUP_TRIGGER) {
+			/* write gyro wakeup trigger */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_WAKEUP_TRIGGER__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_WAKEUP_TRIGGER,
+				v_gyro_wakeup_trigger_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_GYRO_WAKEUP_TRIGGER__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_sleep_state(
+u8 *v_gyro_sleep_state_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro sleep state*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_SLEEP_STATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_sleep_state_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_GYRO_SLEEP_STATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_sleep_state(
+u8 v_gyro_sleep_state_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_gyro_sleep_state_u8 <= BMI160_MAX_VALUE_SLEEP_STATE) {
+			/* write gyro sleep state*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_SLEEP_STATE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_SLEEP_STATE,
+				v_gyro_sleep_state_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_GYRO_SLEEP_STATE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read gyro wakeup interrupt
+ *	from the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_wakeup_intr(
+u8 *v_gyro_wakeup_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro wakeup interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_WAKEUP_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_wakeup_intr_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_GYRO_WAKEUP_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro wakeup interrupt
+ *	from the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_wakeup_intr(
+u8 v_gyro_wakeup_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_gyro_wakeup_intr_u8 <= BMI160_MAX_VALUE_WAKEUP_INTR) {
+			/* write gyro wakeup interrupt */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_WAKEUP_INTR__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_WAKEUP_INTR,
+				v_gyro_wakeup_intr_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_GYRO_WAKEUP_INTR__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
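+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: one possible gyro power-save configuration built from the four
+ * setters above - sleep on the motion condition, wake on INT1 activity,
+ * fall back to suspend, and signal wake-up events on the interrupt.
+ * The raw values follow the tables above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_gyro_sleep_trigger(0x04);
+ *	rslt += bmi160_set_gyro_wakeup_trigger(0x02);
+ *	rslt += bmi160_set_gyro_sleep_state(0x01);	// suspend on sleep
+ *	rslt += bmi160_set_gyro_wakeup_intr(0x01);	// ENABLE
+ */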
+/*!
+ * @brief This API read the accel axis selected for self-test
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_selftest_axis(
+u8 *v_accel_selftest_axis_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read accel self test axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_SELFTEST_AXIS__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_selftest_axis_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_ACCEL_SELFTEST_AXIS);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API write the accel axis to be self-tested
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_selftest_axis(
+u8 v_accel_selftest_axis_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_accel_selftest_axis_u8
+		<= BMI160_MAX_ACCEL_SELFTEST_AXIS) {
+			/* write accel self test axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_SELFTEST_AXIS__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_ACCEL_SELFTEST_AXIS,
+				v_accel_selftest_axis_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_ACCEL_SELFTEST_AXIS__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel self test axis sign
+ *	from the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_selftest_sign(
+u8 *v_accel_selftest_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read accel self test axis sign*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_SELFTEST_SIGN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_selftest_sign_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_ACCEL_SELFTEST_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel self test axis sign
+ *	from the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_selftest_sign(
+u8 v_accel_selftest_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_accel_selftest_sign_u8 <=
+		BMI160_MAX_VALUE_SELFTEST_SIGN) {
+			/* write accel self test axis sign*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_ACCEL_SELFTEST_SIGN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_ACCEL_SELFTEST_SIGN,
+				v_accel_selftest_sign_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_ACCEL_SELFTEST_SIGN__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+			com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel self test amplitude
+ *	from the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_selftest_amp(
+u8 *v_accel_selftest_amp_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read  self test amplitude*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_SELFTEST_AMP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_selftest_amp_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_SELFTEST_AMP);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel self test amplitude
+ *	from the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_selftest_amp(
+u8 v_accel_selftest_amp_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_accel_selftest_amp_u8 <=
+		BMI160_MAX_VALUE_SELFTEST_AMP) {
+			/* write  self test amplitude*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_SELFTEST_AMP__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_SELFTEST_AMP,
+				v_accel_selftest_amp_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_SELFTEST_AMP__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
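+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: configuring a positive, high-amplitude accel self-test on the
+ * x-axis with the three setters above. Judging pass/fail from the axis
+ * data before and after the deflection is left to the caller.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_accel_selftest_axis(0x01);	// x-axis
+ *	rslt += bmi160_set_accel_selftest_sign(0x01);	// positive
+ *	rslt += bmi160_set_accel_selftest_amp(0x01);	// HIGH amplitude
+ */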
+/*!
+ *	@brief This API read gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_selftest_start(
+u8 *v_gyro_selftest_start_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro self test start */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_SELFTEST_START__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_selftest_start_u8 = BMI160_GET_BITSLICE(
+			v_data_u8,
+			BMI160_USER_GYRO_SELFTEST_START);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_selftest_start(
+u8 v_gyro_selftest_start_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_gyro_selftest_start_u8 <=
+		BMI160_MAX_VALUE_SELFTEST_START) {
+			/* write gyro self test start */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_GYRO_SELFTEST_START__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_GYRO_SELFTEST_START,
+				v_gyro_selftest_start_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_GYRO_SELFTEST_START__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
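+/*
+ * Illustrative usage sketch, assuming p_bmi160 is already bound to a
+ * device: starting the built-in gyro self-test with the setter above.
+ * Reading back the pass/fail status afterwards is done through a
+ * separate status routine and is not shown here.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_gyro_selftest_start(0x01);
+ */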
+ /*!
+ * @brief This API read primary interface selection I2C or SPI
+ *	from the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C Disable
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_spi_enable(u8 *v_spi_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read interface selection*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_NV_CONFIG_SPI_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_spi_enable_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_NV_CONFIG_SPI_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ * @brief This API writes the primary interface selection (I2C or SPI)
+ *	from the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_spi_enable(u8 v_spi_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write interface section*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_NV_CONFIG_SPI_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_NV_CONFIG_SPI_ENABLE,
+				v_spi_enable_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_NV_CONFIG_SPI_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the spare zero field
+ *	from register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_spare0_trim(u8 *v_spare0_trim_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read spare zero*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_NV_CONFIG_SPARE0__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_spare0_trim_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_NV_CONFIG_SPARE0);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the spare zero field
+ *	of register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_spare0_trim(u8 v_spare0_trim_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write  spare zero*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_NV_CONFIG_SPARE0__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_NV_CONFIG_SPARE0,
+				v_spare0_trim_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_NV_CONFIG_SPARE0__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the NVM counter
+ *	from register 0x70 bits 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_nvm_counter(u8 *v_nvm_counter_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read NVM counter*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_NV_CONFIG_NVM_COUNTER__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nvm_counter_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_NV_CONFIG_NVM_COUNTER);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the NVM counter
+ *	to register 0x70 bits 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_nvm_counter(
+u8 v_nvm_counter_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write NVM counter*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_NV_CONFIG_NVM_COUNTER__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_NV_CONFIG_NVM_COUNTER,
+				v_nvm_counter_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_NV_CONFIG_NVM_COUNTER__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the accel manual offset compensation of the x axis
+ *	from register 0x71 bits 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_compensation_xaxis(
+s8 *v_accel_off_x_s8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read accel manual offset compensation of x axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_0_ACCEL_OFF_X__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_x_s8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_OFFSET_0_ACCEL_OFF_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the accel manual offset compensation of the x axis
+ *	to register 0x71 bits 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_compensation_xaxis(
+s8 v_accel_off_x_s8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* enable accel offset */
+		v_status_s8 = bmi160_set_accel_offset_enable(
+		ACCEL_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			/* write accel manual offset compensation of x axis*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_0_ACCEL_OFF_X__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(
+				v_data_u8,
+				BMI160_USER_OFFSET_0_ACCEL_OFF_X,
+				v_accel_off_x_s8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_0_ACCEL_OFF_X__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt =  ERROR;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the accel manual offset compensation of the y axis
+ *	from register 0x72 bits 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_compensation_yaxis(
+s8 *v_accel_off_y_s8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read accel manual offset compensation of y axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_1_ACCEL_OFF_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_y_s8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_OFFSET_1_ACCEL_OFF_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the accel manual offset compensation of the y axis
+ *	to register 0x72 bits 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_compensation_yaxis(
+s8 v_accel_off_y_s8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* enable accel offset */
+		v_status_s8 = bmi160_set_accel_offset_enable(
+		ACCEL_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			/* write accel manual offset compensation of y axis*/
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_1_ACCEL_OFF_Y__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(
+				v_data_u8,
+				BMI160_USER_OFFSET_1_ACCEL_OFF_Y,
+				v_accel_off_y_s8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_1_ACCEL_OFF_Y__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = ERROR;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the accel manual offset compensation of the z axis
+ *	from register 0x73 bits 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_compensation_zaxis(
+s8 *v_accel_off_z_s8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read accel manual offset compensation of z axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_2_ACCEL_OFF_Z__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_z_s8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_OFFSET_2_ACCEL_OFF_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the accel manual offset compensation of the z axis
+ *	to register 0x73 bits 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_compensation_zaxis(
+s8 v_accel_off_z_s8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	u8 v_status_s8 = SUCCESS;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* enable accel offset */
+			v_status_s8 = bmi160_set_accel_offset_enable(
+			ACCEL_OFFSET_ENABLE);
+			if (v_status_s8 == SUCCESS) {
+				/* write accel manual offset
+				compensation of z axis*/
+				com_rslt =
+				p_bmi160->BMI160_BUS_READ_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_2_ACCEL_OFF_Z__REG,
+				&v_data_u8,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+				if (com_rslt == SUCCESS) {
+					v_data_u8 =
+					BMI160_SET_BITSLICE(v_data_u8,
+					BMI160_USER_OFFSET_2_ACCEL_OFF_Z,
+					v_accel_off_z_s8);
+					com_rslt +=
+					p_bmi160->BMI160_BUS_WRITE_FUNC(
+					p_bmi160->dev_addr,
+					BMI160_USER_OFFSET_2_ACCEL_OFF_Z__REG,
+					&v_data_u8,
+					BMI160_GEN_READ_WRITE_DATA_LENGTH);
+				}
+			} else {
+			com_rslt = ERROR;
+			}
+		}
+	return com_rslt;
+}
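+/*
+ * Usage sketch for the three accel offset setters above (illustrative
+ * only, not compiled as part of the driver). It assumes the device
+ * structure and the bus read/write/delay hooks are already registered;
+ * the offset values are arbitrary examples.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *
+ *	// each setter first enables the offset feature (register 0x77
+ *	// bit 6) and then programs one signed 8-bit offset
+ *	rslt += bmi160_set_accel_offset_compensation_xaxis((s8)4);
+ *	rslt += bmi160_set_accel_offset_compensation_yaxis((s8)-3);
+ *	rslt += bmi160_set_accel_offset_compensation_zaxis((s8)1);
+ */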
+/*!
+ *	@brief This API reads the gyro manual offset compensation of the x axis
+ *	from register 0x74 bits 0 to 7 and register 0x77 bits 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_compensation_xaxis(
+s16 *v_gyro_off_x_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data1_u8r = BMI160_INIT_VALUE;
+	u8 v_data2_u8r = BMI160_INIT_VALUE;
+	s16 v_data3_u8r, v_data4_u8r = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro offset x*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_3_GYRO_OFF_X__REG,
+			&v_data1_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			v_data1_u8r = BMI160_GET_BITSLICE(v_data1_u8r,
+			BMI160_USER_OFFSET_3_GYRO_OFF_X);
+			com_rslt += p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_X__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			v_data2_u8r = BMI160_GET_BITSLICE(v_data2_u8r,
+			BMI160_USER_OFFSET_6_GYRO_OFF_X);
+			v_data3_u8r = v_data2_u8r
+			<< BMI160_SHIFT_BIT_POSITION_BY_14_BITS;
+			v_data4_u8r =  v_data1_u8r
+			<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS;
+			v_data3_u8r = v_data3_u8r | v_data4_u8r;
+			*v_gyro_off_x_s16 = v_data3_u8r
+			>> BMI160_SHIFT_BIT_POSITION_BY_06_BITS;
+		}
+	return com_rslt;
+}
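+/*
+ * Note on the bit manipulation above (it applies to all three gyro
+ * offset getters): the 10-bit signed offset is stored as 8 low bits in
+ * register 0x74/0x75/0x76 and 2 high bits in register 0x77. The two
+ * fields are shifted so that the sign bit lands in bit 15 of an s16 and
+ * then shifted back, which sign-extends the value. Worked example
+ * (assumes arithmetic right shift of negative values, as the driver
+ * itself does):
+ *
+ *	u8 lsb = 0xF6;	// offset bits 0..7 (register 0x74)
+ *	u8 msb = 0x03;	// offset bits 8..9 (register 0x77 bits 0..1)
+ *	s16 off;
+ *
+ *	off = (s16)(((s16)msb << 14) | ((s16)lsb << 6)) >> 6;
+ *	// off == -10, i.e. the raw 10-bit pattern 0x3F6 sign-extended
+ */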
+/*!
+ *	@brief This API writes the gyro manual offset compensation of the x axis
+ *	to register 0x74 bits 0 to 7 and register 0x77 bits 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_compensation_xaxis(
+s16 v_gyro_off_x_s16)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data1_u8r, v_data2_u8r = BMI160_INIT_VALUE;
+u16 v_data3_u8r = BMI160_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* write gyro offset x*/
+		v_status_s8 = bmi160_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_3_GYRO_OFF_X__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data1_u8r =
+				((s8) (v_gyro_off_x_s16 &
+				BMI160_GYRO_MANUAL_OFFSET_0_7));
+				v_data2_u8r = BMI160_SET_BITSLICE(
+				v_data2_u8r,
+				BMI160_USER_OFFSET_3_GYRO_OFF_X,
+				v_data1_u8r);
+				/* write 0x74 bit 0 to 7*/
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_3_GYRO_OFF_X__REG,
+				&v_data2_u8r,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			com_rslt += p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_X__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data3_u8r =
+				(u16) (v_gyro_off_x_s16 &
+				BMI160_GYRO_MANUAL_OFFSET_8_9);
+				v_data1_u8r = (u8)(v_data3_u8r
+				>> BMI160_SHIFT_BIT_POSITION_BY_08_BITS);
+				v_data2_u8r = BMI160_SET_BITSLICE(
+				v_data2_u8r,
+				BMI160_USER_OFFSET_6_GYRO_OFF_X,
+				v_data1_u8r);
+				/* write 0x77 bit 0 and 1*/
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_6_GYRO_OFF_X__REG,
+				&v_data2_u8r,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		return ERROR;
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API reads the gyro manual offset compensation of the y axis
+ *	from register 0x75 bits 0 to 7 and register 0x77 bits 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_compensation_yaxis(
+s16 *v_gyro_off_y_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data1_u8r = BMI160_INIT_VALUE;
+	u8 v_data2_u8r = BMI160_INIT_VALUE;
+	s16 v_data3_u8r, v_data4_u8r = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro offset y*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_4_GYRO_OFF_Y__REG,
+			&v_data1_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			v_data1_u8r = BMI160_GET_BITSLICE(v_data1_u8r,
+			BMI160_USER_OFFSET_4_GYRO_OFF_Y);
+			com_rslt += p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_Y__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			v_data2_u8r = BMI160_GET_BITSLICE(v_data2_u8r,
+			BMI160_USER_OFFSET_6_GYRO_OFF_Y);
+			v_data3_u8r = v_data2_u8r
+			<< BMI160_SHIFT_BIT_POSITION_BY_14_BITS;
+			v_data4_u8r =  v_data1_u8r
+			<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS;
+			v_data3_u8r = v_data3_u8r | v_data4_u8r;
+			*v_gyro_off_y_s16 = v_data3_u8r
+			>> BMI160_SHIFT_BIT_POSITION_BY_06_BITS;
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the gyro manual offset compensation of the y axis
+ *	to register 0x75 bits 0 to 7 and register 0x77 bits 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_compensation_yaxis(
+s16 v_gyro_off_y_s16)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data1_u8r, v_data2_u8r = BMI160_INIT_VALUE;
+u16 v_data3_u8r = BMI160_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* enable gyro offset bit */
+		v_status_s8 = bmi160_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		/* write gyro offset y*/
+		if (v_status_s8 == SUCCESS) {
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_4_GYRO_OFF_Y__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data1_u8r =
+				((s8) (v_gyro_off_y_s16 &
+				BMI160_GYRO_MANUAL_OFFSET_0_7));
+				v_data2_u8r = BMI160_SET_BITSLICE(
+				v_data2_u8r,
+				BMI160_USER_OFFSET_4_GYRO_OFF_Y,
+				v_data1_u8r);
+				/* write 0x75 bit 0 to 7*/
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_4_GYRO_OFF_Y__REG,
+				&v_data2_u8r,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			com_rslt += p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_Y__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data3_u8r =
+				(u16) (v_gyro_off_y_s16 &
+				BMI160_GYRO_MANUAL_OFFSET_8_9);
+				v_data1_u8r = (u8)(v_data3_u8r
+				>> BMI160_SHIFT_BIT_POSITION_BY_08_BITS);
+				v_data2_u8r = BMI160_SET_BITSLICE(
+				v_data2_u8r,
+				BMI160_USER_OFFSET_6_GYRO_OFF_Y,
+				v_data1_u8r);
+				/* write 0x77 bit 2 and 3*/
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_6_GYRO_OFF_Y__REG,
+				&v_data2_u8r,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		return ERROR;
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API reads the gyro manual offset compensation of the z axis
+ *	from register 0x76 bits 0 to 7 and register 0x77 bits 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_compensation_zaxis(
+s16 *v_gyro_off_z_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data1_u8r = BMI160_INIT_VALUE;
+	u8 v_data2_u8r = BMI160_INIT_VALUE;
+	s16 v_data3_u8r, v_data4_u8r = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro manual offset z axis*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_5_GYRO_OFF_Z__REG,
+			&v_data1_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			v_data1_u8r = BMI160_GET_BITSLICE
+			(v_data1_u8r,
+			BMI160_USER_OFFSET_5_GYRO_OFF_Z);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_Z__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			v_data2_u8r = BMI160_GET_BITSLICE(
+			v_data2_u8r,
+			BMI160_USER_OFFSET_6_GYRO_OFF_Z);
+			v_data3_u8r = v_data2_u8r
+			<< BMI160_SHIFT_BIT_POSITION_BY_14_BITS;
+			v_data4_u8r =  v_data1_u8r
+			<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS;
+			v_data3_u8r = v_data3_u8r | v_data4_u8r;
+			*v_gyro_off_z_s16 = v_data3_u8r
+			>> BMI160_SHIFT_BIT_POSITION_BY_06_BITS;
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the gyro manual offset compensation of the z axis
+ *	to register 0x76 bits 0 to 7 and register 0x77 bits 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_compensation_zaxis(
+s16 v_gyro_off_z_s16)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data1_u8r, v_data2_u8r = BMI160_INIT_VALUE;
+u16 v_data3_u8r = BMI160_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+	} else {
+		/* enable gyro offset*/
+		v_status_s8 = bmi160_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		/* write gyro manual offset z axis*/
+		if (v_status_s8 == SUCCESS) {
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_5_GYRO_OFF_Z__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data1_u8r =
+				((u8) (v_gyro_off_z_s16 &
+				BMI160_GYRO_MANUAL_OFFSET_0_7));
+				v_data2_u8r = BMI160_SET_BITSLICE(
+				v_data2_u8r,
+				BMI160_USER_OFFSET_5_GYRO_OFF_Z,
+				v_data1_u8r);
+				/* write 0x76 bit 0 to 7*/
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_5_GYRO_OFF_Z__REG,
+				&v_data2_u8r,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			com_rslt += p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_Z__REG,
+			&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data3_u8r =
+				(u16) (v_gyro_off_z_s16 &
+				BMI160_GYRO_MANUAL_OFFSET_8_9);
+				v_data1_u8r = (u8)(v_data3_u8r
+				>> BMI160_SHIFT_BIT_POSITION_BY_08_BITS);
+				v_data2_u8r = BMI160_SET_BITSLICE(
+				v_data2_u8r,
+				BMI160_USER_OFFSET_6_GYRO_OFF_Z,
+				v_data1_u8r);
+				/* write 0x77 bit 4 and 5*/
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_6_GYRO_OFF_Z__REG,
+				&v_data2_u8r,
+				BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		return ERROR;
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API reads the accel offset enable bit
+ *	from register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_enable(
+u8 *v_accel_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read accel offset enable */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_enable_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the accel offset enable bit
+ *	to register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_enable(
+u8 v_accel_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+			} else {
+			/* write accel offset enable */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE,
+				v_accel_off_enable_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the gyro offset enable bit
+ *	from register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_enable(
+u8 *v_gyro_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read gyro offset*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_EN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_off_enable_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_OFFSET_6_GYRO_OFF_EN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the gyro offset enable bit
+ *	to register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_enable(
+u8 v_gyro_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write gyro offset*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_OFFSET_6_GYRO_OFF_EN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_USER_OFFSET_6_GYRO_OFF_EN,
+				v_gyro_off_enable_u8);
+				com_rslt += p_bmi160->BMI160_BUS_WRITE_FUNC(
+				p_bmi160->dev_addr,
+				BMI160_USER_OFFSET_6_GYRO_OFF_EN__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the step counter value
+ *	from registers 0x78 and 0x79
+ *
+ *
+ *
+ *
+ *  @param v_step_cnt_s16 : The value of step counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_step_count(u16 *v_step_cnt_s16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* array having the step counter LSB and MSB data
+	v_data_u8[0] - LSB
+	v_data_u8[1] - MSB*/
+	u8 a_data_u8r[BMI160_STEP_COUNT_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read step counter */
+			com_rslt =
+			p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+			BMI160_USER_STEP_COUNT_LSB__REG,
+			a_data_u8r, BMI160_STEP_COUNTER_LENGTH);
+
+			*v_step_cnt_s16 = (s16)
+			((((s32)((s8)a_data_u8r[BMI160_STEP_COUNT_MSB_BYTE]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[BMI160_STEP_COUNT_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the
+ *	step counter configuration
+ *	from register 0x7A bits 0 to 7
+ *	and register 0x7B bits 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16 : The value of step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_step_config(
+u16 *v_step_config_u16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data1_u8r = BMI160_INIT_VALUE;
+	u8 v_data2_u8r = BMI160_INIT_VALUE;
+	u16 v_data3_u8r = BMI160_INIT_VALUE;
+	/* Read the 0 to 7 bit*/
+	com_rslt =
+	p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+	BMI160_USER_STEP_CONFIG_ZERO__REG,
+	&v_data1_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the 8 to 10 bit*/
+	com_rslt +=
+	p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+	BMI160_USER_STEP_CONFIG_ONE_CNF1__REG,
+	&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	v_data2_u8r = BMI160_GET_BITSLICE(v_data2_u8r,
+	BMI160_USER_STEP_CONFIG_ONE_CNF1);
+	v_data3_u8r = ((u16)((((u32)
+	((u8)v_data2_u8r))
+	<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (v_data1_u8r)));
+	/* Read the 11 to 14 bit*/
+	com_rslt +=
+	p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+	BMI160_USER_STEP_CONFIG_ONE_CNF2__REG,
+	&v_data1_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	v_data1_u8r = BMI160_GET_BITSLICE(v_data1_u8r,
+	BMI160_USER_STEP_CONFIG_ONE_CNF2);
+	*v_step_config_u16 = ((u16)((((u32)
+	((u8)v_data1_u8r))
+	<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (v_data3_u8r)));
+
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the
+ *	step counter configuration
+ *	to register 0x7A bits 0 to 7
+ *	and register 0x7B bits 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16 :
+ *	the value of the step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_config(
+u16 v_step_config_u16)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data1_u8r = BMI160_INIT_VALUE;
+	u8 v_data2_u8r = BMI160_INIT_VALUE;
+	u16 v_data3_u16 = BMI160_INIT_VALUE;
+
+	/* write the 0 to 7 bit*/
+	v_data1_u8r = (u8)(v_step_config_u16 &
+	BMI160_STEP_CONFIG_0_7);
+	p_bmi160->BMI160_BUS_WRITE_FUNC
+	(p_bmi160->dev_addr,
+	BMI160_USER_STEP_CONFIG_ZERO__REG,
+	&v_data1_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* write the 8 to 10 bit*/
+	com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+	(p_bmi160->dev_addr,
+	BMI160_USER_STEP_CONFIG_ONE_CNF1__REG,
+	&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data3_u16 = (u16) (v_step_config_u16 &
+		BMI160_STEP_CONFIG_8_10);
+		v_data1_u8r = (u8)(v_data3_u16
+		>> BMI160_SHIFT_BIT_POSITION_BY_08_BITS);
+		v_data2_u8r = BMI160_SET_BITSLICE(v_data2_u8r,
+		BMI160_USER_STEP_CONFIG_ONE_CNF1, v_data1_u8r);
+		p_bmi160->BMI160_BUS_WRITE_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_STEP_CONFIG_ONE_CNF1__REG,
+		&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	}
+	/* write the 11 to 14 bit*/
+	com_rslt += p_bmi160->BMI160_BUS_READ_FUNC
+	(p_bmi160->dev_addr,
+	BMI160_USER_STEP_CONFIG_ONE_CNF2__REG,
+	&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data3_u16 = (u16) (v_step_config_u16 &
+		BMI160_STEP_CONFIG_11_14);
+		v_data1_u8r = (u8)(v_data3_u16
+		>> BMI160_SHIFT_BIT_POSITION_BY_12_BITS);
+		v_data2_u8r = BMI160_SET_BITSLICE(v_data2_u8r,
+		BMI160_USER_STEP_CONFIG_ONE_CNF2, v_data1_u8r);
+		p_bmi160->BMI160_BUS_WRITE_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_STEP_CONFIG_ONE_CNF2__REG,
+		&v_data2_u8r, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	}
+
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the step counter enable bit
+ *	from register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_step_counter_enable(
+u8 *v_step_counter_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the step counter */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_step_counter_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the step counter enable bit
+ *	to register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_counter_enable(u8 v_step_counter_u8)
+{
+/* variable used for return the status of communication result*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+/* check the p_bmi160 structure as NULL*/
+if (p_bmi160 == BMI160_NULL) {
+	return E_BMI160_NULL_PTR;
+} else {
+	if (v_step_counter_u8 <= BMI160_MAX_GYRO_STEP_COUNTER) {
+		/* write the step counter */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+		(p_bmi160->dev_addr,
+		BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			BMI160_SET_BITSLICE(v_data_u8,
+			BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE,
+			v_step_counter_u8);
+			com_rslt +=
+			p_bmi160->BMI160_BUS_WRITE_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_BMI160_OUT_OF_RANGE;
+	}
+}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API sets the step counter mode
+ *
+ *
+ *  @param  v_step_mode_u8 : The value of step counter mode
+ *  value    |   mode
+ * ----------|-----------
+ *   0       | BMI160_STEP_NORMAL_MODE
+ *   1       | BMI160_STEP_SENSITIVE_MODE
+ *   2       | BMI160_STEP_ROBUST_MODE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_mode(u8 v_step_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	switch (v_step_mode_u8) {
+	case BMI160_STEP_NORMAL_MODE:
+		com_rslt = bmi160_set_step_config(
+		STEP_CONFIG_NORMAL);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_STEP_SENSITIVE_MODE:
+		com_rslt = bmi160_set_step_config(
+		STEP_CONFIG_SENSITIVE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_STEP_ROBUST_MODE:
+		com_rslt = bmi160_set_step_config(
+		STEP_CONFIG_ROBUST);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+
+	return com_rslt;
+}
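+/*
+ * Usage sketch for the step counter (illustrative only). It assumes the
+ * device structure is already initialised and that writing 1 enables
+ * the counter bit in register 0x7B bit 3; error handling is reduced to
+ * accumulating the return codes, as the driver itself does.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *	u16 steps = 0;
+ *
+ *	// pick a detection profile, then enable and read the counter
+ *	rslt = bmi160_set_step_mode(BMI160_STEP_NORMAL_MODE);
+ *	rslt += bmi160_set_step_counter_enable(0x01);
+ *	rslt += bmi160_read_step_count(&steps);
+ */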
+/*!
+ *	@brief This API is used to map and enable the significant motion
+ *	interrupt
+ *
+ *
+ *  @param  v_significant_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  BMI160_MAP_INTR1
+ *   1       |  BMI160_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_map_significant_motion_intr(
+u8 v_significant_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_sig_motion_u8 = BMI160_INIT_VALUE;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	u8 v_any_motion_intr1_stat_u8 = BMI160_ENABLE_ANY_MOTION_INTR1;
+	u8 v_any_motion_intr2_stat_u8 = BMI160_ENABLE_ANY_MOTION_INTR2;
+	u8 v_any_motion_axis_stat_u8 = BMI160_ENABLE_ANY_MOTION_AXIS;
+	/* enable the significant motion interrupt */
+	com_rslt = bmi160_get_intr_significant_motion_select(&v_sig_motion_u8);
+	if (v_sig_motion_u8 != BMI160_SIG_MOTION_STAT_HIGH)
+		com_rslt += bmi160_set_intr_significant_motion_select(
+		BMI160_SIG_MOTION_INTR_ENABLE);
+	switch (v_significant_u8) {
+	case BMI160_MAP_INTR1:
+		/* interrupt */
+		com_rslt += bmi160_read_reg(
+		BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_intr1_stat_u8;
+		/* map the significant motion interrupt to any-motion interrupt1*/
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* axis*/
+		com_rslt = bmi160_read_reg(BMI160_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_axis_stat_u8;
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+
+	case BMI160_MAP_INTR2:
+		/* map the significant motion interrupt to any-motion interrupt2*/
+		com_rslt += bmi160_read_reg(
+		BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_intr2_stat_u8;
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* axis*/
+		com_rslt = bmi160_read_reg(BMI160_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_axis_stat_u8;
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+
+	}
+	return com_rslt;
+}
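+/*
+ * Usage sketch (illustrative only): routing the significant motion
+ * engine to the INT1 pin. As the code above shows, the helper also
+ * enables the significant motion feature and the any-motion axes.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_map_significant_motion_intr(BMI160_MAP_INTR1);
+ */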
+/*!
+ *	@brief This API is used to map and enable the step detector
+ *	interrupt
+ *
+ *
+ *  @param  v_step_detector_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  BMI160_MAP_INTR1
+ *   1       |  BMI160_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_map_step_detector_intr(
+u8 v_step_detector_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_step_det_u8 = BMI160_INIT_VALUE;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	u8 v_low_g_intr_u81_stat_u8 = BMI160_LOW_G_INTR_STAT;
+	u8 v_low_g_intr_u82_stat_u8 = BMI160_LOW_G_INTR_STAT;
+	u8 v_low_g_enable_u8 = BMI160_ENABLE_LOW_G;
+	/* read the v_status_s8 of step detector interrupt*/
+	com_rslt = bmi160_get_step_detector_enable(&v_step_det_u8);
+	if (v_step_det_u8 != BMI160_STEP_DET_STAT_HIGH)
+		com_rslt += bmi160_set_step_detector_enable(
+		BMI160_STEP_DETECT_INTR_ENABLE);
+	switch (v_step_detector_u8) {
+	case BMI160_MAP_INTR1:
+		com_rslt += bmi160_read_reg(
+		BMI160_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_intr_u81_stat_u8;
+		/* map the step detector interrupt
+		to Low-g interrupt 1*/
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* Enable the Low-g interrupt*/
+		com_rslt = bmi160_read_reg(
+		BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_enable_u8;
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_MAP_INTR2:
+		/* map the step detector interrupt
+		to Low-g interrupt 2*/
+		com_rslt += bmi160_read_reg(
+		BMI160_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_intr_u82_stat_u8;
+
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* Enable the Low-g interrupt*/
+		com_rslt = bmi160_read_reg(
+		BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_enable_u8;
+		com_rslt += bmi160_write_reg(
+		BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to clear (reset) the step counter
+ *
+ *
+ *  @param  : None
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_clear_step_counter(void)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* clear the step counter*/
+	com_rslt = bmi160_set_command_register(RESET_STEP_COUNTER);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+
+}
+ /*!
+ *	@brief This API writes a value to the command register 0x7E bits 0 to 7
+ *
+ *
+ *  @param  v_command_reg_u8 : The value to write command register
+ *  value   |  Description
+ * ---------|--------------------------------------------------------
+ *	0x00	|	Reserved
+ *  0x03	|	Starts fast offset calibration for the accel and gyro
+ *	0x10	|	Sets the PMU mode for the Accelerometer to suspend
+ *	0x11	|	Sets the PMU mode for the Accelerometer to normal
+ *	0x12	|	Sets the PMU mode for the Accelerometer to low power
+ *  0x14	|	Sets the PMU mode for the Gyroscope to suspend
+ *	0x15	|	Sets the PMU mode for the Gyroscope to normal
+ *	0x16	|	Reserved
+ *	0x17	|	Sets the PMU mode for the Gyroscope to fast start-up
+ *  0x18	|	Sets the PMU mode for the Magnetometer to suspend
+ *	0x19	|	Sets the PMU mode for the Magnetometer to normal
+ *	0x1A	|	Sets the PMU mode for the Magnetometer to low power
+ *	0xB0	|	Clears all data in the FIFO
+ *  0xB1	|	Resets the interrupt engine
+ *	0xB2	|	step_cnt_clr Clears the step counter
+ *	0xB6	|	Triggers a reset
+ *	0x37	|	See extmode_en_last
+ *	0x9A	|	See extmode_en_last
+ *	0xC0	|	Enable the extended mode
+ *  0xC4	|	Erase NVM cell
+ *	0xC8	|	Load NVM cell
+ *	0xF0	|	Reset acceleration data path
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_command_register(u8 v_command_reg_u8)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write command register */
+			com_rslt = p_bmi160->BMI160_BUS_WRITE_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_CMD_COMMANDS__REG,
+			&v_command_reg_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
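+/*
+ * Usage sketch (illustrative only): switching the accel and gyro PMU
+ * modes through the command register, using the documented values
+ * 0x11 (accel normal) and 0x15 (gyro normal). A wait for the PMU
+ * switching time given in the datasheet is assumed between commands;
+ * the exact delay is not stated in this file.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_command_register(0x11);
+ *	// wait for the accel PMU switching time (see datasheet)
+ *	rslt += bmi160_set_command_register(0x15);
+ *	// wait for the gyro PMU switching time (see datasheet)
+ */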
+ /*!
+ *	@brief This API reads the target page from register 0x7F bits 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_target_page(u8 *v_target_page_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* read the page*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+			p_bmi160->dev_addr,
+			BMI160_CMD_TARGET_PAGE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			*v_target_page_u8 = BMI160_GET_BITSLICE(v_data_u8,
+			BMI160_CMD_TARGET_PAGE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the target page to register 0x7F bits 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_target_page(u8 v_target_page_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_target_page_u8 <= BMI160_MAX_TARGET_PAGE) {
+			/* write the page*/
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_CMD_TARGET_PAGE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_CMD_TARGET_PAGE,
+				v_target_page_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_CMD_TARGET_PAGE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the page enable bit from register 0x7F bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_paging_enable(u8 *v_page_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* read the page enable */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+		p_bmi160->dev_addr,
+		BMI160_CMD_PAGING_EN__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		*v_page_enable_u8 = BMI160_GET_BITSLICE(v_data_u8,
+		BMI160_CMD_PAGING_EN);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the page enable bit to register 0x7F bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_paging_enable(
+u8 v_page_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		if (v_page_enable_u8 <= BMI160_MAX_VALUE_PAGE) {
+			/* write the page enable */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_CMD_PAGING_EN__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_CMD_PAGING_EN,
+				v_page_enable_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_CMD_PAGING_EN__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
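+/*
+ * Usage sketch (illustrative only; the ordering is an assumption, so
+ * consult the datasheet before relying on it): selecting the chip-level
+ * trim/test page and then returning to the user page.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_set_target_page(1);	// 1 = trim/test page
+ *	rslt += bmi160_set_paging_enable(1);	// enable paging
+ *	// ... access the extended registers here ...
+ *	rslt += bmi160_set_paging_enable(0);	// disable paging
+ *	rslt += bmi160_set_target_page(0);	// back to the user page
+ */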
+ /*!
+ *	@brief This API reads the
+ *	pull-up configuration from register 0x85 bits 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_pullup_configuration(
+u8 *v_control_pullup_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt  = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+		/* read pull up value */
+		com_rslt = p_bmi160->BMI160_BUS_READ_FUNC(
+		p_bmi160->dev_addr,
+		BMI160_COM_C_TRIM_FIVE__REG,
+		&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		*v_control_pullup_u8 = BMI160_GET_BITSLICE(v_data_u8,
+		BMI160_COM_C_TRIM_FIVE);
+		}
+	return com_rslt;
+
+}
+ /*!
+ *	@brief This API writes the
+ *	pull-up configuration to register 0x85 bits 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_pullup_configuration(
+u8 v_control_pullup_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+		} else {
+			/* write  pull up value */
+			com_rslt = p_bmi160->BMI160_BUS_READ_FUNC
+			(p_bmi160->dev_addr,
+			BMI160_COM_C_TRIM_FIVE__REG,
+			&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				BMI160_SET_BITSLICE(v_data_u8,
+				BMI160_COM_C_TRIM_FIVE,
+				v_control_pullup_u8);
+				com_rslt +=
+				p_bmi160->BMI160_BUS_WRITE_FUNC
+				(p_bmi160->dev_addr,
+				BMI160_COM_C_TRIM_FIVE__REG,
+				&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+
+/*!
+ *	@brief This function is used to read the compensated mag values.
+ *	Before reading the compensated mag data,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		setting the interface mode to 0x02.
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND mode,
+ *		by using the bmi160_get_mag_pmu_status() function.
+ *		If the secondary-interface power mode is SUSPEND,
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_compensate_xyz(
+struct bmi160_mag_xyz_s32_t *mag_comp_xyz)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	struct bmi160_mag_xyzr_t mag_xyzr;
+	com_rslt = bmi160_read_mag_xyzr(&mag_xyzr);
+	if (com_rslt)
+		return com_rslt;
+	/* Compensation for X axis */
+	mag_comp_xyz->x = bmi160_bmm150_mag_compensate_X(
+	mag_xyzr.x, mag_xyzr.r);
+
+	/* Compensation for Y axis */
+	mag_comp_xyz->y = bmi160_bmm150_mag_compensate_Y(
+	mag_xyzr.y, mag_xyzr.r);
+
+	/* Compensation for Z axis */
+	mag_comp_xyz->z = bmi160_bmm150_mag_compensate_Z(
+	mag_xyzr.z, mag_xyzr.r);
+
+	return com_rslt;
+}
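+/*
+ * Usage sketch (illustrative only): reading one set of compensated mag
+ * data, assuming the prerequisites in the comment above are met (mag
+ * interface enabled and the mag PMU in NORMAL mode).
+ *
+ *	struct bmi160_mag_xyz_s32_t mag;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_bmm150_mag_compensate_xyz(&mag);
+ *	// mag.x, mag.y and mag.z now hold the compensated values
+ */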
+
+/*!
+ *	@brief This function is used to read the compensated mag values.
+ *	Before reading the compensated mag data,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		setting the interface mode to 0x02.
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND mode,
+ *		by using the bmi160_get_mag_pmu_status() function.
+ *		If the secondary-interface power mode is SUSPEND,
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_compensate_xyz_raw(
+struct bmi160_mag_xyz_s32_t *mag_comp_xyz, struct bmi160_mag_xyzr_t mag_xyzr)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	/* Compensation for X axis */
+	mag_comp_xyz->x = bmi160_bmm150_mag_compensate_X(
+	mag_xyzr.x, mag_xyzr.r);
+
+	/* Compensation for Y axis */
+	mag_comp_xyz->y = bmi160_bmm150_mag_compensate_Y(
+	mag_xyzr.y, mag_xyzr.r);
+
+	/* Compensation for Z axis */
+	mag_comp_xyz->z = bmi160_bmm150_mag_compensate_Z(
+	mag_xyzr.z, mag_xyzr.r);
+
+	return com_rslt;
+}
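+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch),
+ * showing the difference between the two entry points above: the _raw
+ * variant compensates a sample the caller has already captured (for
+ * example from a FIFO frame) instead of issuing a fresh bus read. The
+ * hypothetical guard macro keeps the sketch out of the build.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_compensate_captured_sample(
+struct bmi160_mag_xyz_s32_t *out)
+{
+	struct bmi160_mag_xyzr_t raw;
+	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+
+	/* capture one raw X/Y/Z/R sample now ... */
+	com_rslt = bmi160_read_mag_xyzr(&raw);
+	if (com_rslt)
+		return com_rslt;
+	/* ... and compensate it later without touching the bus again */
+	bmi160_bmm150_mag_compensate_xyz_raw(out, raw);
+	return com_rslt;
+}
+#endif /* BMI160_USAGE_SKETCHES */
+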
+/*!
+ *	@brief This API computes the compensated BMM150 X data;
+ *	the output of X is returned as s32.
+ *	Before reading the compensated X data,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *
+ *  @param  v_mag_data_x_s16 : The value of mag raw X data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 bmi160_bmm150_mag_compensate_X(s16 v_mag_data_x_s16, u16 v_data_r_u16)
+{
+s32 inter_retval = BMI160_INIT_VALUE;
+/* no overflow */
+if (v_mag_data_x_s16 != BMI160_MAG_FLIP_OVERFLOW_ADCVAL) {
+	if ((v_data_r_u16 != 0)
+	&& (mag_trim.dig_xyz1 != 0)) {
+		inter_retval = ((s32)(((u16)
+		((((s32)mag_trim.dig_xyz1)
+		<< BMI160_SHIFT_BIT_POSITION_BY_14_BITS)/
+		 (v_data_r_u16 != 0 ?
+		 v_data_r_u16 : mag_trim.dig_xyz1))) -
+		((u16)0x4000)));
+	} else {
+		inter_retval = BMI160_MAG_OVERFLOW_OUTPUT;
+		return inter_retval;
+	}
+	inter_retval = ((s32)((((s32)v_mag_data_x_s16) *
+			((((((((s32)mag_trim.dig_xy2) *
+			((((s32)inter_retval) *
+			((s32)inter_retval))
+			>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS)) +
+			 (((s32)inter_retval) *
+			  ((s32)(((s16)mag_trim.dig_xy1)
+			  << BMI160_SHIFT_BIT_POSITION_BY_07_BITS))))
+			  >> BMI160_SHIFT_BIT_POSITION_BY_09_BITS) +
+		   ((s32)0x100000)) *
+		  ((s32)(((s16)mag_trim.dig_x2) +
+		  ((s16)0xA0))))
+		  >> BMI160_SHIFT_BIT_POSITION_BY_12_BITS))
+		  >> BMI160_SHIFT_BIT_POSITION_BY_13_BITS)) +
+		(((s16)mag_trim.dig_x1)
+		<< BMI160_SHIFT_BIT_POSITION_BY_03_BITS);
+	/* check the overflow output */
+	if (inter_retval == (s32)BMI160_MAG_OVERFLOW_OUTPUT)
+		inter_retval = BMI160_MAG_OVERFLOW_OUTPUT_S32;
+} else {
+	/* overflow */
+	inter_retval = BMI160_MAG_OVERFLOW_OUTPUT;
+}
+return inter_retval;
+}
+/*!
+ *	@brief This API computes the compensated BMM150 Y data;
+ *	the output of Y is returned as s32.
+ *	Before reading the compensated Y data,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *
+ *  @param  v_mag_data_y_s16 : The value of mag raw Y data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Y data value output as s32
+ */
+s32 bmi160_bmm150_mag_compensate_Y(s16 v_mag_data_y_s16, u16 v_data_r_u16)
+{
+s32 inter_retval = BMI160_INIT_VALUE;
+/* no overflow */
+if (v_mag_data_y_s16 != BMI160_MAG_FLIP_OVERFLOW_ADCVAL) {
+	if ((v_data_r_u16 != 0)
+	&& (mag_trim.dig_xyz1 != 0)) {
+		inter_retval = ((s32)(((u16)(((
+		(s32)mag_trim.dig_xyz1)
+		<< BMI160_SHIFT_BIT_POSITION_BY_14_BITS) /
+		(v_data_r_u16 != 0 ?
+		 v_data_r_u16 : mag_trim.dig_xyz1))) -
+		((u16)0x4000)));
+		} else {
+			inter_retval = BMI160_MAG_OVERFLOW_OUTPUT;
+			return inter_retval;
+		}
+	inter_retval = ((s32)((((s32)v_mag_data_y_s16) * ((((((((s32)
+		mag_trim.dig_xy2) * ((((s32) inter_retval) *
+		((s32)inter_retval)) >> BMI160_SHIFT_BIT_POSITION_BY_07_BITS))
+		+ (((s32)inter_retval) *
+		((s32)(((s16)mag_trim.dig_xy1)
+		<< BMI160_SHIFT_BIT_POSITION_BY_07_BITS))))
+		>> BMI160_SHIFT_BIT_POSITION_BY_09_BITS) +
+		((s32)0x100000))
+		* ((s32)(((s16)mag_trim.dig_y2)
+		+ ((s16)0xA0))))
+		>> BMI160_SHIFT_BIT_POSITION_BY_12_BITS))
+		>> BMI160_SHIFT_BIT_POSITION_BY_13_BITS)) +
+		(((s16)mag_trim.dig_y1)
+		<< BMI160_SHIFT_BIT_POSITION_BY_03_BITS);
+	/* check the overflow output */
+	if (inter_retval == (s32)BMI160_MAG_OVERFLOW_OUTPUT)
+		inter_retval = BMI160_MAG_OVERFLOW_OUTPUT_S32;
+} else {
+	/* overflow */
+	inter_retval = BMI160_MAG_OVERFLOW_OUTPUT;
+}
+return inter_retval;
+}
+/*!
+ *	@brief This API computes the compensated BMM150 Z data;
+ *	the output of Z is returned as s32.
+ *	Before reading the compensated Z data,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *
+ *  @param  v_mag_data_z_s16 : The value of mag raw Z data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Z data value output as s32
+ */
+s32 bmi160_bmm150_mag_compensate_Z(s16 v_mag_data_z_s16, u16 v_data_r_u16)
+{
+	s32 retval = BMI160_INIT_VALUE;
+
+	if (v_mag_data_z_s16 != BMI160_MAG_HALL_OVERFLOW_ADCVAL) {
+		if ((v_data_r_u16 != 0)
+		   && (mag_trim.dig_z2 != 0)
+		/*   && (mag_trim.dig_z3 != 0)*/
+		   && (mag_trim.dig_z1 != 0)
+		   && (mag_trim.dig_xyz1 != 0)) {
+			retval = (((((s32)(v_mag_data_z_s16 - mag_trim.dig_z4))
+			<< BMI160_SHIFT_BIT_POSITION_BY_15_BITS) -
+			((((s32)mag_trim.dig_z3) *
+			((s32)(((s16)v_data_r_u16) -
+			((s16)mag_trim.dig_xyz1))))
+			>> BMI160_SHIFT_BIT_POSITION_BY_02_BITS))/
+			(mag_trim.dig_z2 +
+			((s16)(((((s32)mag_trim.dig_z1) *
+			((((s16)v_data_r_u16)
+			<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT))) +
+			(1 << BMI160_SHIFT_BIT_POSITION_BY_15_BITS))
+			>> BMI160_SHIFT_BIT_POSITION_BY_16_BITS))));
+		}
+	} else {
+		retval = BMI160_MAG_OVERFLOW_OUTPUT;
+	}
+	return retval;
+}
+ /*!
+ *	@brief This function initializes the BMM150 sensor interface.
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_interface_init(void)
+{
+	/* variable used to return the status of the communication result */
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	u8 v_pull_value_u8 = BMI160_INIT_VALUE;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	/* accel operation mode to normal*/
+	com_rslt = bmi160_set_command_register(ACCEL_MODE_NORMAL);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* write the mag power mode as NORMAL*/
+	com_rslt += bmi160_set_mag_interface_normal();
+
+	/* register 0x7E write the 0x37, 0x9A and 0x30*/
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_ONE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_TWO);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_THREE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/*switch the page1*/
+	com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_target_page(&v_data_u8);
+	com_rslt += bmi160_set_paging_enable(BMI160_WRITE_ENABLE_PAGE1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_paging_enable(&v_data_u8);
+	/* enable the pull-up configuration: set bits 4 and 5
+	of register 0x05 to 0b10 */
+	bmi160_get_pullup_configuration(&v_pull_value_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	v_pull_value_u8 = v_pull_value_u8 | BMI160_PULL_UP_DATA;
+	com_rslt += bmi160_set_pullup_configuration(v_pull_value_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/*switch the page0*/
+	com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE0);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_target_page(&v_data_u8);
+	/* Write the BMM150 i2c address*/
+	com_rslt += bmi160_set_i2c_device_addr(BMI160_AUX_BMM150_I2C_ADDRESS);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* enable the mag interface to manual mode*/
+	com_rslt += bmi160_set_mag_manual_enable(BMI160_MANUAL_ENABLE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_mag_manual_enable(&v_data_u8);
+	/*Enable the MAG interface */
+	com_rslt += bmi160_set_if_mode(BMI160_ENABLE_MAG_IF_MODE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_if_mode(&v_data_u8);
+	/* Mag normal mode*/
+	com_rslt += bmi160_bmm150_mag_wakeup();
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* Read the BMM150 device id is 0x32*/
+	/*com_rslt += bmi160_set_mag_read_addr(BMI160_BMM150_CHIP_ID);*/
+	/*p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);*/
+	/*com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);*/
+	/**v_chip_id_u8 = v_data_u8;*/
+	/*p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);*/
+	/* write the power mode register*/
+	com_rslt += bmi160_set_mag_write_data(BMI160_BMM_POWER_MODE_REG);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* write register 0x4C to set the power mode to normal */
+	com_rslt += bmi160_set_mag_write_addr(
+	BMI160_BMM150_POWE_MODE_REG);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* read the mag trim values*/
+	com_rslt += bmi160_read_bmm150_mag_trim();
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	/* To avoid the auto mode enable when manual mode operation running*/
+	V_bmm150_maual_auto_condition_u8 = BMI160_MANUAL_ENABLE;
+	/* write the XY and Z repetitions*/
+	com_rslt += bmi160_set_bmm150_mag_presetmode(
+	BMI160_MAG_PRESETMODE_REGULAR);
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	/* To avoid the auto mode enable when manual mode operation running*/
+	V_bmm150_maual_auto_condition_u8 = BMI160_MANUAL_DISABLE;
+	/* Set the power mode of mag as force mode*/
+	/* the value to write is staged in register 0x4F
+	(indirect mag write data) */
+	com_rslt += bmi160_set_mag_write_data(BMI160_BMM150_FORCE_MODE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	/* write into power mode register*/
+	com_rslt += bmi160_set_mag_write_addr(
+	BMI160_BMM150_POWE_MODE_REG);
+	/* set the mag output data rate to 25 Hz */
+	com_rslt += bmi160_set_mag_output_data_rate(
+	BMI160_MAG_OUTPUT_DATA_RATE_25HZ);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	/* When mag interface is auto mode - The mag read address
+	starts the register 0x42*/
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_BMM150_DATA_REG);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* enable mag interface to auto mode*/
+	com_rslt += bmi160_set_mag_manual_enable(BMI160_MANUAL_DISABLE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_mag_manual_enable(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+}
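+
+/*
+ * Editor's note: illustrative bring-up sketch only, not part of the
+ * original patch. A probe path would typically bind the device structure
+ * first and only then run the BMM150 interface init above; bmi160_init()
+ * and struct bmi160_t are assumed to be provided earlier in this driver
+ * and are not defined by this hunk. Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_bmm150_bringup(
+struct bmi160_t *dev)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+
+	com_rslt = bmi160_init(dev);	/* binds p_bmi160 and bus hooks */
+	if (com_rslt)
+		return com_rslt;
+	/* sets up the secondary interface, trim data and 25 Hz ODR */
+	return bmi160_bmm150_mag_interface_init();
+}
+#endif /* BMI160_USAGE_SKETCHES */
+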
+ /*!
+ *	@brief This function enables the magnetometer
+ *	power control bit (BMM150 power on).
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_wakeup(void)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	u8 v_try_times_u8 = BMI160_BMM150_MAX_RETRY_WAKEUP;
+	u8 v_power_control_bit_u8 = BMI160_INIT_VALUE;
+	u8 i = BMI160_INIT_VALUE;
+
+	for (i = BMI160_INIT_VALUE; i < v_try_times_u8; i++) {
+		com_rslt = bmi160_set_mag_write_data(BMI160_BMM150_POWER_ON);
+		p_bmi160->delay_msec(BMI160_BMM150_WAKEUP_DELAY1);
+		/*write 0x4B register to enable power control bit*/
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_POWE_CONTROL_REG);
+		p_bmi160->delay_msec(BMI160_BMM150_WAKEUP_DELAY2);
+		com_rslt += bmi160_set_mag_read_addr(
+		BMI160_BMM150_POWE_CONTROL_REG);
+		/* 0x04 is secondary read mag x lsb register */
+		p_bmi160->delay_msec(BMI160_BMM150_WAKEUP_DELAY3);
+		com_rslt += bmi160_read_reg(BMI160_USER_DATA_0_ADDR,
+		&v_power_control_bit_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+		v_power_control_bit_u8 = BMI160_BMM150_SET_POWER_CONTROL
+		& v_power_control_bit_u8;
+		if (v_power_control_bit_u8 == BMI160_BMM150_POWER_ON)
+			break;
+	}
+	com_rslt = (i >= v_try_times_u8) ?
+	BMI160_BMM150_POWER_ON_FAIL : BMI160_BMM150_POWER_ON_SUCCESS;
+	return com_rslt;
+}
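+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch).
+ * The wakeup helper above retries up to BMI160_BMM150_MAX_RETRY_WAKEUP
+ * times and reports BMI160_BMM150_POWER_ON_SUCCESS / _FAIL rather than a
+ * plain bus result, so callers compare against those constants.
+ * Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_check_bmm150_wakeup(void)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = bmi160_bmm150_mag_wakeup();
+
+	if (com_rslt != BMI160_BMM150_POWER_ON_SUCCESS)
+		printk(KERN_INFO "bmi160: BMM150 power-on retries exhausted\n");
+	return com_rslt;
+}
+#endif /* BMI160_USAGE_SKETCHES */
+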
+ /*!
+ *	@brief This function sets the magnetometer and
+ *	secondary-interface power mode.
+ *	@note
+ *	Before setting the mag power mode,
+ *	make sure the following point is addressed:
+ *		check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | BMI160_MAG_FORCE_MODE
+ *   1       | BMI160_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_bmm150_mag_and_secondary_if_power_mode(
+u8 v_mag_sec_if_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	/* set the accel power mode to NORMAL*/
+	com_rslt = bmi160_set_command_register(ACCEL_MODE_NORMAL);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+	/* set mag interface manual mode*/
+	if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE)	{
+		com_rslt += bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_ENABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+	com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+
+	switch (v_mag_sec_if_pow_mode_u8) {
+	case BMI160_MAG_FORCE_MODE:
+		/* set the secondary mag power mode as NORMAL*/
+		com_rslt += bmi160_set_mag_interface_normal();
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+		/* set the mag power mode as FORCE mode*/
+		com_rslt += bmi160_bmm150_mag_set_power_mode(FORCE_MODE);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_MAG_SUSPEND_MODE:
+		/* set the mag power mode as SUSPEND mode*/
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+		com_rslt += bmi160_bmm150_mag_set_power_mode(SUSPEND_MODE);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* set the secondary mag power mode as SUSPEND*/
+		com_rslt += bmi160_set_command_register(MAG_MODE_SUSPEND);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+	if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE) {
+		/* set mag interface auto mode*/
+		com_rslt += bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_DISABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+	com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+	return com_rslt;
+}
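+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch).
+ * It shows the intended pairing of the two modes handled above: FORCE
+ * mode while magnetometer samples are needed, SUSPEND once the
+ * magnetometer is idle. Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_bmm150_enable(u8 enable)
+{
+	return bmi160_set_bmm150_mag_and_secondary_if_power_mode(
+	enable ? BMI160_MAG_FORCE_MODE : BMI160_MAG_SUSPEND_MODE);
+}
+#endif /* BMI160_USAGE_SKETCHES */
+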
+/*!
+ *	@brief This function sets the BMM150 magnetometer
+ *	power mode.
+ *	@note
+ *	Before setting the mag power mode,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *	@param v_mag_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | FORCE_MODE
+ *   1       | SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_set_power_mode(
+u8 v_mag_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	u8 manual_enable_status = 0;
+	/* set mag interface manual mode*/
+	if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE) {
+		com_rslt = bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_ENABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_get_mag_manual_enable(&manual_enable_status);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		printk(KERN_INFO "1com_rslt:%d, manual:%d, manual_read:%d\n",
+		com_rslt, p_bmi160->mag_manual_enable, manual_enable_status);
+	}
+	printk(KERN_INFO "2com_rslt:%d, manual:%d, manual_read:%d\n",
+	com_rslt, p_bmi160->mag_manual_enable, manual_enable_status);
+
+	switch (v_mag_pow_mode_u8) {
+	case FORCE_MODE:
+		/* Set the power control bit enabled */
+		com_rslt = bmi160_bmm150_mag_wakeup();
+		/* write the mag power mode as FORCE mode*/
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_BMM150_FORCE_MODE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_POWE_MODE_REG);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* To avoid the auto mode enable when manual
+		mode operation running*/
+		V_bmm150_maual_auto_condition_u8 = BMI160_MANUAL_ENABLE;
+		/* set the preset mode */
+		com_rslt += bmi160_set_bmm150_mag_presetmode(
+		BMI160_MAG_PRESETMODE_REGULAR);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* To avoid the auto mode enable when manual
+		mode operation running*/
+		V_bmm150_maual_auto_condition_u8 = BMI160_MANUAL_DISABLE;
+		/* set the mag read address to data registers*/
+		com_rslt += bmi160_set_mag_read_addr(
+		BMI160_BMM150_DATA_REG);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case SUSPEND_MODE:
+		printk(KERN_INFO "3com_rslt:%d, manual:%d, read_manual:%d\n",
+		com_rslt, p_bmi160->mag_manual_enable, manual_enable_status);
+		/* Set the power mode of mag as suspend mode*/
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_BMM150_POWER_OFF);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_POWE_CONTROL_REG);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+	printk(KERN_INFO "4com_rslt:%d, manual:%d, manual_read:%d\n",
+	com_rslt, p_bmi160->mag_manual_enable, manual_enable_status);
+	/* set mag interface auto mode*/
+	if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE) {
+		com_rslt += bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_DISABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_get_mag_manual_enable(&manual_enable_status);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "5com_rslt:%d, manual:%d, manual_read:%d\n",
+	com_rslt, p_bmi160->mag_manual_enable, manual_enable_status);
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets the pre-set modes of the BMM150.
+ *	A pre-set mode selects the data rate and the XY and Z repetitions.
+ *
+ *	@note
+ *	Before setting the mag preset mode,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *  @param  v_mode_u8: The value of pre-set mode selection value
+ *  value    |  pre_set mode
+ * ----------|------------
+ *   1       | BMI160_MAG_PRESETMODE_LOWPOWER
+ *   2       | BMI160_MAG_PRESETMODE_REGULAR
+ *   3       | BMI160_MAG_PRESETMODE_HIGHACCURACY
+ *   4       | BMI160_MAG_PRESETMODE_ENHANCED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_bmm150_mag_presetmode(u8 v_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	switch (v_mode_u8) {
+	case BMI160_MAG_PRESETMODE_LOWPOWER:
+		/* write the XY repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt = bmi160_set_mag_write_data(
+		BMI160_MAG_LOWPOWER_REPXY);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_XY_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_LOWPOWER_REPZ);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_Z_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* set the mag data rate to 10 Hz via register 0x4C */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_LOWPOWER_DR);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_POWE_MODE_REG);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_MAG_PRESETMODE_REGULAR:
+		/* write the XY repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt = bmi160_set_mag_write_data(
+		BMI160_MAG_REGULAR_REPXY);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_XY_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_REGULAR_REPZ);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_Z_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* set the mag data rate to 10 Hz via register 0x4C */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_REGULAR_DR);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_POWE_MODE_REG);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_MAG_PRESETMODE_HIGHACCURACY:
+		/* write the XY repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt = bmi160_set_mag_write_data(
+		BMI160_MAG_HIGHACCURACY_REPXY);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_XY_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_HIGHACCURACY_REPZ);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_Z_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* set the mag data rate to 20 Hz via register 0x4C */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_HIGHACCURACY_DR);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_POWE_MODE_REG);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_MAG_PRESETMODE_ENHANCED:
+		/* write the XY repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt = bmi160_set_mag_write_data(
+		BMI160_MAG_ENHANCED_REPXY);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_XY_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions */
+		/* the value is staged in register 0x4F
+		(indirect mag write data) */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_ENHANCED_REPZ);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_Z_REP);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		/* set the mag data rate to 10 Hz via register 0x4C */
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_MAG_ENHANCED_DR);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		BMI160_BMM150_POWE_MODE_REG);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+
+	return com_rslt;
+}
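+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch).
+ * Each preset above is a fixed (XY repetition, Z repetition, data rate)
+ * triple written over the indirect mag interface, so selecting a preset
+ * is a single call; the init path above runs it with the mag interface in
+ * manual mode. Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_select_high_accuracy(void)
+{
+	/* trades current and conversion time for lower noise */
+	return bmi160_set_bmm150_mag_presetmode(
+	BMI160_MAG_PRESETMODE_HIGHACCURACY);
+}
+#endif /* BMI160_USAGE_SKETCHES */
+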
+ /*!
+ *	@brief This function reads the magnetometer trim values.
+ *
+ *	@note
+ *	Before reading the mag trim values,
+ *	make sure the following two points are addressed:
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_bmm150_mag_trim(void)
+{
+	/* variable used to return the status of the communication result */
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array holding the bmm150 trim data
+	*/
+	u8 v_data_u8[BMI160_MAG_TRIM_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE};
+	/* read dig_x1 value */
+	com_rslt = bmi160_set_mag_read_addr(
+	BMI160_MAG_DIG_X1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_X1],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_x1 = v_data_u8[BMI160_BMM150_DIG_X1];
+	/* read dig_y1 value */
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_MAG_DIG_Y1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_Y1],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_y1 = v_data_u8[BMI160_BMM150_DIG_Y1];
+
+	/* read dig_x2 value */
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_MAG_DIG_X2);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_X2],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_x2 = v_data_u8[BMI160_BMM150_DIG_X2];
+	/* read dig_y2 value */
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_MAG_DIG_Y2);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_Y3],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_y2 = v_data_u8[BMI160_BMM150_DIG_Y3];
+
+	/* read dig_xy1 value */
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_MAG_DIG_XY1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_XY1],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_xy1 = v_data_u8[BMI160_BMM150_DIG_XY1];
+	/* read dig_xy2 value */
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_MAG_DIG_XY2);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 ls register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_XY2],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_xy2 = v_data_u8[BMI160_BMM150_DIG_XY2];
+
+	/* read dig_z1 lsb value */
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_MAG_DIG_Z1_LSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_Z1_LSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* read dig_z1 msb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_Z1_MSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_Z1_MSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_z1 =
+	(u16)((((u32)((u8)v_data_u8[BMI160_BMM150_DIG_Z1_MSB]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_BMM150_DIG_Z1_LSB]));
+
+	/* read dig_z2 lsb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_Z2_LSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_Z2_LSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* read dig_z2 msb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_Z2_MSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_Z2_MSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_z2 =
+	(s16)((((s32)((s8)v_data_u8[BMI160_BMM150_DIG_Z2_MSB]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_BMM150_DIG_Z2_LSB]));
+
+	/* read dig_z3 lsb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_Z3_LSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_DIG_Z3_LSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* read dig_z3 msb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_Z3_MSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_DIG_Z3_MSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_z3 =
+	(s16)((((s32)((s8)v_data_u8[BMI160_BMM150_DIG_DIG_Z3_MSB]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_BMM150_DIG_DIG_Z3_LSB]));
+
+	/* read dig_z4 lsb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_Z4_LSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_DIG_Z4_LSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* read dig_z4 msb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_Z4_MSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_DIG_Z4_MSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_z4 =
+	(s16)((((s32)((s8)v_data_u8[BMI160_BMM150_DIG_DIG_Z4_MSB]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_BMM150_DIG_DIG_Z4_LSB]));
+
+	/* read dig_xyz1 lsb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_XYZ1_LSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_DIG_XYZ1_LSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* read dig_xyz1 msb value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_MAG_DIG_XYZ1_MSB);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[BMI160_BMM150_DIG_DIG_XYZ1_MSB],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	mag_trim.dig_xyz1 =
+	(u16)((((u32)((u8)v_data_u8[BMI160_BMM150_DIG_DIG_XYZ1_MSB]))
+			<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[BMI160_BMM150_DIG_DIG_XYZ1_LSB]));
+
+	return com_rslt;
+}
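+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch).
+ * Every 16-bit trim word above is rebuilt the same way from two 8-bit
+ * indirect reads; the helpers below just restate that convention
+ * ((msb << 8) | lsb, signed or unsigned depending on the word).
+ * Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static u16 example_trim_word_u16(u8 msb, u8 lsb)
+{
+	return (u16)((((u32)msb)
+		<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | lsb);
+}
+
+static s16 example_trim_word_s16(u8 msb, u8 lsb)
+{
+	return (s16)((((s32)((s8)msb))
+		<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | lsb);
+}
+#endif /* BMI160_USAGE_SKETCHES */
+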
+ /*!
+ *	@brief This function initializes the
+ *	AKM09911 or AKM09912 sensor interface.
+ *
+ *
+ *	@param v_akm_i2c_address_u8: The value of device address
+ *	AKM sensor   |  Slave address
+ * --------------|---------------------
+ *  AKM09911     |  AKM09911_I2C_ADDR_1
+ *     -         |  and AKM09911_I2C_ADDR_2
+ *  AKM09912     |  AKM09912_I2C_ADDR_1
+ *     -         |  AKM09912_I2C_ADDR_2
+ *     -         |  AKM09912_I2C_ADDR_3
+ *     -         |  AKM09912_I2C_ADDR_4
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm_mag_interface_init(
+u8 v_akm_i2c_address_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_pull_value_u8 = BMI160_INIT_VALUE;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	u8 v_akm_chip_id_u8 = BMI160_INIT_VALUE;
+	/* accel operation mode to normal*/
+	com_rslt = bmi160_set_command_register(ACCEL_MODE_NORMAL);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_command_register(MAG_MODE_NORMAL);
+	p_bmi160->delay_msec(BMI160_AKM_INIT_DELAY);
+	bmi160_get_mag_power_mode_stat(&v_data_u8);
+	/* register 0x7E write the 0x37, 0x9A and 0x30*/
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_ONE);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_TWO);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_THREE);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/*switch the page1*/
+	com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_target_page(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_paging_enable(BMI160_WRITE_ENABLE_PAGE1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_paging_enable(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* enable the pull-up configuration: set bits 4 and 5
+	of register 0x05 to 0b10 */
+	bmi160_get_pullup_configuration(&v_pull_value_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	v_pull_value_u8 = v_pull_value_u8 | BMI160_PULL_UP_DATA;
+	com_rslt += bmi160_set_pullup_configuration(v_pull_value_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	/*switch the page0*/
+	com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE0);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_target_page(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* Write the AKM09911 or AKM09912 i2c address*/
+	com_rslt += bmi160_set_i2c_device_addr(v_akm_i2c_address_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* enable the mag interface to manual mode*/
+	com_rslt += bmi160_set_mag_manual_enable(BMI160_MANUAL_ENABLE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_mag_manual_enable(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/*Enable the MAG interface */
+	com_rslt += bmi160_set_if_mode(BMI160_ENABLE_MAG_IF_MODE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_if_mode(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	/* Set the AKM Fuse ROM mode */
+	/* Set value for fuse ROM mode*/
+	com_rslt += bmi160_set_mag_write_data(AKM_FUSE_ROM_MODE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* AKM mode address is 0x31*/
+	com_rslt += bmi160_set_mag_write_addr(AKM_POWER_MODE_REG);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Read the Fuse ROM v_data_u8 from registers
+	0x60,0x61 and 0x62*/
+	/* ASAX v_data_u8 */
+	com_rslt += bmi160_read_bst_akm_sensitivity_data();
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* read the device id of the AKM sensor
+	if device id is 0x05 - AKM09911
+	if device id is 0x04 - AKM09912*/
+	com_rslt += bmi160_set_mag_read_addr(AKM09912_CHIP_ID_REG);
+	/* 0x04 is mag_x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_akm_chip_id_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	printk(KERN_INFO "bmi160,addr:0x%x, akm_chip_id:0x%x\n",
+	v_akm_i2c_address_u8, v_akm_chip_id_u8);
+	/* Set the value for power-down mode*/
+	com_rslt += bmi160_set_mag_write_data(AKM_POWER_DOWN_MODE_DATA);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* AKM mode address is 0x31*/
+	com_rslt += bmi160_set_mag_write_addr(AKM_POWER_MODE_REG);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Set AKM Force mode*/
+	com_rslt += bmi160_set_mag_write_data(
+	AKM_SINGLE_MEASUREMENT_MODE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* AKM mode address is 0x31*/
+	com_rslt += bmi160_set_mag_write_addr(AKM_POWER_MODE_REG);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Set the AKM read xyz v_data_u8 address*/
+	com_rslt += bmi160_set_mag_read_addr(AKM_DATA_REGISTER);
+	/* set the mag output data rate to 25 Hz */
+	com_rslt += bmi160_set_mag_output_data_rate(
+	BMI160_MAG_OUTPUT_DATA_RATE_25HZ);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* Enable mag interface to auto mode*/
+	com_rslt += bmi160_set_mag_manual_enable(BMI160_MANUAL_DISABLE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_mag_manual_enable(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+}
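+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch).
+ * The slave address argument selects which AKM part is attached; the
+ * AKM09911_I2C_ADDR_1 constant used here is taken from the address table
+ * in the comment above and is assumed to be defined in the driver header.
+ * Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static BMI160_RETURN_FUNCTION_TYPE example_akm09911_bringup(void)
+{
+	return bmi160_bst_akm_mag_interface_init(AKM09911_I2C_ADDR_1);
+}
+#endif /* BMI160_USAGE_SKETCHES */
+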
+ /*!
+ *	@brief This function reads the sensitivity data of the
+ *	AKM09911 or AKM09912.
+ *
+ *	@note Before reading the mag sensitivity values,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note	2.	Also confirm that the secondary-interface power
+ *		mode is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_bst_akm_sensitivity_data(void)
+{
+	/* variable used to return the status of the communication result */
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array holding the sensitivity ax,ay and az data*/
+	u8 v_data_u8[BMI160_AKM_SENSITIVITY_DATA_SIZE] = {
+	BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* read asax value */
+	com_rslt = bmi160_set_mag_read_addr(BMI160_BST_AKM_ASAX);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[AKM_ASAX],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	akm_asa_data.asax = v_data_u8[AKM_ASAX];
+	/* read asay value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_BST_AKM_ASAY);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[AKM_ASAY],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	akm_asa_data.asay = v_data_u8[AKM_ASAY];
+	/* read asaz value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_BST_AKM_ASAZ);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[AKM_ASAZ],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	akm_asa_data.asaz = v_data_u8[AKM_ASAZ];
+
+	return com_rslt;
+}
+/*!
+ *	@brief This API computes the compensated AKM09911 X data;
+ *	the output of X is returned as s32.
+ *	@note	Before reading the compensated X data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note 2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *  @param v_bst_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09911_compensate_X(s16 v_bst_akm_x_s16)
+{
+	/*Return value of AKM x compensated v_data_u8*/
+	s32 retval = BMI160_INIT_VALUE;
+	/* Convert raw v_data_u8 into compensated v_data_u8*/
+	retval = (v_bst_akm_x_s16 *
+	((akm_asa_data.asax/AKM09911_SENSITIVITY_DIV) +
+	BMI160_GEN_READ_WRITE_DATA_LENGTH));
+	return retval;
+}
+/*!
+ *	@brief This API computes the compensated AKM09911 Y data;
+ *	the output of Y is returned as s32.
+ *	@note	Before reading the compensated Y data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note 2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *  @param v_bst_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09911_compensate_Y(s16 v_bst_akm_y_s16)
+{
+	/*Return value of AKM y compensated v_data_u8*/
+	s32 retval = BMI160_INIT_VALUE;
+	/* Convert raw v_data_u8 into compensated v_data_u8*/
+	retval = (v_bst_akm_y_s16 *
+	((akm_asa_data.asay/AKM09911_SENSITIVITY_DIV) +
+	BMI160_GEN_READ_WRITE_DATA_LENGTH));
+	return retval;
+}
+/*!
+ *	@brief This API computes the compensated AKM09911 Z data;
+ *	the output of Z is returned as s32.
+ *	@note	Before reading the compensated Z data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note 2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *  @param v_bst_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09911_compensate_Z(s16 v_bst_akm_z_s16)
+{
+	/*Return value of AKM z compensated v_data_u8*/
+	s32 retval = BMI160_INIT_VALUE;
+	/* Convert raw v_data_u8 into compensated v_data_u8*/
+	retval = (v_bst_akm_z_s16 *
+	((akm_asa_data.asaz/AKM09911_SENSITIVITY_DIV) +
+	BMI160_GEN_READ_WRITE_DATA_LENGTH));
+	return retval;
+}
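+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch).
+ * The three AKM09911 helpers above all apply the same adjustment,
+ * raw * (ASA / AKM09911_SENSITIVITY_DIV + 1), in integer arithmetic
+ * (BMI160_GEN_READ_WRITE_DATA_LENGTH is used by this driver as the
+ * literal 1). The helper below just factors that shared expression out
+ * for a single axis value. Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static s32 example_akm09911_adjust(s16 raw, u8 asa)
+{
+	return raw * ((asa / AKM09911_SENSITIVITY_DIV) +
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+}
+#endif /* BMI160_USAGE_SKETCHES */
+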
+/*!
+ *	@brief This API computes the compensated AKM09912 X data;
+ *	the output of X is returned as s32.
+ *	@note	Before reading the compensated X data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note 2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *  @param v_bst_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09912_compensate_X(s16 v_bst_akm_x_s16)
+{
+	/*Return value of AKM x compensated data*/
+	s32 retval = BMI160_INIT_VALUE;
+	/* Convert raw data into compensated data*/
+	retval = v_bst_akm_x_s16 *
+	(akm_asa_data.asax + AKM09912_SENSITIVITY)
+	/ AKM09912_SENSITIVITY_DIV;
+	return retval;
+}
+/*!
+ *	@brief This API computes the compensated AKM09912 Y data;
+ *	the output of Y is returned as s32.
+ *	@note	Before reading the compensated Y data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note 2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *  @param v_bst_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09912_compensate_Y(s16 v_bst_akm_y_s16)
+{
+	/*Return value of AKM y compensated data*/
+	s32 retval = BMI160_INIT_VALUE;
+	/* Convert raw data into compensated data*/
+	retval = v_bst_akm_y_s16 *
+	(akm_asa_data.asay + AKM09912_SENSITIVITY)
+	/ AKM09912_SENSITIVITY_DIV;
+	return retval;
+}
+/*!
+ *	@brief This API computes the compensated AKM09912 Z data;
+ *	the output of Z is returned as s32.
+ *	@note	Before reading the compensated Z data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note 2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *  @param v_bst_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09912_compensate_Z(s16 v_bst_akm_z_s16)
+{
+	/*Return value of AKM z compensated data*/
+	s32 retval = BMI160_INIT_VALUE;
+	/* Convert raw data into compensated data*/
+	retval = v_bst_akm_z_s16 *
+	(akm_asa_data.asaz + AKM09912_SENSITIVITY)
+	/ AKM09912_SENSITIVITY_DIV;
+	return retval;
+}
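+
+/*
+ * Editor's note: illustrative sketch only (not part of the original patch).
+ * The AKM09912 helpers above share the adjustment
+ * raw * (ASA + AKM09912_SENSITIVITY) / AKM09912_SENSITIVITY_DIV; the
+ * helper below factors that expression out for a single axis value.
+ * Hypothetical guard macro as before.
+ */
+#ifdef BMI160_USAGE_SKETCHES
+static s32 example_akm09912_adjust(s16 raw, u8 asa)
+{
+	return raw * (asa + AKM09912_SENSITIVITY) / AKM09912_SENSITIVITY_DIV;
+}
+#endif /* BMI160_USAGE_SKETCHES */
+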
+ /*!
+ *	@brief This function reads the compensated values of the
+ *	AKM09911.
+ *	@note Before reading the compensated mag data,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note	2.	Also confirm that the secondary-interface power
+ *		mode is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm09911_compensate_xyz(
+struct bmi160_mag_xyz_s32_t *bst_akm_xyz)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	struct bmi160_mag_t mag_xyz;
+
+	com_rslt = bmi160_read_mag_xyz(&mag_xyz, BST_AKM);
+	/* Compensation for X axis */
+	bst_akm_xyz->x = bmi160_bst_akm09911_compensate_X(mag_xyz.x);
+
+	/* Compensation for Y axis */
+	bst_akm_xyz->y = bmi160_bst_akm09911_compensate_Y(mag_xyz.y);
+
+	/* Compensation for Z axis */
+	bst_akm_xyz->z = bmi160_bst_akm09911_compensate_Z(mag_xyz.z);
+
+	return com_rslt;
+}
+ /*!
+ *	@brief This function reads the compensated values of the
+ *	AKM09912.
+ *	@note Before reading the compensated mag data,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note	2.	Also confirm that the secondary-interface power
+ *		mode is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm09912_compensate_xyz(
+struct bmi160_mag_xyz_s32_t *bst_akm_xyz)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	struct bmi160_mag_t mag_xyz;
+
+	com_rslt = bmi160_read_mag_xyz(&mag_xyz, BST_AKM);
+	printk(KERN_INFO "akm09912_raw_x:%d, %d, %d, <%s>,<%d>\n",
+	mag_xyz.x, mag_xyz.y, mag_xyz.z, __func__, __LINE__);
+	/* Compensation for X axis */
+	bst_akm_xyz->x = bmi160_bst_akm09912_compensate_X(mag_xyz.x);
+
+	/* Compensation for Y axis */
+	bst_akm_xyz->y = bmi160_bst_akm09912_compensate_Y(mag_xyz.y);
+
+	/* Compensation for Z axis */
+	bst_akm_xyz->z = bmi160_bst_akm09912_compensate_Z(mag_xyz.z);
+	return com_rslt;
+}
+ /*!
+ *	@brief This function compensates an AKM09912 sample already
+ *	stored in the supplied structure (in place).
+ *	@note Before working with compensated mag data,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note	2.	Also confirm that the secondary-interface power
+ *		mode is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm09912_compensate_xyz_raw(
+struct bmi160_mag_xyz_s32_t *bst_akm_xyz)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Compensation for X axis */
+	bst_akm_xyz->x = bmi160_bst_akm09912_compensate_X(bst_akm_xyz->x);
+
+	/* Compensation for Y axis */
+	bst_akm_xyz->y = bmi160_bst_akm09912_compensate_Y(bst_akm_xyz->y);
+
+	/* Compensation for Z axis */
+	bst_akm_xyz->z = bmi160_bst_akm09912_compensate_Z(bst_akm_xyz->z);
+
+	return com_rslt;
+}
+/*!
+ *	@brief This function sets the AKM09911 or AKM09912
+ *	power mode.
+ *	@note Before setting the AKM power mode,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02).
+ *	@note	2.	Also confirm that the secondary-interface power
+ *		mode is not SUSPEND, by using the function
+ *		bmi160_get_mag_pmu_status().
+ *		If it is in SUSPEND mode, set it to NORMAL by calling
+ *		bmi160_set_command_register(0x19).
+ *
+ *	@param v_akm_pow_mode_u8 : The value of akm power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  AKM_POWER_DOWN_MODE
+ *    1     |  AKM_SINGLE_MEAS_MODE
+ *    2     |  FUSE_ROM_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm_set_powermode(
+u8 v_akm_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	/* set mag interface manual mode*/
+	if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE) {
+		com_rslt = bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_ENABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s>\n",
+	com_rslt, p_bmi160->mag_manual_enable, __func__);
+	switch (v_akm_pow_mode_u8) {
+	case AKM_POWER_DOWN_MODE:
+		/* Set the power mode of AKM as power down mode*/
+		com_rslt += bmi160_set_mag_write_data(AKM_POWER_DOWN_MODE_DATA);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	case AKM_SINGLE_MEAS_MODE:
+		/* Set the power mode of AKM as
+		single measurement mode*/
+		com_rslt += bmi160_set_mag_write_data
+		(AKM_SINGLE_MEASUREMENT_MODE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_read_addr(AKM_DATA_REGISTER);
+	break;
+	case FUSE_ROM_MODE:
+		/* Set the power mode of AKM as
+		Fuse ROM mode*/
+		com_rslt += bmi160_set_mag_write_data(AKM_FUSE_ROM_MODE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* Sensitivity v_data_u8 */
+		com_rslt += bmi160_read_bst_akm_sensitivity_data();
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* power down mode*/
+		com_rslt += bmi160_set_mag_write_data(AKM_POWER_DOWN_MODE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+	/* set mag interface auto mode*/
+	if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE) {
+		com_rslt += bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_DISABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+	com_rslt, p_bmi160->mag_manual_enable, __func__, __LINE__);
+	return com_rslt;
+}
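+/*
+ * Illustrative sketch (not part of the driver): a typical forced measurement
+ * cycle with the helper above puts the AKM into single measurement mode
+ * before a sample is taken and back into power down mode afterwards, using
+ * the mode values from the table above.
+ *
+ *	struct bmi160_mag_xyz_s32_t mag = {0, 0, 0};
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_bst_akm_set_powermode(AKM_SINGLE_MEAS_MODE);
+ *	rslt += bmi160_bst_akm09912_compensate_xyz(&mag);
+ *	rslt += bmi160_bst_akm_set_powermode(AKM_POWER_DOWN_MODE);
+ */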
+ /*!
+ *	@brief This function is used to set the magnetometer
+ *	power mode of the AKM09911 and AKM09912
+ *	@note Before setting the mag power mode,
+ *	make sure the following point is addressed:
+ *		Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by writing
+ *		0x02 with bmi160_set_if_mode(0x02)
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of secondary if power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  BMI160_MAG_FORCE_MODE
+ *    1     |  BMI160_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_bst_akm_and_secondary_if_powermode(
+u8 v_mag_sec_if_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* accel operation mode to normal*/
+	com_rslt = bmi160_set_command_register(ACCEL_MODE_NORMAL);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* set mag interface manual mode*/
+	if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE) {
+		com_rslt = bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_ENABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_ERR "com_rslt:%d, manual:%d,after setacc normal mode\n",
+	com_rslt, p_bmi160->mag_manual_enable);
+	switch (v_mag_sec_if_pow_mode_u8) {
+	case BMI160_MAG_FORCE_MODE:
+		/* set the secondary mag power mode as NORMAL*/
+		com_rslt += bmi160_set_mag_interface_normal();
+		/* set the akm power mode as single measurement mode*/
+		com_rslt += bmi160_bst_akm_set_powermode(AKM_SINGLE_MEAS_MODE);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_read_addr(AKM_DATA_REGISTER);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	break;
+	case BMI160_MAG_SUSPEND_MODE:
+		/* set the akm power mode as power down mode*/
+		com_rslt += bmi160_bst_akm_set_powermode(AKM_POWER_DOWN_MODE);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* set the secondary mag power mode as SUSPEND*/
+		com_rslt += bmi160_set_command_register(MAG_MODE_SUSPEND);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_BMI160_OUT_OF_RANGE;
+	break;
+	}
+	/* set mag interface auto mode*/
+	if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE) {
+		com_rslt += bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_DISABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+	return com_rslt;
+}
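+/*
+ * Illustrative sketch (not part of the driver): suspending and resuming the
+ * AKM together with the BMI160 secondary interface using the helper above.
+ *
+ *	// stop sampling: AKM power down, then secondary interface SUSPEND
+ *	bmi160_set_bst_akm_and_secondary_if_powermode(
+ *	BMI160_MAG_SUSPEND_MODE);
+ *	// resume sampling: secondary interface NORMAL, AKM single measurement
+ *	bmi160_set_bst_akm_and_secondary_if_powermode(
+ *	BMI160_MAG_FORCE_MODE);
+ */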
+/*!
+ *	@brief This function is used to initialize the YAMAHA YAS532 mag interface
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas532_mag_interface_init(
+void)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	u8 v_pull_value_u8 = BMI160_INIT_VALUE;
+	u8 v_data_u8 = BMI160_INIT_VALUE;
+	u8 i = BMI160_INIT_VALUE;
+	/* accel operation mode to normal*/
+	com_rslt = bmi160_set_command_register(ACCEL_MODE_NORMAL);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* write mag power mode as NORMAL*/
+	com_rslt += bmi160_set_mag_interface_normal();
+	/* register 0x7E write the 0x37, 0x9A and 0x30*/
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_ONE);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_TWO);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_THREE);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/*switch the page1*/
+	com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_target_page(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_paging_enable(BMI160_WRITE_ENABLE_PAGE1);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_paging_enable(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* enable the pullup configuration from
+	the register 0x05 bit 4 and 5 as 10*/
+	bmi160_get_pullup_configuration(&v_pull_value_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	v_pull_value_u8 = v_pull_value_u8 | BMI160_PULL_UP_DATA;
+	com_rslt += bmi160_set_pullup_configuration(v_pull_value_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/*switch the page0*/
+	com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE0);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_target_page(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* Write the YAS532 i2c address*/
+	com_rslt += bmi160_set_i2c_device_addr(BMI160_AUX_YAS532_I2C_ADDRESS);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* enable the mag interface to manual mode*/
+	com_rslt += bmi160_set_mag_manual_enable(BMI160_MANUAL_ENABLE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_mag_manual_enable(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/*Enable the MAG interface */
+	com_rslt += bmi160_set_if_mode(BMI160_ENABLE_MAG_IF_MODE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_if_mode(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	v_data_u8 = BMI160_MANUAL_DISABLE;
+	/* Read the YAS532 device id is 0x02*/
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS_DEVICE_ID_REG);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* Read the YAS532 calibration data*/
+	com_rslt += bmi160_bst_yamaha_yas532_calib_values();
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Assign the data acquisition mode*/
+	yas532_data.measure_state = YAS532_MAG_STATE_INIT_COIL;
+	/* Set the default offset as invalid offset*/
+	set_vector(yas532_data.v_hard_offset_s8, INVALID_OFFSET);
+	/* set the transform to zero */
+	yas532_data.transform = BMI160_NULL;
+	/* Assign overflow as zero*/
+	yas532_data.overflow = 0;
+	#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+		yas532_data.temp_data.num =
+		yas532_data.temp_data.idx = 0;
+	#endif
+	/* Assign the coef value*/
+	for (i = 0; i < 3; i++) {
+		yas532_data.coef[i] = yas532_version_ac_coef[i];
+		yas532_data.last_raw[i] = 0;
+	}
+	yas532_data.last_raw[3] = 0;
+	/* Set the initial values of yas532*/
+	com_rslt += bmi160_bst_yas532_set_initial_values();
+	/* write the mag v_data_bw_u8 as 25Hz*/
+	com_rslt += bmi160_set_mag_output_data_rate(
+	BMI160_MAG_OUTPUT_DATA_RATE_25HZ);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* Enable mag interface to auto mode*/
+	com_rslt += bmi160_set_mag_manual_enable(
+	BMI160_MANUAL_DISABLE);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	bmi160_get_mag_manual_enable(&v_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+}
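+/*
+ * Illustrative sketch (not part of the driver): after the one time interface
+ * init above, YAS532 data is normally obtained through
+ * bmi160_bst_yas532_measurement_xyz_data(); the acquisition command 0x01
+ * used here is the "normal acquisition mode, coil off" code from the tables
+ * further below, and temperature correction is enabled.
+ *
+ *	struct yas532_vector xyz;
+ *	u8 overflow = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = bmi160_bst_yamaha_yas532_mag_interface_init();
+ *	rslt += bmi160_bst_yas532_measurement_xyz_data(&xyz,
+ *	&overflow, 1, 0x01);
+ */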
+/*!
+ *	@brief This function is used to set the YAS532 initial values
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_set_initial_values(void)
+{
+/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* write testr1 as 0x00*/
+	com_rslt = bmi160_set_mag_write_data(
+	BMI160_YAS532_WRITE_TESTR1);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(BMI160_YAS532_TESTR1);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write testr2 as 0x00*/
+	com_rslt += bmi160_set_mag_write_data(
+	BMI160_YAS532_WRITE_TESTR2);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(BMI160_YAS532_TESTR2);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write Rcoil as 0x00*/
+	com_rslt += bmi160_set_mag_write_data(
+	BMI160_YAS532_WRITE_RCOIL);
+	p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(BMI160_YAS532_RCOIL);
+	p_bmi160->delay_msec(BMI160_YAS532_SET_INITIAL_VALUE_DELAY);
+	/* check the valid offset*/
+	if (is_valid_offset(yas532_data.v_hard_offset_s8)) {
+		com_rslt += bmi160_bst_yas532_set_offset(
+		yas532_data.v_hard_offset_s8);
+		yas532_data.measure_state = YAS532_MAG_STATE_NORMAL;
+	} else {
+		/* set the default offset as invalid offset*/
+		set_vector(yas532_data.v_hard_offset_s8, INVALID_OFFSET);
+		/*Set the default measure state for offset correction*/
+		yas532_data.measure_state = YAS532_MAG_STATE_MEASURE_OFFSET;
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used for the YAS532 offset correction
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_magnetic_measure_set_offset(
+void)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* used for offset value set to the offset register*/
+	s8 v_hard_offset_s8[BMI160_HARD_OFFSET_DATA_SIZE] = {
+	BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* offset correction factors*/
+	static const u8 v_correct_u8[BMI160_YAS_CORRECT_DATA_SIZE] = {
+	16, 8, 4, 2, 1};
+	/* used for the temperature */
+	u16 v_temp_u16 = BMI160_INIT_VALUE;
+	/* used for the xy1y2 read*/
+	u16 v_xy1y2_u16[BMI160_YAS_XY1Y2_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* local flag for assign the values*/
+	s32 v_flag_s32[BMI160_YAS_FLAG_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	u8 i, j, v_busy_u8, v_overflow_u8 = BMI160_INIT_VALUE;
+
+	for (i = 0; i < 5; i++) {
+		/* set the offset values*/
+		com_rslt = bmi160_bst_yas532_set_offset(v_hard_offset_s8);
+		/* read the sensor data*/
+		com_rslt += bmi160_bst_yas532_normal_measurement_data(
+		BMI160_YAS532_ACQ_START, &v_busy_u8, &v_temp_u16,
+		v_xy1y2_u16, &v_overflow_u8);
+		/* check the sensor busy status*/
+		if (v_busy_u8)
+			return E_BMI160_BUSY;
+		/* calculate the magnetic correction with
+		offset and assign the values
+		to the offset register */
+		for (j = 0; j < 3; j++) {
+			if (YAS532_DATA_CENTER == v_xy1y2_u16[j])
+				v_flag_s32[j] = 0;
+			if (YAS532_DATA_CENTER < v_xy1y2_u16[j])
+				v_flag_s32[j] = 1;
+			if (v_xy1y2_u16[j] < YAS532_DATA_CENTER)
+				v_flag_s32[j] = -1;
+		}
+		for (j = 0; j < 3; j++) {
+			if (v_flag_s32[j])
+				v_hard_offset_s8[j] = (s8)(v_hard_offset_s8[j]
+				+ v_flag_s32[j] * v_correct_u8[i]);
+		}
+	}
+	/* set the offset */
+	com_rslt += bmi160_bst_yas532_set_offset(v_hard_offset_s8);
+	return com_rslt;
+}
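+/*
+ * Worked example (illustrative): the loop above behaves like a successive
+ * approximation search with the correction sequence 16, 8, 4, 2, 1. If, for
+ * instance, the x channel keeps reading above YAS532_DATA_CENTER for the
+ * first three passes and below it for the last two, the hard offset evolves
+ * as
+ *
+ *	0 -> +16 -> +24 -> +28 -> +26 -> +25
+ *
+ * i.e. it converges to within one step of the value that centres the
+ * reading, and the final offsets are written back with
+ * bmi160_bst_yas532_set_offset().
+ */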
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS532 calibration data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas532_calib_values(void)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array holding the YAS532 calibration values */
+	u8 v_data_u8[BMI160_YAS532_CALIB_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* Read the DX value */
+	com_rslt = bmi160_set_mag_read_addr(BMI160_YAS532_CALIB_CX);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[0], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data.calib_yas532.cx = (s32)((v_data_u8[0]
+	* 10) - 1280);
+	/* Read the DY1 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB_CY1);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[1], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data.calib_yas532.cy1 =
+	(s32)((v_data_u8[1] * 10) - 1280);
+	/* Read the DY2 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB_CY2);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[2], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data.calib_yas532.cy2 =
+	(s32)((v_data_u8[2] * 10) - 1280);
+	/* Read the D2 and D3 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB1);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[3], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data.calib_yas532.a2 =
+	(s32)(((v_data_u8[3] >>
+	BMI160_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x03F) - 32);
+	/* Read the D3 and D4 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB2);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[4], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a3*/
+	yas532_data.calib_yas532.a3 = (s32)((((v_data_u8[3] <<
+	BMI160_SHIFT_BIT_POSITION_BY_02_BITS) & 0x0C) |
+	((v_data_u8[4]
+	>> BMI160_SHIFT_BIT_POSITION_BY_06_BITS)
+	& 0x03)) - 8);
+	/* calculate a4*/
+	yas532_data.calib_yas532.a4 = (s32)((v_data_u8[4]
+	& 0x3F) - 32);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+    /* Read the D5 and D6 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB3);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[5], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a5*/
+	yas532_data.calib_yas532.a5 =
+	(s32)(((v_data_u8[5]
+	>> BMI160_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x3F) + 38);
+	/* Read the D6 and D7 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB4);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[6], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a6*/
+	yas532_data.calib_yas532.a6 =
+	(s32)((((v_data_u8[5]
+	<< BMI160_SHIFT_BIT_POSITION_BY_04_BITS)
+	& 0x30) | ((v_data_u8[6] >>
+	 BMI160_SHIFT_BIT_POSITION_BY_04_BITS)
+	 & 0x0F)) - 32);
+	 /* Read the D7 and D8 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB5);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[7], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a7*/
+	yas532_data.calib_yas532.a7 = (s32)((((v_data_u8[6]
+	<< BMI160_SHIFT_BIT_POSITION_BY_03_BITS)
+	& 0x78) |
+	((v_data_u8[7]
+	>> BMI160_SHIFT_BIT_POSITION_BY_05_BITS) &
+	0x07)) - 64);
+	/* Read the D8 and D9 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CLAIB6);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[8], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a8*/
+	yas532_data.calib_yas532.a8 = (s32)((((v_data_u8[7] <<
+	BMI160_GEN_READ_WRITE_DATA_LENGTH) & 0x3E) |
+	((v_data_u8[8] >>
+	BMI160_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01)) -
+	32);
+
+	/* Read the D8 and D9 value */
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB7);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[9], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a9*/
+	yas532_data.calib_yas532.a9 = (s32)(((v_data_u8[8] <<
+	BMI160_GEN_READ_WRITE_DATA_LENGTH) & 0xFE) |
+	 ((v_data_u8[9] >>
+	 BMI160_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	/* calculate k*/
+	yas532_data.calib_yas532.k = (s32)((v_data_u8[9] >>
+	BMI160_SHIFT_BIT_POSITION_BY_02_BITS) & 0x1F);
+	/* Read the  value from register 0x9A*/
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB8);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[10],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the  value from register 0x9B*/
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIIB9);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[11],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the  value from register 0x9C*/
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB10);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[12],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the  value from register 0x9D*/
+	com_rslt += bmi160_set_mag_read_addr(BMI160_YAS532_CALIB11);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+	&v_data_u8[13],
+	BMI160_GEN_READ_WRITE_DATA_LENGTH);
+	/* Calculate the fxy1y2 and rxy1y1*/
+	yas532_data.calib_yas532.fxy1y2[0] =
+	(u8)(((v_data_u8[10]
+	& 0x01)
+	<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT)
+	| ((v_data_u8[11] >>
+	BMI160_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	yas532_data.calib_yas532.rxy1y2[0] =
+	((s8)(((v_data_u8[10]
+	>> BMI160_SHIFT_BIT_POSITION_BY_01_BIT) & 0x3F)
+	<< BMI160_SHIFT_BIT_POSITION_BY_02_BITS))
+	>> BMI160_SHIFT_BIT_POSITION_BY_02_BITS;
+	yas532_data.calib_yas532.fxy1y2[1] =
+	(u8)(((v_data_u8[11] & 0x01)
+	<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT)
+	 | ((v_data_u8[12] >>
+	 BMI160_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	yas532_data.calib_yas532.rxy1y2[1] =
+	((s8)(((v_data_u8[11]
+	>> BMI160_SHIFT_BIT_POSITION_BY_01_BIT) & 0x3F)
+	<< BMI160_SHIFT_BIT_POSITION_BY_02_BITS))
+	>> BMI160_SHIFT_BIT_POSITION_BY_02_BITS;
+	yas532_data.calib_yas532.fxy1y2[2] =
+	(u8)(((v_data_u8[12] & 0x01)
+	<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT)
+	| ((v_data_u8[13]
+	>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	yas532_data.calib_yas532.rxy1y2[2] =
+	((s8)(((v_data_u8[12]
+	>> BMI160_SHIFT_BIT_POSITION_BY_01_BIT) & 0x3F)
+	 << BMI160_SHIFT_BIT_POSITION_BY_02_BITS))
+	 >> BMI160_SHIFT_BIT_POSITION_BY_02_BITS;
+
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used to convert the raw
+ *	YAS532 xy1y2 data to linear data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_xy1y2_to_linear(
+u16 *v_xy1y2_u16, s32 *xy1y2_linear)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = SUCCESS;
+	static const u16 v_calib_data[] = {
+	3721, 3971, 4221, 4471};
+	u8 i = BMI160_INIT_VALUE;
+
+	for (i = 0; i < 3; i++)
+		xy1y2_linear[i] = v_xy1y2_u16[i] -
+		 v_calib_data[yas532_data.calib_yas532.fxy1y2[i]]
+			+ (yas532_data.v_hard_offset_s8[i] -
+			yas532_data.calib_yas532.rxy1y2[i])
+			* yas532_data.coef[i];
+	return com_rslt;
+}
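+/*
+ * The conversion above, written out per channel i (illustrative restatement):
+ *
+ *	linear[i] = raw[i] - calib[fxy1y2[i]]
+ *		  + (hard_offset[i] - rxy1y2[i]) * coef[i]
+ *
+ * where calib[] is the table {3721, 3971, 4221, 4471} indexed by the per
+ * channel fxy1y2 value read from the calibration registers.
+ */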
+/*!
+ *	@brief This function is used to read the YAS532 sensor data
+ *	@param	v_acquisition_command_u8: used to set the data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *	@param	v_busy_u8 : used to get the busy flag for sensor data read
+ *	@param	v_temp_u16 : used to get the temperature data
+ *	@param	v_xy1y2_u16 : used to get the sensor xy1y2 data
+ *	@param	v_overflow_u8 : used to get the overflow data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_normal_measurement_data(
+u8 v_acquisition_command_u8, u8 *v_busy_u8,
+u16 *v_temp_u16, u16 *v_xy1y2_u16, u8 *v_overflow_u8)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	/* Array holding the YAS532 xyy1 data*/
+	u8 v_data_u8[BMI160_YAS_XY1Y2T_DATA_SIZE] = {
+	BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	u8 i = BMI160_INIT_VALUE;
+	/* check the p_bmi160 structure as NULL*/
+	if (p_bmi160 == BMI160_NULL) {
+		return E_BMI160_NULL_PTR;
+	} else {
+		/* read the sensor data */
+		com_rslt = bmi160_bst_yas532_acquisition_command_register(
+		v_acquisition_command_u8);
+		com_rslt +=
+		p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+		BMI160_USER_DATA_MAG_X_LSB__REG,
+		v_data_u8, BMI160_MAG_YAS_DATA_LENGTH);
+		/* read the xyy1 data*/
+		*v_busy_u8 =
+		((v_data_u8[0]
+		>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01);
+		*v_temp_u16 =
+		(u16)((((s32)v_data_u8[0]
+		<< BMI160_SHIFT_BIT_POSITION_BY_03_BITS)
+		& 0x3F8) | ((v_data_u8[1]
+		>> BMI160_SHIFT_BIT_POSITION_BY_05_BITS) & 0x07));
+		v_xy1y2_u16[0] =
+		(u16)((((s32)v_data_u8[2]
+		<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS) & 0x1FC0)
+		| ((v_data_u8[3] >>
+		BMI160_SHIFT_BIT_POSITION_BY_02_BITS) & 0x3F));
+		v_xy1y2_u16[1] =
+		(u16)((((s32)v_data_u8[4]
+		<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS)
+		& 0x1FC0)
+		| ((v_data_u8[5]
+		>> BMI160_SHIFT_BIT_POSITION_BY_02_BITS) & 0x3F));
+		v_xy1y2_u16[2] =
+		(u16)((((s32)v_data_u8[6]
+		<< BMI160_SHIFT_BIT_POSITION_BY_06_BITS)
+		& 0x1FC0)
+		| ((v_data_u8[7]
+		>> BMI160_SHIFT_BIT_POSITION_BY_02_BITS) & 0x3F));
+		*v_overflow_u8 = 0;
+		for (i = 0; i < 3; i++) {
+			if (v_xy1y2_u16[i] == YAS532_DATA_OVERFLOW)
+				*v_overflow_u8 |= (1 << (i * 2));
+			if (v_xy1y2_u16[i] == YAS532_DATA_UNDERFLOW)
+				*v_overflow_u8 |= (1 << (i * 2 + 1));
+		}
+	}
+	return com_rslt;
+}
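+/*
+ * Worked example of the unpacking above (illustrative): with v_data_u8[0] =
+ * 0xA5 and v_data_u8[1] = 0x6C the busy flag is bit 7 of byte 0, i.e. 1, and
+ * the temperature is ((0xA5 << 3) & 0x3F8) | ((0x6C >> 5) & 0x07) = 0x128 |
+ * 0x03 = 0x12B (299). The three xy1y2 channels are unpacked the same way as
+ * 13 bit values spread over two bytes each.
+ */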
+/*!
+ *	@brief This function is used to read the YAS532 sensor data
+ *	@param	v_acquisition_command_u8	:	the value of CMDR
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ * @param xyz_data : the vector xyz output
+ * @param v_overflow_s8 : the value of overflow
+ * @param v_temp_correction_u8 : the value of temperature correction enable
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_measurement_xyz_data(
+struct yas532_vector *xyz_data, u8 *v_overflow_s8, u8 v_temp_correction_u8,
+u8 v_acquisition_command_u8)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = BMI160_INIT_VALUE;
+	/* Array holding the linear calculation output*/
+	s32 v_xy1y2_linear_s32[BMI160_YAS_XY1Y2_DATA_SIZE] = {
+	BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* Array holding the temperature data */
+	s32 v_xyz_tmp_s32[BMI160_YAS_TEMP_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	s32 tmp = BMI160_INIT_VALUE;
+	s32 sx, sy1, sy2, sy, sz = BMI160_INIT_VALUE;
+	u8 i, v_busy_u8 = BMI160_INIT_VALUE;
+	u16 v_temp_u16 = BMI160_INIT_VALUE;
+	/* Array holding the xyy1 sensor raw data*/
+	u16 v_xy1y2_u16[BMI160_YAS_XY1Y2_DATA_SIZE] = {BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+	s32 sum = BMI160_INIT_VALUE;
+	#endif
+	*v_overflow_s8 = BMI160_INIT_VALUE;
+	switch (yas532_data.measure_state) {
+	case YAS532_MAG_STATE_INIT_COIL:
+		if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE)
+			com_rslt = bmi160_set_mag_manual_enable(
+			BMI160_MANUAL_ENABLE);
+		/* write Rcoil*/
+		com_rslt += bmi160_set_mag_write_data(
+		BMI160_YAS_DISABLE_RCOIL);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(BMI160_YAS532_RCOIL);
+		p_bmi160->delay_msec(BMI160_YAS532_MEASUREMENT_DELAY);
+		if (!yas532_data.overflow && is_valid_offset(
+		yas532_data.v_hard_offset_s8))
+			yas532_data.measure_state = 0;
+	break;
+	case YAS532_MAG_STATE_MEASURE_OFFSET:
+		com_rslt = bmi160_bst_yas532_magnetic_measure_set_offset();
+		yas532_data.measure_state = 0;
+	break;
+	default:
+	break;
+	}
+	/* Read sensor data*/
+	com_rslt += bmi160_bst_yas532_normal_measurement_data(
+	v_acquisition_command_u8, &v_busy_u8, &v_temp_u16,
+	v_xy1y2_u16, v_overflow_s8);
+	/* Calculate the linear data*/
+	com_rslt += bmi160_bst_yas532_xy1y2_to_linear(v_xy1y2_u16,
+	v_xy1y2_linear_s32);
+	/* Calculate temperature correction */
+	#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+		yas532_data.temp_data.log[yas532_data.temp_data.idx++] =
+		v_temp_u16;
+	if (YAS532_MAG_TEMPERATURE_LOG <= yas532_data.temp_data.idx)
+		yas532_data.temp_data.idx = 0;
+	yas532_data.temp_data.num++;
+	if (YAS532_MAG_TEMPERATURE_LOG <= yas532_data.temp_data.num)
+		yas532_data.temp_data.num = YAS532_MAG_TEMPERATURE_LOG;
+	for (i = 0; i < yas532_data.temp_data.num; i++)
+		sum += yas532_data.temp_data.log[i];
+	tmp = sum * 10 / yas532_data.temp_data.num
+	- YAS532_TEMP20DEGREE_TYPICAL * 10;
+	#else
+		tmp = (v_temp_u16 - YAS532_TEMP20DEGREE_TYPICAL)
+		* 10;
+	#endif
+	sx  = v_xy1y2_linear_s32[0];
+	sy1 = v_xy1y2_linear_s32[1];
+	sy2 = v_xy1y2_linear_s32[2];
+	/* Temperature correction */
+	if (v_temp_correction_u8) {
+		sx  -= (yas532_data.calib_yas532.cx  * tmp)
+		/ 1000;
+		sy1 -= (yas532_data.calib_yas532.cy1 * tmp)
+		/ 1000;
+		sy2 -= (yas532_data.calib_yas532.cy2 * tmp)
+		/ 1000;
+	}
+	sy = sy1 - sy2;
+	sz = -sy1 - sy2;
+
+	xyz_data->yas532_vector_xyz[0] = yas532_data.calib_yas532.k *
+	((100 * sx + yas532_data.calib_yas532.a2 * sy +
+	yas532_data.calib_yas532.a3 * sz) / 10);
+	xyz_data->yas532_vector_xyz[1] = yas532_data.calib_yas532.k *
+	((yas532_data.calib_yas532.a4 * sx + yas532_data.calib_yas532.a5 * sy +
+	yas532_data.calib_yas532.a6 * sz) / 10);
+	xyz_data->yas532_vector_xyz[2] = yas532_data.calib_yas532.k *
+	((yas532_data.calib_yas532.a7 * sx + yas532_data.calib_yas532.a8 * sy +
+	yas532_data.calib_yas532.a9 * sz) / 10);
+	if (yas532_data.transform != BMI160_NULL) {
+		for (i = 0; i < 3; i++) {
+				v_xyz_tmp_s32[i] = yas532_data.transform[i
+				* 3] *
+				xyz_data->yas532_vector_xyz[0]
+				+ yas532_data.transform[i * 3 + 1] *
+				xyz_data->yas532_vector_xyz[1]
+				+ yas532_data.transform[i * 3 + 2] *
+				xyz_data->yas532_vector_xyz[2];
+		}
+		set_vector(xyz_data->yas532_vector_xyz, v_xyz_tmp_s32);
+	}
+	for (i = 0; i < 3; i++) {
+		xyz_data->yas532_vector_xyz[i] -=
+		xyz_data->yas532_vector_xyz[i] % 10;
+		if (*v_overflow_s8 & (1
+		<< (i * 2)))
+			xyz_data->yas532_vector_xyz[i] +=
+			1; /* set overflow */
+		if (*v_overflow_s8 & (1 <<
+		(i * 2 + 1)))
+			xyz_data->yas532_vector_xyz[i] += 2; /* set underflow */
+	}
+
+
+	if (v_busy_u8)
+		return com_rslt;
+	if (0 < *v_overflow_s8) {
+		if (!yas532_data.overflow)
+			yas532_data.overflow = 1;
+		yas532_data.measure_state = YAS532_MAG_STATE_INIT_COIL;
+	} else
+		yas532_data.overflow = 0;
+	for (i = 0; i < 3; i++)
+		yas532_data.last_raw[i] = v_xy1y2_u16[i];
+	/* index 3 of last_raw holds the last temperature sample */
+	yas532_data.last_raw[i] = v_temp_u16;
+	return com_rslt;
+}
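+/*
+ * The core computation above, written out (illustrative restatement):
+ *
+ *	sy = sy1 - sy2,  sz = -sy1 - sy2
+ *	x = k * ((100 * sx + a2 * sy + a3 * sz) / 10)
+ *	y = k * ((a4 * sx + a5 * sy + a6 * sz) / 10)
+ *	z = k * ((a7 * sx + a8 * sy + a9 * sz) / 10)
+ *
+ * with sx, sy1 and sy2 optionally temperature corrected beforehand by
+ * subtracting (cx, cy1, cy2) * tmp / 1000, where tmp is the deviation from
+ * the 20 degree C typical value in tenths of a unit.
+ */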
+/*!
+ *	@brief This function is used to write the YAS532 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_acquisition_command_register(
+u8 v_command_reg_data_u8)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE)
+		com_rslt = bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_ENABLE);
+
+	com_rslt = bmi160_set_mag_write_data(v_command_reg_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* YAMAHA YAS532-0x82*/
+	com_rslt += bmi160_set_mag_write_addr(
+	BMI160_YAS532_COMMAND_REGISTER);
+	p_bmi160->delay_msec(BMI160_YAS_ACQ_COMMAND_DELAY);
+	com_rslt += bmi160_set_mag_read_addr(
+	BMI160_YAS532_DATA_REGISTER);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE)
+		com_rslt += bmi160_set_mag_manual_enable(BMI160_MANUAL_DISABLE);
+
+	return com_rslt;
+
+}
+/*!
+ *	@brief This function is used to write the offset of the YAS532
+ *
+ *	@param	p_offset_s8	: The value of offset to write
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_set_offset(
+const s8 *p_offset_s8)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE) {
+		com_rslt = bmi160_set_mag_manual_enable(BMI160_MANUAL_ENABLE);
+		p_bmi160->delay_msec(BMI160_YAS532_OFFSET_DELAY);
+	}
+
+	/* Write offset X data*/
+	com_rslt = bmi160_set_mag_write_data(p_offset_s8[0]);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* YAS532 offset x write*/
+	com_rslt += bmi160_set_mag_write_addr(BMI160_YAS532_OFFSET_X);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	/* Write offset Y data*/
+	com_rslt += bmi160_set_mag_write_data(p_offset_s8[1]);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* YAS532 offset y write*/
+	com_rslt += bmi160_set_mag_write_addr(BMI160_YAS532_OFFSET_Y);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	/* Write offset Z data*/
+	com_rslt += bmi160_set_mag_write_data(p_offset_s8[2]);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* YAS532 offset z write*/
+	com_rslt += bmi160_set_mag_write_addr(BMI160_YAS532_OFFSET_Z);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	set_vector(yas532_data.v_hard_offset_s8, p_offset_s8);
+
+	if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE)
+		com_rslt += bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_DISABLE);
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used to initialize the YAMAHA YAS537 mag interface
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_mag_interface_init(
+void)
+{
+/* This variable used for provide the communication
+results*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+u8 v_pull_value_u8 = BMI160_INIT_VALUE;
+u8 v_data_u8 = BMI160_INIT_VALUE;
+u8 i = BMI160_INIT_VALUE;
+/* accel operation mode to normal*/
+com_rslt = bmi160_set_command_register(ACCEL_MODE_NORMAL);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* write mag power mode as NORMAL*/
+com_rslt += bmi160_set_mag_interface_normal();
+/* register 0x7E write the 0x37, 0x9A and 0x30*/
+com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_ONE);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_TWO);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_command_register(BMI160_COMMAND_REG_THREE);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/*switch the page1*/
+com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE1);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+bmi160_get_target_page(&v_data_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_paging_enable(BMI160_WRITE_ENABLE_PAGE1);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+bmi160_get_paging_enable(&v_data_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* enable the pullup configuration from
+the register 0x05 bit 4 and 5 as 10*/
+bmi160_get_pullup_configuration(&v_pull_value_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+v_pull_value_u8 = v_pull_value_u8 | BMI160_PULL_UP_DATA;
+com_rslt += bmi160_set_pullup_configuration(v_pull_value_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/*switch the page0*/
+com_rslt += bmi160_set_target_page(BMI160_WRITE_TARGET_PAGE0);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+bmi160_get_target_page(&v_data_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* Write the YAS537 i2c address*/
+com_rslt += bmi160_set_i2c_device_addr(BMI160_YAS537_I2C_ADDRESS);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* enable the mag interface to manual mode*/
+com_rslt += bmi160_set_mag_manual_enable(BMI160_MANUAL_ENABLE);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+bmi160_get_mag_manual_enable(&v_data_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/*Enable the MAG interface */
+com_rslt += bmi160_set_if_mode(BMI160_ENABLE_MAG_IF_MODE);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+bmi160_get_if_mode(&v_data_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+v_data_u8 = BMI160_MANUAL_DISABLE;
+/* Read the YAS537 device id*/
+com_rslt += bmi160_set_mag_read_addr(BMI160_YAS_DEVICE_ID_REG);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&v_data_u8, BMI160_GEN_READ_WRITE_DATA_LENGTH);
+yas537_data.dev_id = v_data_u8;
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* Read the YAS537 calibration data*/
+com_rslt +=
+bmi160_bst_yamaha_yas537_calib_values(
+BMI160_GEN_READ_WRITE_DATA_LENGTH);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* set the mode to NORMAL*/
+yas537_data.measure_state = YAS537_MAG_STATE_NORMAL;
+/* set the transform to zero */
+yas537_data.transform = BMI160_NULL;
+yas537_data.average = 32;
+for (i = 0; i < 3; i++) {
+	yas537_data.hard_offset[i] = -128;
+	yas537_data.last_after_rcoil[i] = 0;
+}
+for (i = 0; i < 4; i++)
+	yas537_data.last_raw[i] = 0;
+/* write the mag bandwidth as 25Hz*/
+com_rslt += bmi160_set_mag_output_data_rate(
+BMI160_MAG_OUTPUT_DATA_RATE_25HZ);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* Enable mag interface to auto mode*/
+com_rslt += bmi160_set_mag_manual_enable(
+BMI160_MANUAL_DISABLE);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+bmi160_get_mag_manual_enable(&v_data_u8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+return com_rslt;
+}
+/*!
+*	@brief This function is used to read the
+*	YAMAHA YAS537 calibration data
+*
+*
+*	@param v_rcoil_u8 : The value of r coil
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_calib_values(
+u8 v_rcoil_u8)
+{
+/* This variable used for provide the communication
+results*/
+BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+/* Array holding the YAS537 calibration values */
+u8 a_data_u8[BMI160_YAS537_CALIB_DATA_SIZE] = {
+BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+};
+static const u8 v_avrr_u8[] = {0x50, 0x60, 0x70};
+u8 v_cal_valid_u8 = BMI160_INIT_VALUE, i;
+/* write soft reset as 0x02*/
+com_rslt = bmi160_set_mag_write_data(
+YAS537_SRSTR_DATA);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_mag_write_addr(YAS537_REG_SRSTR);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* Read the DX value */
+com_rslt = bmi160_set_mag_read_addr(YAS537_REG_CALR_C0);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[0], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the DY1 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C1);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[1], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the DY2 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C2);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[2], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D2 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C3);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[3], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D3 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C4);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[4], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D4 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C5);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[5], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D5 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C6);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[6], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D6 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C7);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[7], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D7 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C8);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[8], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D8 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_C9);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[9], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D9 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_CA);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[10], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RX value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_CB);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[11], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RY1 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_CC);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[12], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RY2 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_CD);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[13], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RY2 value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_CE);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[14], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the CHF value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_CF);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[15], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the VER value */
+com_rslt += bmi160_set_mag_read_addr(YAS537_REG_CALR_DO);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += bmi160_read_reg(BMI160_MAG_DATA_READ_REG,
+&a_data_u8[16], BMI160_GEN_READ_WRITE_DATA_LENGTH);
+/* get the calib ver*/
+yas537_data.calib_yas537.ver =
+(a_data_u8[16] >> BMI160_SHIFT_BIT_POSITION_BY_06_BITS);
+for (i = 0; i < 17; i++) {
+	if (((i < 16 && a_data_u8[i]) != 0))
+		v_cal_valid_u8 = 1;
+	if ((i == 16 &&
+	(a_data_u8[i] & 0x3F)) != 0)
+		v_cal_valid_u8 = 1;
+}
+if (!v_cal_valid_u8)
+	return ERROR;
+if (yas537_data.calib_yas537.ver == 0) {
+	for (i = 0; i < 17; i++) {
+		if (i < 12) {
+			/* write offset*/
+			com_rslt += bmi160_set_mag_write_data(
+			a_data_u8[i]);
+			p_bmi160->delay_msec(
+			BMI160_GEN_READ_WRITE_DELAY);
+			com_rslt += bmi160_set_mag_write_addr(
+			YAS537_REG_MTCR + i);
+			p_bmi160->delay_msec(
+			BMI160_GEN_READ_WRITE_DELAY);
+		} else if (i < 15) {
+			/* write offset correction*/
+			com_rslt += bmi160_set_mag_write_data(
+			a_data_u8[i]);
+			p_bmi160->delay_msec(
+			BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			com_rslt += bmi160_set_mag_write_addr((
+			(YAS537_REG_OXR + i) - 12));
+			p_bmi160->delay_msec(
+			BMI160_GEN_READ_WRITE_DELAY);
+			yas537_data.hard_offset[i - 12]
+			= a_data_u8[i];
+		} else {
+			/* write offset correction*/
+			com_rslt += bmi160_set_mag_write_data(
+			a_data_u8[i]);
+			p_bmi160->delay_msec(
+			BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			com_rslt += bmi160_set_mag_write_addr((
+			(YAS537_REG_OXR + i) - 11));
+			p_bmi160->delay_msec(
+			BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		}
+
+}
+} else if (yas537_data.calib_yas537.ver == 1) {
+	for (i = 0; i < 3; i++) {
+		/* write offset*/
+		com_rslt += bmi160_set_mag_write_data(
+		a_data_u8[i]);
+		p_bmi160->delay_msec(
+		BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(
+		YAS537_REG_MTCR + i);
+		p_bmi160->delay_msec(
+		BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		if (com_rslt == SUCCESS) {
+			/* write offset*/
+			com_rslt += bmi160_set_mag_write_data(
+			a_data_u8[i + 12]);
+			p_bmi160->delay_msec(
+			BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			com_rslt += bmi160_set_mag_write_addr(
+			YAS537_REG_OXR + i);
+			p_bmi160->delay_msec(
+			BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			yas537_data.hard_offset[i] =
+			a_data_u8[i + 12];
+		} else {
+			com_rslt = ERROR;
+		}
+	}
+	/* write offset*/
+	com_rslt += bmi160_set_mag_write_data(
+	((a_data_u8[i] & 0xE0) | 0x10));
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(
+	YAS537_REG_MTCR + i);
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write offset*/
+	com_rslt += bmi160_set_mag_write_data(
+	((a_data_u8[15]
+	>> BMI160_SHIFT_BIT_POSITION_BY_03_BITS)
+	& 0x1E));
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(YAS537_REG_HCKR);
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write offset*/
+	com_rslt += bmi160_set_mag_write_data(
+	((a_data_u8[15] << 1) & 0x1E));
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(YAS537_REG_LCKR);
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write offset*/
+	com_rslt += bmi160_set_mag_write_data(
+	(a_data_u8[16] & 0x3F));
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(YAS537_REG_OCR);
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+
+	/* Assign the calibration values*/
+	/* a2 */
+	yas537_data.calib_yas537.a2 =
+	((((a_data_u8[3]
+	<< BMI160_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x7C)
+	| (a_data_u8[4]
+	>> BMI160_SHIFT_BIT_POSITION_BY_06_BITS)) - 64);
+	/* a3 */
+	yas537_data.calib_yas537.a3 =
+	((((a_data_u8[4] << BMI160_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0x7E)
+	| (a_data_u8[5]
+	>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS)) - 64);
+	/* a4 */
+	yas537_data.calib_yas537.a4 =
+	((((a_data_u8[5]
+	<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0xFE)
+	| (a_data_u8[6]
+	>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS))
+	- 128);
+	/* a5 */
+	yas537_data.calib_yas537.a5 =
+	((((a_data_u8[6]
+	<< BMI160_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x1FC)
+	| (a_data_u8[7]
+	>> BMI160_SHIFT_BIT_POSITION_BY_06_BITS))
+	- 112);
+	/* a6 */
+	yas537_data.calib_yas537.a6 =
+	((((a_data_u8[7]
+	<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0x7E)
+	| (a_data_u8[8]
+	>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS)) - 64);
+	/* a7 */
+	yas537_data.calib_yas537.a7 =
+	((((a_data_u8[8]
+	<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0xFE)
+	| (a_data_u8[9]
+	>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS))
+	- 128);
+	/* a8 */
+	yas537_data.calib_yas537.a8 = ((a_data_u8[9] &
+	0x7F) - 64);
+	/* a9 */
+	yas537_data.calib_yas537.a9 = ((((a_data_u8[10]
+	<< BMI160_SHIFT_BIT_POSITION_BY_01_BIT) & 0x1FE)
+	| (a_data_u8[11]
+	>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS))
+	- 112);
+	/* k */
+	yas537_data.calib_yas537.k = (
+	a_data_u8[11] & 0x7F);
+} else {
+	return ERROR;
+}
+/* write A/D converter*/
+com_rslt += bmi160_set_mag_write_data(
+YAS537_WRITE_A_D_CONVERTER);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_mag_write_addr(YAS537_REG_ADCCALR);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* write A/D converter second register*/
+com_rslt += bmi160_set_mag_write_data(
+YAS537_WRITE_A_D_CONVERTER2);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_mag_write_addr(YAS537_REG_ADCCALR_ONE);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* write temperature calibration register*/
+com_rslt += bmi160_set_mag_write_data(YAS537_WRITE_TEMP_CALIB);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_mag_write_addr(YAS537_REG_TRMR);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* write average filter register*/
+com_rslt += bmi160_set_mag_write_data(
+v_avrr_u8[yas537_data.average]);
+p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+com_rslt += bmi160_set_mag_write_addr(YAS537_REG_AVRR);
+p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+if (v_rcoil_u8) {
+	/* write the filter register*/
+	com_rslt += bmi160_set_mag_write_data(
+	YAS537_WRITE_FILTER);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	com_rslt += bmi160_set_mag_write_addr(YAS537_REG_CONFR);
+	p_bmi160->delay_msec(
+	BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+}
+
+return com_rslt;
+
+}
+/*!
+ *	@brief This function is used to write the YAS537 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas537_acquisition_command_register(
+u8 v_command_reg_data_u8)
+{
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+
+	if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE) {
+		com_rslt = bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_ENABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+
+	com_rslt = bmi160_set_mag_write_data(v_command_reg_data_u8);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	/* YAMAHA YAS537 command register */
+	com_rslt += bmi160_set_mag_write_addr(
+	BMI160_REG_YAS537_CMDR);
+	/* set the mode to RECORD*/
+	yas537_data.measure_state = YAS537_MAG_STATE_RECORD_DATA;
+	p_bmi160->delay_msec(BMI160_YAS_ACQ_COMMAND_DELAY);
+	com_rslt += bmi160_set_mag_read_addr(
+	YAS537_REG_TEMPERATURE_0);
+	p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+
+	if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE) {
+		com_rslt += bmi160_set_mag_manual_enable(
+		BMI160_MANUAL_DISABLE);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+	}
+
+	return com_rslt;
+
+}
+/*!
+ *	@brief This function is used to convert the raw
+ *	YAMAHA YAS537 xy1y2 data to xyz data
+ *
+ *	@param xy1y2: The value of raw xy1y2 data
+ *	@param xyz: The value of the converted xyz data
+ *
+ *
+ *	@return None
+ *
+ *
+ */
+static void xy1y2_to_xyz(u16 *xy1y2, s32 *xyz)
+{
+	xyz[0] = ((xy1y2[0] - 8192)
+	* 300);
+	xyz[1] = (((xy1y2[1] - xy1y2[2])
+	* 1732) / 10);
+	xyz[2] = (((-xy1y2[1] - xy1y2[2])
+	+ 16384) * 300);
+}
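+/*
+ * Worked example for the conversion above (illustrative): the raw channels
+ * are centred on 8192, so for xy1y2 = {8292, 8292, 8092}
+ *
+ *	x = (8292 - 8192) * 300          = 30000
+ *	y = ((8292 - 8092) * 1732) / 10  = 34640
+ *	z = (-8292 - 8092 + 16384) * 300 = 0
+ *
+ * mirroring the YAS532 path above where sy = sy1 - sy2 and sz = -sy1 - sy2.
+ */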
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS537 xy1y2 data
+ *
+ *	@param v_coil_stat_u8: The value of R coil status
+ *	@param v_busy_u8: The value of busy status
+ *	@param v_temperature_u16: The value of temperature
+ *	@param xy1y2: The value of raw xy1y2 data
+ *	@param v_ouflow_u8: The value of overflow
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_read_xy1y2_data(
+u8 *v_coil_stat_u8, u8 *v_busy_u8,
+u16 *v_temperature_u16, u16 *xy1y2, u8 *v_ouflow_u8)
+{
+	/* This variable used for provide the communication
+	results*/
+	BMI160_RETURN_FUNCTION_TYPE com_rslt = E_BMI160_COMM_RES;
+	/* Array holding the YAS537 xy1y2 and temperature data */
+	u8 a_data_u8[BMI160_YAS_XY1Y2T_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE,
+	};
+	u8 i = BMI160_INIT_VALUE;
+	s32 a_h_s32[BMI160_YAS_H_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	s32 a_s_s32[BMI160_YAS_S_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	/* set command register*/
+	com_rslt = bmi160_bst_yas537_acquisition_command_register(
+	YAS537_SET_COMMAND_REGISTER);
+	/* read the yas537 sensor data of xy1y2*/
+	com_rslt +=
+	p_bmi160->BMI160_BUS_READ_FUNC(p_bmi160->dev_addr,
+	BMI160_USER_DATA_MAG_X_LSB__REG,
+	a_data_u8, BMI160_MAG_YAS_DATA_LENGTH);
+	/* read the busy flag*/
+	*v_busy_u8 = a_data_u8[2]
+	>> BMI160_SHIFT_BIT_POSITION_BY_07_BITS;
+	/* read the coil status*/
+	*v_coil_stat_u8 =
+	((a_data_u8[2] >>
+	BMI160_SHIFT_BIT_POSITION_BY_06_BITS) & 0X01);
+	/* read temperature data*/
+	*v_temperature_u16 = (u16)((a_data_u8[0]
+	<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | a_data_u8[1]);
+	/* read x data*/
+	xy1y2[0] = (u16)(((a_data_u8[2] &
+	0x3F)
+	<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+	| (a_data_u8[3]));
+	/* read y1 data*/
+	xy1y2[1] = (u16)((a_data_u8[4]
+	<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+	| a_data_u8[5]);
+	/* read y2 data*/
+	xy1y2[2] = (u16)((a_data_u8[6]
+	<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS)
+	| a_data_u8[7]);
+	for (i = 0; i < 3; i++)
+		yas537_data.last_raw[i] = xy1y2[i];
+	yas537_data.last_raw[i] = *v_temperature_u16;
+	if (yas537_data.calib_yas537.ver == 1) {
+		for (i = 0; i < 3; i++)
+			a_s_s32[i] = xy1y2[i] - 8192;
+		/* read hx*/
+		a_h_s32[0] = ((yas537_data.calib_yas537.k * (
+		(128 * a_s_s32[0]) +
+		(yas537_data.calib_yas537.a2 * a_s_s32[1]) +
+		(yas537_data.calib_yas537.a3 * a_s_s32[2])))
+		/ (8192));
+		/* read hy1*/
+		a_h_s32[1] = ((yas537_data.calib_yas537.k * (
+		(yas537_data.calib_yas537.a4 * a_s_s32[0]) +
+		(yas537_data.calib_yas537.a5 * a_s_s32[1]) +
+		(yas537_data.calib_yas537.a6 * a_s_s32[2])))
+		/ (8192));
+		/* read hy2*/
+		a_h_s32[2] = ((yas537_data.calib_yas537.k * (
+		(yas537_data.calib_yas537.a7 * a_s_s32[0]) +
+		(yas537_data.calib_yas537.a8 * a_s_s32[1]) +
+		(yas537_data.calib_yas537.a9 * a_s_s32[2])))
+		/ (8192));
+
+		for (i = 0; i < 3; i++) {
+			if (a_h_s32[i] < -8192)
+				a_h_s32[i] = -8192;
+
+			if (8192 < a_h_s32[i])
+				a_h_s32[i] = 8192;
+
+			xy1y2[i] = a_h_s32[i] + 8192;
+
+		}
+	}
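+	/* The overflow flags below are packed per channel: bit (2 * i) is
+	set when channel i of xy1y2 reaches YAS537_DATA_OVERFLOW and bit
+	(2 * i + 1) when it reads YAS537_DATA_UNDERFLOW */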
+	*v_ouflow_u8 = 0;
+	for (i = 0; i < 3; i++) {
+		if (YAS537_DATA_OVERFLOW <= xy1y2[i])
+			*v_ouflow_u8 |= (1 << (i * 2));
+		if (xy1y2[i] == YAS537_DATA_UNDERFLOW)
+			*v_ouflow_u8 |= (1 << (i * 2 + 1));
+	}
+
+	return com_rslt;
+
+}
+/*!
+ *	@brief This function checks whether the current
+ *	YAMAHA YAS537 xy1y2 data differs from the last
+ *	recorded data by more than the invalid-field threshold
+ *
+ *	@param v_cur_u16: The current xy1y2 data
+ *	@param v_last_u16: The last recorded xy1y2 data
+ *
+ *	@return invalid field status
+ *	@retval 1 -> invalid magnetic field detected
+ *	@retval 0 -> magnetic field is valid
+ *
+ */
+static BMI160_RETURN_FUNCTION_TYPE invalid_magnetic_field(
+u16 *v_cur_u16, u16 *v_last_u16)
+{
+	s16 invalid_thresh[] = {1500, 1500, 1500};
+	u8 i = BMI160_INIT_VALUE;
+
+	for (i = 0; i < 3; i++)
+		if (invalid_thresh[i] < ABS(v_cur_u16[i] - v_last_u16[i]))
+			return 1;
+	return 0;
+}
+/*!
+ *	@brief This function is used to measure the
+ *	YAMAHA YAS537 xyz data
+ *
+ *	@param v_ouflow_u8: The value of overflow
+ *	@param vector_xyz: The measured xyz data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_measure_xyz_data(
+u8 *v_ouflow_u8, struct yas_vector *vector_xyz)
+{
+	s32 a_xyz_tmp_s32[BMI160_YAS_TEMP_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	u8 i = BMI160_INIT_VALUE;
+	s8 com_rslt = BMI160_INIT_VALUE;
+	u8 v_busy_u8 = BMI160_INIT_VALUE;
+	u8 v_rcoil_u8 = BMI160_INIT_VALUE;
+	u16 v_temperature_u16 = BMI160_INIT_VALUE;
+	u16 a_xy1y2_u16[BMI160_YAS_XY1Y2_DATA_SIZE] = {
+	BMI160_INIT_VALUE, BMI160_INIT_VALUE, BMI160_INIT_VALUE};
+	*v_ouflow_u8 = 0;
+	/* read the yas537 xy1y2 data*/
+	com_rslt = bmi160_bst_yamaha_yas537_read_xy1y2_data(
+	&v_rcoil_u8, &v_busy_u8,
+	&v_temperature_u16, a_xy1y2_u16, v_ouflow_u8);
+	/* linear calculation*/
+	xy1y2_to_xyz(a_xy1y2_u16, vector_xyz->yas537_vector_xyz);
+	if (yas537_data.transform != BMI160_NULL) {
+		for (i = 0; i < 3; i++) {
+			a_xyz_tmp_s32[i] = ((
+			yas537_data.transform[i * 3]
+			* vector_xyz->yas537_vector_xyz[0])
+			+ (yas537_data.transform[
+			i * 3 + 1]
+			* vector_xyz->yas537_vector_xyz[1])
+			+ (yas537_data.transform[
+			i * 3 + 2]
+			* vector_xyz->yas537_vector_xyz[2]));
+		}
+		yas537_set_vector(
+		vector_xyz->yas537_vector_xyz, a_xyz_tmp_s32);
+	}
+	for (i = 0; i < 3; i++) {
+		vector_xyz->yas537_vector_xyz[i] -=
+		vector_xyz->yas537_vector_xyz[i] % 10;
+		if (*v_ouflow_u8 & (1 <<
+		(i * 2)))
+			vector_xyz->yas537_vector_xyz[i] +=
+			1; /* set overflow */
+		if (*v_ouflow_u8 & (1 << (i * 2 + 1)))
+			/* set underflow */
+			vector_xyz->yas537_vector_xyz[i] += 2;
+	}
+	if (v_busy_u8)
+		return ERROR;
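+	/* Measurement state machine: INIT_COIL re-initializes the coil via
+	the CONFR register, RECORD_DATA stores the first sample taken after
+	the coil reset as a reference, and NORMAL falls back to INIT_COIL
+	when an overflow or an implausible field jump is detected */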
+	switch (yas537_data.measure_state) {
+	case YAS537_MAG_STATE_INIT_COIL:
+		if (p_bmi160->mag_manual_enable != BMI160_MANUAL_ENABLE)
+			com_rslt = bmi160_set_mag_manual_enable(
+			BMI160_MANUAL_ENABLE);
+		com_rslt += bmi160_set_mag_write_data(YAS537_WRITE_CONFR);
+		p_bmi160->delay_msec(BMI160_GEN_READ_WRITE_DELAY);
+		com_rslt += bmi160_set_mag_write_addr(YAS537_REG_CONFR);
+		p_bmi160->delay_msec(BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		yas537_data.measure_state = YAS537_MAG_STATE_RECORD_DATA;
+		if (p_bmi160->mag_manual_enable == BMI160_MANUAL_ENABLE)
+			com_rslt = bmi160_set_mag_manual_enable(
+			BMI160_MANUAL_DISABLE);
+	break;
+	case YAS537_MAG_STATE_RECORD_DATA:
+		if (v_rcoil_u8)
+			break;
+		yas537_set_vector(yas537_data.last_after_rcoil, a_xy1y2_u16);
+		yas537_data.measure_state = YAS537_MAG_STATE_NORMAL;
+	break;
+	case YAS537_MAG_STATE_NORMAL:
+		if (BMI160_INIT_VALUE < *v_ouflow_u8
+		|| invalid_magnetic_field(a_xy1y2_u16,
+		yas537_data.last_after_rcoil)) {
+			yas537_data.measure_state = YAS537_MAG_STATE_INIT_COIL;
+			for (i = 0; i < 3; i++) {
+				if (!*v_ouflow_u8)
+					vector_xyz->yas537_vector_xyz[i] += 3;
+			}
+		}
+	break;
+	}
+
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used to read the
+ *	bmi160_t structure
+ *
+ *	@return pointer to the bmi160_t structure
+ *
+ */
+struct bmi160_t *bmi160_get_ptr(void)
+{
+	return  p_bmi160;
+}
diff --git a/drivers/input/sensors/bmi160/bmi160.h b/drivers/input/sensors/bmi160/bmi160.h
new file mode 100644
index 0000000..bc94a35
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bmi160.h
@@ -0,0 +1,11814 @@
+/*
+****************************************************************************
+* Copyright (C) 2014 Bosch Sensortec GmbH
+*
+* bmi160.h
+* Date : 2015/04/02
+* @id       836294d
+* Revision : 2.0.9 $
+* @brief
+* The header file of the BMI160 API
+*
+****************************************************************************
+*
+* \section Disclaimer
+*
+* Common:
+* Bosch Sensortec products are developed for the consumer goods industry.
+* They may only be used within the parameters of the respective valid
+* product data sheet.  Bosch Sensortec products are provided with the
+* express understanding that there is no warranty of fitness for a
+* particular purpose. They are not fit for use in life-sustaining,
+* safety or security sensitive systems or any system or device
+* that may lead to bodily harm or property damage if the system
+* or device malfunctions. In addition, Bosch Sensortec products are
+* not fit for use in products which interact with motor vehicle systems.
+* The resale and or use of products are at the purchasers own risk and
+* his own responsibility. The examination of fitness for the intended use
+* is the sole responsibility of the Purchaser.
+*
+* The purchaser shall indemnify Bosch Sensortec from all third party
+* claims, including any claims for incidental, or consequential damages,
+* arising from any product use not covered by the parameters of
+* the respective valid product data sheet or not approved by
+* Bosch Sensortec and reimburse Bosch Sensortec for all costs in
+* connection with such claims.
+*
+* The purchaser must monitor the market for the purchased products,
+* particularly with regard to product safety and inform Bosch Sensortec
+* without delay of all security relevant incidents.
+*
+* Engineering Samples are marked with an asterisk (*) or (e).
+* Samples may vary from the valid technical specifications of the product
+* series. They are therefore not intended or fit for resale to third
+* parties or for use in end products. Their sole purpose is internal
+* client testing. The testing of an engineering sample may in no way
+* replace the testing of a product series. Bosch Sensortec assumes
+* no liability for the use of engineering samples.
+* By accepting the engineering samples, the Purchaser agrees to indemnify
+* Bosch Sensortec from all claims arising from the use of engineering
+* samples.
+*
+* Special:
+* This software module (hereinafter called "Software") and any information
+* on application-sheets (hereinafter called "Information") is provided
+* free of charge for the sole purpose to support your application work.
+* The Software and Information is subject to the following
+* terms and conditions:
+*
+* The Software is specifically designed for the exclusive use for
+* Bosch Sensortec products by personnel who have special experience
+* and training. Do not use this Software if you do not have the
+* proper experience or training.
+*
+* This Software package is provided `` as is `` and without any expressed
+* or implied warranties, including without limitation, the implied warranties
+* of merchantability and fitness for a particular purpose.
+*
+* Bosch Sensortec and their representatives and agents deny any liability
+* for the functional impairment
+* of this Software in terms of fitness, performance and safety.
+* Bosch Sensortec and their representatives and agents shall not be liable
+* for any direct or indirect damages or injury, except as
+* otherwise stipulated in mandatory applicable law.
+*
+* The Information provided is believed to be accurate and reliable.
+* Bosch Sensortec assumes no responsibility for the consequences of use
+* of such Information nor for any infringement of patents or
+* other rights of third parties which may result from its use.
+* No license is granted by implication or otherwise under any patent or
+* patent rights of Bosch. Specifications mentioned in the Information are
+* subject to change without notice.
+**************************************************************************/
+/*! \file bmi160.h
+    \brief BMI160 Sensor Driver Support Header File */
+/* user defined code to be added here ... */
+#ifndef __BMI160_H__
+#define __BMI160_H__
+
+/*!
+* @brief The following definitions are used to define the data types
+*
+* @note While porting the API please consider the following
+* @note Please check the version of the C standard
+* @note Check whether you are using a Linux platform
+*/
+
+/*!
+* @brief For Linux platform support,
+* please use types.h for the data type definitions
+*/
+#ifdef	__KERNEL__
+
+#include <linux/types.h>
+
+#else /* ! __KERNEL__ */
+/**********************************************************
+* These definitions are used to define the C
+* standard version data types
+***********************************************************/
+#if defined(__STDC_VERSION__)
+
+/************************************************
+ * compiler is C11 C standard
+************************************************/
+#if (__STDC_VERSION__ == 201112L)
+
+/************************************************/
+#include <stdint.h>
+/************************************************/
+
+/*unsigned integer types*/
+#define	u8	uint8_t
+#define	u16	uint16_t
+#define	u32	uint32_t
+#define	u64	uint64_t
+
+/*signed integer types*/
+#define	s8	int8_t
+#define	s16	int16_t
+#define	s32	int32_t
+#define	s64	int64_t
+/************************************************
+ * compiler is C99 C standard
+************************************************/
+
+#elif (__STDC_VERSION__ == 199901L)
+
+/* stdint.h is a C99 standard library header
+which provides fixed-width integer types*/
+/************************************************/
+#include <stdint.h>
+/************************************************/
+
+/*unsigned integer types*/
+#define	u8	uint8_t
+#define	u16	uint16_t
+#define	u32	uint32_t
+#define	u64	uint64_t
+
+/*signed integer types*/
+#define s8	int8_t
+#define	s16	int16_t
+#define	s32	int32_t
+#define	s64	int64_t
+/************************************************
+ * compiler is C89 or other C standard
+************************************************/
+#else /* other C standard version */
+/*	By default a 32 bit machine configuration is assumed*/
+/*	change the definition below to match your machine configuration*/
+/*	define the data types based on your
+	machine/compiler/controller configuration*/
+#define  MACHINE_32_BIT
+
+/* If your machine supports 16 bit,
+define MACHINE_16_BIT*/
+#ifdef MACHINE_16_BIT
+#include <limits.h>
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed long int
+
+#if defined(LONG_MAX) && LONG_MAX == 0x7fffffffffffffffL
+#define s64 long int
+#define u64 unsigned long int
+#elif defined(LLONG_MAX) && (LLONG_MAX == 0x7fffffffffffffffLL)
+#define s64 long long int
+#define u64 unsigned long long int
+#else
+#warning Either the correct data type for signed 64 bit integer \
+could not be found, or 64 bit integers are not supported in your environment.
+#warning If 64 bit integers are supported on your platform, \
+please set s64 manually.
+#endif
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned long int
+
+/* If your machine supports 32 bit,
+define MACHINE_32_BIT*/
+#elif defined MACHINE_32_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long long int
+
+/* If your machine supports 64 bit,
+define MACHINE_64_BIT*/
+#elif defined MACHINE_64_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long int
+
+#else
+#warning None of the data type configurations above is supported, \
+define the data types manually
+#endif
+#endif
+
+/*** This else will execute for compilers
+ *	which do not define __STDC_VERSION__
+ *	(C89 or pre-standard compilers)***/
+#else
+/*	By default a 32 bit machine configuration is assumed*/
+/*	change the definition below to match your machine configuration*/
+/*	define the data types based on your
+	machine/compiler/controller configuration*/
+#define  MACHINE_32_BIT
+
+/* If your machine supports 16 bit,
+define MACHINE_16_BIT*/
+#ifdef MACHINE_16_BIT
+#include <limits.h>
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed long int
+
+#if defined(LONG_MAX) && LONG_MAX == 0x7fffffffffffffffL
+#define s64 long int
+#define u64 unsigned long int
+#elif defined(LLONG_MAX) && (LLONG_MAX == 0x7fffffffffffffffLL)
+#define s64 long long int
+#define u64 unsigned long long int
+#else
+#warning Either the correct data type for signed 64 bit integer \
+could not be found, or 64 bit integers are not supported in your environment.
+#warning If 64 bit integers are supported on your platform, \
+please set s64 manually.
+#endif
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned long int
+
+/* If your machine supports 32 bit,
+define MACHINE_32_BIT*/
+#elif defined MACHINE_32_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long long int
+
+/* If your machine supports 64 bit,
+define MACHINE_64_BIT*/
+#elif defined  MACHINE_64_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long int
+
+#else
+#warning None of the data type configurations above is supported, \
+define the data types manually
+#endif
+#endif
+#endif
+/***************************************************************/
+/**\name	BUS READ AND WRITE FUNCTION POINTERS        */
+/***************************************************************/
+/*!
+	@brief Define the calling convention of YOUR bus communication routine.
+	@note This includes types of parameters. This example shows the
+	configuration for an SPI bus link.
+
+    If your communication function looks like this:
+
+    write_my_bus_xy(u8 device_addr, u8 register_addr,
+    u8 * data, u8 length);
+
+    The BMI160_WR_FUNC_PTR would equal:
+
+    BMI160_WR_FUNC_PTR s8 (* bus_write)(u8,
+    u8, u8 *, u8)
+
+    Parameters can be mixed as needed; refer to the
+    @ref BMI160_BUS_WRITE_FUNC macro.
+
+
+*/
+#define BMI160_WR_FUNC_PTR s8 (*bus_write)(u8, u8,\
+u8 *, u8)
+/**< link macro between API function calls and the bus write function
+	@note The bus write function can change since this is a
+	system dependent issue.
+
+    If the bus_write parameter calling order is: reg_addr,
+    reg_data, wr_len, it would be as it is here.
+
+    If the parameters are ordered differently, or your communication
+    function (for example I2C) needs to know the device address,
+    you can change this macro accordingly.
+
+
+    BMI160_BUS_WRITE_FUNC(dev_addr, reg_addr, reg_data, wr_len)\
+    bus_write(dev_addr, reg_addr, reg_data, wr_len)
+
+    This macro lets all API functions call YOUR communication routine in a
+    way that equals your definition in the
+    @ref BMI160_WR_FUNC_PTR definition.
+
+*/
+#define BMI160_BUS_WRITE_FUNC(dev_addr, reg_addr, reg_data, wr_len)\
+				bus_write(dev_addr, reg_addr, reg_data, wr_len)
+
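+/*
+    A minimal sketch of platform glue that satisfies the write prototype
+    above. The names my_bus_write() and my_i2c_write() are hypothetical
+    placeholders for the platform's own transfer routine; only the
+    parameter order and the s8 return convention are taken from the
+    macro definitions in this file.
+
+	static s8 my_bus_write(u8 dev_addr, u8 reg_addr,
+			u8 *reg_data, u8 wr_len)
+	{
+		// forward to the (hypothetical) platform I2C helper and
+		// report 0 on success, a negative value on failure
+		return (s8)my_i2c_write(dev_addr, reg_addr, reg_data, wr_len);
+	}
+*/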
+/**< Define the calling convention of YOUR bus communication routine.
+	@note This includes types of parameters. This example shows the
+	configuration for an SPI bus link.
+
+    If your communication function looks like this:
+
+    read_my_bus_xy(u8 device_addr, u8 register_addr,
+    u8 * data, u8 length);
+
+    The BMI160_RD_FUNC_PTR would equal:
+
+    BMI160_RD_FUNC_PTR s8 (* bus_read)(u8,
+    u8, u8 *, u8)
+
+    Parameters can be mixed as needed; refer to the
+    @ref BMI160_BUS_READ_FUNC macro.
+
+*/
+#define BMI160_SPI_RD_MASK (0x80)   /* for SPI read transactions the
+			MSB of the register address has to be set */
+#define BMI160_RD_FUNC_PTR s8 (*bus_read)(u8,\
+			u8, u8 *, u8)
+
+#define BMI160_BRD_FUNC_PTR s8 \
+(*burst_read)(u8, u8, u8 *, u32)
+
+/**< link macro between API function calls and the bus read function
+	@note The bus read function can change since this is a
+	system dependent issue.
+
+    If the bus_read parameter calling order is: reg_addr,
+    reg_data, r_len, it would be as it is here.
+
+    If the parameters are ordered differently, or your communication
+    function (for example I2C) needs to know the device address,
+    you can change this macro accordingly.
+
+
+    BMI160_BUS_READ_FUNC(dev_addr, reg_addr, reg_data, r_len)\
+    bus_read(dev_addr, reg_addr, reg_data, r_len)
+
+    This macro lets all API functions call YOUR communication routine in a
+    way that equals your definition in the
+    @ref BMI160_RD_FUNC_PTR definition.
+
+    @note For SPI reads the MSB of the register address has to be set
+    (see BMI160_SPI_RD_MASK).
+
+*/
+#define BMI160_BUS_READ_FUNC(dev_addr, reg_addr, reg_data, r_len)\
+				bus_read(dev_addr, reg_addr, reg_data, r_len)
+
+#define BMI160_BURST_READ_FUNC(device_addr, \
+register_addr, register_data, rd_len)\
+burst_read(device_addr, register_addr, register_data, rd_len)
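+/*
+    A hedged sketch of the SPI read convention noted above, assuming the
+    platform glue (hypothetical my_spi_transfer()) is responsible for
+    setting the read bit: OR BMI160_SPI_RD_MASK into the register
+    address before starting the transfer.
+
+	static s8 my_bus_read(u8 dev_addr, u8 reg_addr,
+			u8 *reg_data, u8 r_len)
+	{
+		// set the MSB of the register address for SPI reads
+		return (s8)my_spi_transfer(dev_addr,
+			reg_addr | BMI160_SPI_RD_MASK, reg_data, r_len);
+	}
+*/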
+
+
+#define BMI160_MDELAY_DATA_TYPE                 u32
+
+/***************************************************************/
+/**\name	I2C DEVICE ADDRESS DEFINITIONS        */
+/***************************************************************/
+#define BMI160_I2C_ADDR1	0x68 /**< I2C address of BMI160 when SDO is low */
+#define BMI160_I2C_ADDR2    0x69 /**< I2C address of BMI160 when SDO is high */
+#define BMI160_AUX_BMM150_I2C_ADDRESS       (0x10)
+#define BMI160_AUX_YAS532_I2C_ADDRESS       (0x2E)
+/**< I2C address of YAS532*/
+#define BMI160_AKM09911_I2C_ADDRESS   0x0C/**< I2C address of AKM09911*/
+#define	BMI160_AUX_AKM09911_I2C_ADDR_2		(0x0D)
+/**< I2C address of AKM09911*/
+#define	BMI160_AUX_AKM09912_I2C_ADDR_1		(0x0C)
+/**< I2C address of AKM09912*/
+#define	BMI160_AUX_AKM09912_I2C_ADDR_2		(0x0D)
+/**< I2C address of AKM09912*/
+#define	BMI160_AUX_AKM09912_I2C_ADDR_3		(0x0E)
+/**< I2C address of AKM09912*/
+#define BMI160_AKM09912_I2C_ADDRESS   0x0F/**< I2C address of akm09912*/
+
+#define BMI160_YAS532_I2C_ADDRESS	0x2E/**< I2C address of YAS532*/
+/*******************************************/
+/**\name	CONSTANTS        */
+/******************************************/
+#define  BMI160_INIT_VALUE					(0)
+#define  BMI160_GEN_READ_WRITE_DATA_LENGTH	(1)
+#define  BMI160_MAXIMUM_TIMEOUT             (10)
+/* output data rate condition check*/
+#define  BMI160_OUTPUT_DATA_RATE0	(0)
+#define  BMI160_OUTPUT_DATA_RATE1	(1)
+#define  BMI160_OUTPUT_DATA_RATE2	(2)
+#define  BMI160_OUTPUT_DATA_RATE3	(3)
+#define  BMI160_OUTPUT_DATA_RATE4	(4)
+#define  BMI160_OUTPUT_DATA_RATE5	(5)
+#define  BMI160_OUTPUT_DATA_RATE6	(14)
+#define  BMI160_OUTPUT_DATA_RATE7	(15)
+/* accel range check*/
+#define BMI160_ACCEL_RANGE0  (3)
+#define BMI160_ACCEL_RANGE1  (5)
+#define BMI160_ACCEL_RANGE3  (8)
+#define BMI160_ACCEL_RANGE4  (12)
+/* check the status of registers*/
+#define  BMI160_FOC_STAT_HIGH			(1)
+#define  BMI160_SIG_MOTION_STAT_HIGH	(1)
+#define  BMI160_STEP_DET_STAT_HIGH		(1)
+
+/*condition check for reading and writing data*/
+#define	BMI160_MAX_VALUE_SIGNIFICANT_MOTION      (1)
+#define	BMI160_MAX_VALUE_FIFO_FILTER    (1)
+#define	BMI160_MAX_VALUE_FIFO_TIME      (1)
+#define	BMI160_MAX_VALUE_FIFO_INTR      (1)
+#define	BMI160_MAX_VALUE_FIFO_HEADER    (1)
+#define	BMI160_MAX_VALUE_FIFO_MAG       (1)
+#define	BMI160_MAX_VALUE_FIFO_ACCEL     (1)
+#define	BMI160_MAX_VALUE_FIFO_GYRO      (1)
+#define	BMI160_MAX_VALUE_SOURCE_INTR    (1)
+#define	BMI160_MAX_VALUE_LOW_G_MODE     (1)
+#define	BMI160_MAX_VALUE_NO_MOTION      (1)
+#define	BMI160_MAX_VALUE_TAP_SHOCK      (1)
+#define	BMI160_MAX_VALUE_TAP_QUIET      (1)
+#define	BMI160_MAX_VALUE_ORIENT_UD      (1)
+#define	BMI160_MAX_VALUE_ORIENT_AXES    (1)
+#define	BMI160_MAX_VALUE_NVM_PROG       (1)
+#define	BMI160_MAX_VALUE_SPI3           (1)
+#define	BMI160_MAX_VALUE_PAGE           (1)
+#define	BMI160_MAX_VALUE_I2C_WDT        (1)
+#define	BMI160_MAX_VALUE_SLEEP_STATE    (1)
+#define	BMI160_MAX_VALUE_WAKEUP_INTR    (1)
+#define	BMI160_MAX_VALUE_SELFTEST_SIGN  (1)
+#define	BMI160_MAX_VALUE_SELFTEST_AMP   (1)
+#define	BMI160_MAX_VALUE_SELFTEST_START (1)
+#define BMI160_MAX_GYRO_WAKEUP_TRIGGER		(3)
+#define BMI160_MAX_ACCEL_SELFTEST_AXIS	    (3)
+#define BMI160_MAX_GYRO_STEP_COUNTER        (1)
+#define BMI160_MAX_GYRO_BW                  (3)
+#define BMI160_MAX_ACCEL_BW                 (7)
+#define BMI160_MAX_ORIENT_MODE              (3)
+#define BMI160_MAX_ORIENT_BLOCKING          (3)
+#define BMI160_MAX_FLAT_HOLD                (3)
+#define BMI160_MAX_ACCEL_FOC                (3)
+#define BMI160_MAX_IF_MODE                  (3)
+#define BMI160_MAX_TARGET_PAGE              (3)
+#define BMI160_MAX_GYRO_RANGE               (4)
+#define BMI160_MAX_GYRO_SLEEP_TIGGER        (7)
+#define BMI160_MAX_TAP_TURN                 (7)
+#define BMI160_MAX_UNDER_SAMPLING           (1)
+#define BMI160_MAX_UNDER_SIG_MOTION         (3)
+#define BMI160_MAX_ACCEL_OUTPUT_DATA_RATE   (12)
+#define BMI160_MAX_LATCH_INTR               (15)
+#define BMI160_MAX_FLAT_HYST                (15)
+#define BMI160_MAX_ORIENT_THETA             (63)
+#define BMI160_MAX_FLAT_THETA               (63)
+
+/* FIFO index definitions*/
+#define BMI160_FIFO_X_LSB_DATA			(0)
+#define BMI160_FIFO_X_MSB_DATA			(1)
+#define BMI160_FIFO_Y_LSB_DATA			(2)
+#define BMI160_FIFO_Y_MSB_DATA			(3)
+#define BMI160_FIFO_Z_LSB_DATA			(4)
+#define BMI160_FIFO_Z_MSB_DATA			(5)
+#define BMI160_FIFO_R_LSB_DATA			(6)
+#define BMI160_FIFO_R_MSB_DATA			(7)
+/* FIFO gyro definition*/
+#define BMI160_GA_FIFO_G_X_LSB		(0)
+#define BMI160_GA_FIFO_G_X_MSB		(1)
+#define BMI160_GA_FIFO_G_Y_LSB		(2)
+#define BMI160_GA_FIFO_G_Y_MSB		(3)
+#define BMI160_GA_FIFO_G_Z_LSB		(4)
+#define BMI160_GA_FIFO_G_Z_MSB		(5)
+#define BMI160_GA_FIFO_A_X_LSB		(6)
+#define BMI160_GA_FIFO_A_X_MSB		(7)
+#define BMI160_GA_FIFO_A_Y_LSB		(8)
+#define BMI160_GA_FIFO_A_Y_MSB		(9)
+#define BMI160_GA_FIFO_A_Z_LSB		(10)
+#define BMI160_GA_FIFO_A_Z_MSB		(11)
+/* FIFO mag/gyro/accel definition*/
+#define BMI160_MGA_FIFO_M_X_LSB		(0)
+#define BMI160_MGA_FIFO_M_X_MSB		(1)
+#define BMI160_MGA_FIFO_M_Y_LSB		(2)
+#define BMI160_MGA_FIFO_M_Y_MSB		(3)
+#define BMI160_MGA_FIFO_M_Z_LSB		(4)
+#define BMI160_MGA_FIFO_M_Z_MSB		(5)
+#define BMI160_MGA_FIFO_M_R_LSB		(6)
+#define BMI160_MGA_FIFO_M_R_MSB		(7)
+#define BMI160_MGA_FIFO_G_X_LSB		(8)
+#define BMI160_MGA_FIFO_G_X_MSB		(9)
+#define BMI160_MGA_FIFO_G_Y_LSB		(10)
+#define BMI160_MGA_FIFO_G_Y_MSB		(11)
+#define BMI160_MGA_FIFO_G_Z_LSB		(12)
+#define BMI160_MGA_FIFO_G_Z_MSB		(13)
+#define BMI160_MGA_FIFO_A_X_LSB		(14)
+#define BMI160_MGA_FIFO_A_X_MSB		(15)
+#define BMI160_MGA_FIFO_A_Y_LSB		(16)
+#define BMI160_MGA_FIFO_A_Y_MSB		(17)
+#define BMI160_MGA_FIFO_A_Z_LSB		(18)
+#define BMI160_MGA_FIFO_A_Z_MSB		(19)
+/* FIFO mag definition*/
+#define BMI160_MA_FIFO_M_X_LSB		(0)
+#define BMI160_MA_FIFO_M_X_MSB		(1)
+#define BMI160_MA_FIFO_M_Y_LSB		(2)
+#define BMI160_MA_FIFO_M_Y_MSB		(3)
+#define BMI160_MA_FIFO_M_Z_LSB		(4)
+#define BMI160_MA_FIFO_M_Z_MSB		(5)
+#define BMI160_MA_FIFO_M_R_LSB		(6)
+#define BMI160_MA_FIFO_M_R_MSB		(7)
+#define BMI160_MA_FIFO_A_X_LSB		(8)
+#define BMI160_MA_FIFO_A_X_MSB		(9)
+#define BMI160_MA_FIFO_A_Y_LSB		(10)
+#define BMI160_MA_FIFO_A_Y_MSB		(11)
+#define BMI160_MA_FIFO_A_Z_LSB		(12)
+#define BMI160_MA_FIFO_A_Z_MSB		(13)
+/* FIFO mag/gyro definition*/
+#define BMI160_MG_FIFO_M_X_LSB		(0)
+#define BMI160_MG_FIFO_M_X_MSB		(1)
+#define BMI160_MG_FIFO_M_Y_LSB		(2)
+#define BMI160_MG_FIFO_M_Y_MSB		(3)
+#define BMI160_MG_FIFO_M_Z_LSB		(4)
+#define BMI160_MG_FIFO_M_Z_MSB		(5)
+#define BMI160_MG_FIFO_M_R_LSB		(6)
+#define BMI160_MG_FIFO_M_R_MSB		(7)
+#define BMI160_MG_FIFO_G_X_LSB		(8)
+#define BMI160_MG_FIFO_G_X_MSB		(9)
+#define BMI160_MG_FIFO_G_Y_LSB		(10)
+#define BMI160_MG_FIFO_G_Y_MSB		(11)
+#define BMI160_MG_FIFO_G_Z_LSB		(12)
+#define BMI160_MG_FIFO_G_Z_MSB		(13)
+/* FIFO length definitions*/
+#define BMI160_FIFO_SENSOR_TIME_LSB     (0)
+#define BMI160_FIFO_SENSOR_TIME_XLSB    (1)
+#define BMI160_FIFO_SENSOR_TIME_MSB     (2)
+#define BMI160_FIFO_SENSOR_TIME_LENGTH  (3)
+#define BMI160_FIFO_A_LENGTH            (6)
+#define BMI160_FIFO_G_LENGTH            (6)
+#define BMI160_FIFO_M_LENGTH            (8)
+#define BMI160_FIFO_AG_LENGTH           (12)
+#define BMI160_FIFO_AMG_LENGTH          (20)
+#define BMI160_FIFO_MA_OR_MG_LENGTH     (14)
+
+/* bus read and write length for mag, accel and gyro*/
+#define BMI160_MAG_X_DATA_LENGTH     (2)
+#define BMI160_MAG_Y_DATA_LENGTH     (2)
+#define BMI160_MAG_Z_DATA_LENGTH     (2)
+#define BMI160_MAG_R_DATA_LENGTH     (2)
+#define BMI160_MAG_XYZ_DATA_LENGTH	 (6)
+#define BMI160_MAG_XYZR_DATA_LENGTH	 (8)
+#define BMI160_MAG_YAS_DATA_LENGTH	 (8)
+#define BMI160_GYRO_DATA_LENGTH		 (2)
+#define BMI160_GYRO_XYZ_DATA_LENGTH	 (6)
+#define BMI160_ACCEL_DATA_LENGTH	 (2)
+#define BMI160_ACCEL_XYZ_DATA_LENGTH (6)
+#define BMI160_TEMP_DATA_LENGTH		 (2)
+#define BMI160_FIFO_DATA_LENGTH		 (2)
+#define BMI160_STEP_COUNTER_LENGTH	 (2)
+#define BMI160_SENSOR_TIME_LENGTH	 (3)
+
+/* Delay definitions*/
+#define BMI160_SEC_INTERFACE_GEN_READ_WRITE_DELAY    (5)
+#define BMI160_BMM150_WAKEUP_DELAY1                  (2)
+#define BMI160_BMM150_WAKEUP_DELAY2                  (3)
+#define BMI160_BMM150_WAKEUP_DELAY3                  (1)
+#define BMI160_YAS532_OFFSET_DELAY                   (2)
+#define BMI160_GEN_READ_WRITE_DELAY                  (1)
+#define BMI160_YAS532_MEASUREMENT_DELAY              (25)
+#define BMI160_YAS_ACQ_COMMAND_DELAY                 (50)
+#define BMI160_YAS532_SET_INITIAL_VALUE_DELAY        (200)
+#define BMI160_AKM_INIT_DELAY                        (60)
+/****************************************************/
+/**\name	ARRAY SIZE DEFINITIONS      */
+/***************************************************/
+#define	BMI160_ACCEL_X_DATA_SIZE   (2)
+#define	BMI160_ACCEL_Y_DATA_SIZE   (2)
+#define	BMI160_ACCEL_Z_DATA_SIZE   (2)
+#define	BMI160_ACCEL_XYZ_DATA_SIZE (6)
+
+#define	BMI160_GYRO_X_DATA_SIZE    (2)
+#define	BMI160_GYRO_Y_DATA_SIZE    (2)
+#define	BMI160_GYRO_Z_DATA_SIZE    (2)
+#define	BMI160_GYRO_XYZ_DATA_SIZE  (6)
+
+#define	BMI160_MAG_X_DATA_SIZE      (2)
+#define	BMI160_MAG_Y_DATA_SIZE      (2)
+#define	BMI160_MAG_Z_DATA_SIZE      (2)
+#define	BMI160_MAG_R_DATA_SIZE      (2)
+#define	BMI160_MAG_XYZ_DATA_SIZE    (6)
+#define	BMI160_MAG_XYZR_DATA_SIZE   (8)
+#define	BMI160_MAG_TRIM_DATA_SIZE   (16)
+
+
+#define	BMI160_TEMP_DATA_SIZE       (2)
+#define	BMI160_FIFO_DATA_SIZE       (2)
+#define	BMI160_STEP_COUNT_DATA_SIZE (2)
+
+#define	BMI160_SENSOR_TIME_DATA_SIZE      (3)
+#define	BMI160_AKM_SENSITIVITY_DATA_SIZE  (3)
+#define	BMI160_HARD_OFFSET_DATA_SIZE      (3)
+#define	BMI160_YAS_XY1Y2_DATA_SIZE        (3)
+#define	BMI160_YAS_FLAG_DATA_SIZE         (3)
+#define	BMI160_YAS_TEMP_DATA_SIZE         (3)
+#define	BMI160_YAS_H_DATA_SIZE            (3)
+#define	BMI160_YAS_S_DATA_SIZE            (3)
+#define BMI160_YAS_CORRECT_DATA_SIZE      (5)
+#define BMI160_YAS_XY1Y2T_DATA_SIZE       (8)
+#define BMI160_YAS537_CALIB_DATA_SIZE     (17)
+#define BMI160_YAS532_CALIB_DATA_SIZE     (14)
+/****************************************************/
+/**\name	ARRAY PARAMETER DEFINITIONS      */
+/***************************************************/
+#define BMI160_SENSOR_TIME_MSB_BYTE   (2)
+#define BMI160_SENSOR_TIME_XLSB_BYTE  (1)
+#define BMI160_SENSOR_TIME_LSB_BYTE   (0)
+
+#define BMI160_MAG_X_LSB_BYTE	          (0)
+#define BMI160_MAG_X_MSB_BYTE              (1)
+#define BMI160_MAG_Y_LSB_BYTE	           (0)
+#define BMI160_MAG_Y_MSB_BYTE              (1)
+#define BMI160_MAG_Z_LSB_BYTE	           (0)
+#define BMI160_MAG_Z_MSB_BYTE              (1)
+#define BMI160_MAG_R_LSB_BYTE	           (0)
+#define BMI160_MAG_R_MSB_BYTE              (1)
+#define BMI160_DATA_FRAME_MAG_X_LSB_BYTE   (0)
+#define BMI160_DATA_FRAME_MAG_X_MSB_BYTE   (1)
+#define BMI160_DATA_FRAME_MAG_Y_LSB_BYTE   (2)
+#define BMI160_DATA_FRAME_MAG_Y_MSB_BYTE   (3)
+#define BMI160_DATA_FRAME_MAG_Z_LSB_BYTE   (4)
+#define BMI160_DATA_FRAME_MAG_Z_MSB_BYTE   (5)
+#define BMI160_DATA_FRAME_MAG_R_LSB_BYTE   (6)
+#define BMI160_DATA_FRAME_MAG_R_MSB_BYTE   (7)
+
+#define BMI160_GYRO_X_LSB_BYTE              (0)
+#define BMI160_GYRO_X_MSB_BYTE              (1)
+#define BMI160_GYRO_Y_LSB_BYTE              (0)
+#define BMI160_GYRO_Y_MSB_BYTE              (1)
+#define BMI160_GYRO_Z_LSB_BYTE              (0)
+#define BMI160_GYRO_Z_MSB_BYTE              (1)
+#define BMI160_DATA_FRAME_GYRO_X_LSB_BYTE   (0)
+#define BMI160_DATA_FRAME_GYRO_X_MSB_BYTE   (1)
+#define BMI160_DATA_FRAME_GYRO_Y_LSB_BYTE   (2)
+#define BMI160_DATA_FRAME_GYRO_Y_MSB_BYTE   (3)
+#define BMI160_DATA_FRAME_GYRO_Z_LSB_BYTE   (4)
+#define BMI160_DATA_FRAME_GYRO_Z_MSB_BYTE   (5)
+
+#define BMI160_ACCEL_X_LSB_BYTE              (0)
+#define BMI160_ACCEL_X_MSB_BYTE              (1)
+#define BMI160_ACCEL_Y_LSB_BYTE              (0)
+#define BMI160_ACCEL_Y_MSB_BYTE              (1)
+#define BMI160_ACCEL_Z_LSB_BYTE              (0)
+#define BMI160_ACCEL_Z_MSB_BYTE              (1)
+#define BMI160_DATA_FRAME_ACCEL_X_LSB_BYTE   (0)
+#define BMI160_DATA_FRAME_ACCEL_X_MSB_BYTE   (1)
+#define BMI160_DATA_FRAME_ACCEL_Y_LSB_BYTE   (2)
+#define BMI160_DATA_FRAME_ACCEL_Y_MSB_BYTE   (3)
+#define BMI160_DATA_FRAME_ACCEL_Z_LSB_BYTE   (4)
+#define BMI160_DATA_FRAME_ACCEL_Z_MSB_BYTE   (5)
+
+#define	BMI160_TEMP_LSB_BYTE    (0)
+#define	BMI160_TEMP_MSB_BYTE    (1)
+
+#define	BMI160_FIFO_LENGTH_LSB_BYTE    (0)
+#define	BMI160_FIFO_LENGTH_MSB_BYTE    (1)
+
+#define	BMI160_STEP_COUNT_LSB_BYTE    (0)
+#define	BMI160_STEP_COUNT_MSB_BYTE    (1)
+/****************************************************/
+/**\name	ERROR CODES       */
+/***************************************************/
+
+#define E_BMI160_NULL_PTR			((s8)-127)
+#define E_BMI160_COMM_RES			((s8)-1)
+#define E_BMI160_OUT_OF_RANGE		((s8)-2)
+#define E_BMI160_BUSY				((s8)-3)
+#define	SUCCESS						((u8)0)
+#define	ERROR						((s8)-1)
+
+/* Constants */
+#define BMI160_NULL						(0)
+#define BMI160_DELAY_SETTLING_TIME		(5)
+/* This defines the BMI160 return type as s8 */
+#define BMI160_RETURN_FUNCTION_TYPE        s8
+/****************************************************/
+/**\name	REGISTER DEFINITIONS       */
+/***************************************************/
+/*******************/
+/**\name CHIP ID */
+/*******************/
+#define BMI160_USER_CHIP_ID_ADDR				(0x00)
+/*******************/
+/**\name ERROR STATUS */
+/*******************/
+#define BMI160_USER_ERROR_ADDR					(0X02)
+/*******************/
+/**\name POWER MODE STATUS */
+/*******************/
+#define BMI160_USER_PMU_STAT_ADDR				(0X03)
+/*******************/
+/**\name MAG DATA REGISTERS */
+/*******************/
+#define BMI160_USER_DATA_0_ADDR					(0X04)
+#define BMI160_USER_DATA_1_ADDR					(0X05)
+#define BMI160_USER_DATA_2_ADDR					(0X06)
+#define BMI160_USER_DATA_3_ADDR					(0X07)
+#define BMI160_USER_DATA_4_ADDR					(0X08)
+#define BMI160_USER_DATA_5_ADDR					(0X09)
+#define BMI160_USER_DATA_6_ADDR					(0X0A)
+#define BMI160_USER_DATA_7_ADDR					(0X0B)
+/*******************/
+/**\name GYRO DATA REGISTERS */
+/*******************/
+#define BMI160_USER_DATA_8_ADDR					(0X0C)
+#define BMI160_USER_DATA_9_ADDR					(0X0D)
+#define BMI160_USER_DATA_10_ADDR				(0X0E)
+#define BMI160_USER_DATA_11_ADDR				(0X0F)
+#define BMI160_USER_DATA_12_ADDR				(0X10)
+#define BMI160_USER_DATA_13_ADDR				(0X11)
+#define BMI160_USER_DATA_14_ADDR				(0X12)
+#define BMI160_USER_DATA_15_ADDR				(0X13)
+/*******************/
+/**\name ACCEL DATA REGISTERS */
+/*******************/
+#define BMI160_USER_DATA_16_ADDR				(0X14)
+#define BMI160_USER_DATA_17_ADDR				(0X15)
+#define BMI160_USER_DATA_18_ADDR				(0X16)
+#define BMI160_USER_DATA_19_ADDR				(0X17)
+/*******************/
+/**\name SENSOR TIME REGISTERS */
+/*******************/
+#define BMI160_USER_SENSORTIME_0_ADDR			(0X18)
+#define BMI160_USER_SENSORTIME_1_ADDR			(0X19)
+#define BMI160_USER_SENSORTIME_2_ADDR			(0X1A)
+/*******************/
+/**\name STATUS REGISTER FOR SENSOR STATUS FLAG */
+/*******************/
+#define BMI160_USER_STAT_ADDR					(0X1B)
+/*******************/
+/**\name INTERRUPT STATUS REGISTERS */
+/*******************/
+#define BMI160_USER_INTR_STAT_0_ADDR			(0X1C)
+#define BMI160_USER_INTR_STAT_1_ADDR			(0X1D)
+#define BMI160_USER_INTR_STAT_2_ADDR			(0X1E)
+#define BMI160_USER_INTR_STAT_3_ADDR			(0X1F)
+/*******************/
+/**\name TEMPERATURE REGISTERS */
+/*******************/
+#define BMI160_USER_TEMPERATURE_0_ADDR			(0X20)
+#define BMI160_USER_TEMPERATURE_1_ADDR			(0X21)
+/*******************/
+/**\name FIFO REGISTERS */
+/*******************/
+#define BMI160_USER_FIFO_LENGTH_0_ADDR			(0X22)
+#define BMI160_USER_FIFO_LENGTH_1_ADDR			(0X23)
+#define BMI160_USER_FIFO_DATA_ADDR				(0X24)
+/***************************************************/
+/**\name ACCEL CONFIG REGISTERS  FOR ODR, BANDWIDTH AND UNDERSAMPLING*/
+/******************************************************/
+#define BMI160_USER_ACCEL_CONFIG_ADDR			(0X40)
+/*******************/
+/**\name ACCEL RANGE */
+/*******************/
+#define BMI160_USER_ACCEL_RANGE_ADDR            (0X41)
+/***************************************************/
+/**\name GYRO CONFIG REGISTERS  FOR ODR AND BANDWIDTH */
+/******************************************************/
+#define BMI160_USER_GYRO_CONFIG_ADDR            (0X42)
+/*******************/
+/**\name GYRO RANGE */
+/*******************/
+#define BMI160_USER_GYRO_RANGE_ADDR             (0X43)
+/***************************************************/
+/**\name MAG CONFIG REGISTERS  FOR ODR*/
+/******************************************************/
+#define BMI160_USER_MAG_CONFIG_ADDR				(0X44)
+/***************************************************/
+/**\name REGISTER FOR GYRO AND ACCEL DOWNSAMPLING RATES FOR FIFO*/
+/******************************************************/
+#define BMI160_USER_FIFO_DOWN_ADDR              (0X45)
+/***************************************************/
+/**\name FIFO CONFIG REGISTERS*/
+/******************************************************/
+#define BMI160_USER_FIFO_CONFIG_0_ADDR          (0X46)
+#define BMI160_USER_FIFO_CONFIG_1_ADDR          (0X47)
+/***************************************************/
+/**\name MAG INTERFACE REGISTERS*/
+/******************************************************/
+#define BMI160_USER_MAG_IF_0_ADDR				(0X4B)
+#define BMI160_USER_MAG_IF_1_ADDR				(0X4C)
+#define BMI160_USER_MAG_IF_2_ADDR				(0X4D)
+#define BMI160_USER_MAG_IF_3_ADDR				(0X4E)
+#define BMI160_USER_MAG_IF_4_ADDR				(0X4F)
+/***************************************************/
+/**\name INTERRUPT ENABLE REGISTERS*/
+/******************************************************/
+#define BMI160_USER_INTR_ENABLE_0_ADDR			(0X50)
+#define BMI160_USER_INTR_ENABLE_1_ADDR			(0X51)
+#define BMI160_USER_INTR_ENABLE_2_ADDR			(0X52)
+#define BMI160_USER_INTR_OUT_CTRL_ADDR			(0X53)
+/***************************************************/
+/**\name LATCH DURATION REGISTERS*/
+/******************************************************/
+#define BMI160_USER_INTR_LATCH_ADDR				(0X54)
+/***************************************************/
+/**\name MAP INTERRUPT 1 and 2 REGISTERS*/
+/******************************************************/
+#define BMI160_USER_INTR_MAP_0_ADDR				(0X55)
+#define BMI160_USER_INTR_MAP_1_ADDR				(0X56)
+#define BMI160_USER_INTR_MAP_2_ADDR				(0X57)
+/***************************************************/
+/**\name DATA SOURCE REGISTERS*/
+/******************************************************/
+#define BMI160_USER_INTR_DATA_0_ADDR			(0X58)
+#define BMI160_USER_INTR_DATA_1_ADDR			(0X59)
+/***************************************************/
+/**\name
+INTERRUPT THRESHOLD, HYSTERESIS, DURATION, MODE CONFIGURATION REGISTERS*/
+/******************************************************/
+#define BMI160_USER_INTR_LOWHIGH_0_ADDR			(0X5A)
+#define BMI160_USER_INTR_LOWHIGH_1_ADDR			(0X5B)
+#define BMI160_USER_INTR_LOWHIGH_2_ADDR			(0X5C)
+#define BMI160_USER_INTR_LOWHIGH_3_ADDR			(0X5D)
+#define BMI160_USER_INTR_LOWHIGH_4_ADDR			(0X5E)
+#define BMI160_USER_INTR_MOTION_0_ADDR			(0X5F)
+#define BMI160_USER_INTR_MOTION_1_ADDR			(0X60)
+#define BMI160_USER_INTR_MOTION_2_ADDR			(0X61)
+#define BMI160_USER_INTR_MOTION_3_ADDR			(0X62)
+#define BMI160_USER_INTR_TAP_0_ADDR				(0X63)
+#define BMI160_USER_INTR_TAP_1_ADDR				(0X64)
+#define BMI160_USER_INTR_ORIENT_0_ADDR			(0X65)
+#define BMI160_USER_INTR_ORIENT_1_ADDR			(0X66)
+#define BMI160_USER_INTR_FLAT_0_ADDR			(0X67)
+#define BMI160_USER_INTR_FLAT_1_ADDR			(0X68)
+/***************************************************/
+/**\name FAST OFFSET CONFIGURATION REGISTER*/
+/******************************************************/
+#define BMI160_USER_FOC_CONFIG_ADDR				(0X69)
+/***************************************************/
+/**\name MISCELLANEOUS CONFIGURATION REGISTER*/
+/******************************************************/
+#define BMI160_USER_CONFIG_ADDR					(0X6A)
+/***************************************************/
+/**\name SERIAL INTERFACE SETTINGS REGISTER*/
+/******************************************************/
+#define BMI160_USER_IF_CONFIG_ADDR				(0X6B)
+/***************************************************/
+/**\name GYRO POWER MODE TRIGGER REGISTER */
+/******************************************************/
+#define BMI160_USER_PMU_TRIGGER_ADDR			(0X6C)
+/***************************************************/
+/**\name SELF_TEST REGISTER*/
+/******************************************************/
+#define BMI160_USER_SELF_TEST_ADDR				(0X6D)
+/***************************************************/
+/**\name SPI,I2C SELECTION REGISTER*/
+/******************************************************/
+#define BMI160_USER_NV_CONFIG_ADDR				(0x70)
+/***************************************************/
+/**\name ACCEL AND GYRO OFFSET REGISTERS*/
+/******************************************************/
+#define BMI160_USER_OFFSET_0_ADDR				(0X71)
+#define BMI160_USER_OFFSET_1_ADDR				(0X72)
+#define BMI160_USER_OFFSET_2_ADDR				(0X73)
+#define BMI160_USER_OFFSET_3_ADDR				(0X74)
+#define BMI160_USER_OFFSET_4_ADDR				(0X75)
+#define BMI160_USER_OFFSET_5_ADDR				(0X76)
+#define BMI160_USER_OFFSET_6_ADDR				(0X77)
+/***************************************************/
+/**\name STEP COUNTER INTERRUPT REGISTERS*/
+/******************************************************/
+#define BMI160_USER_STEP_COUNT_0_ADDR			(0X78)
+#define BMI160_USER_STEP_COUNT_1_ADDR			(0X79)
+/***************************************************/
+/**\name STEP COUNTER CONFIGURATION REGISTERS*/
+/******************************************************/
+#define BMI160_USER_STEP_CONFIG_0_ADDR			(0X7A)
+#define BMI160_USER_STEP_CONFIG_1_ADDR			(0X7B)
+/***************************************************/
+/**\name COMMAND REGISTER*/
+/******************************************************/
+#define BMI160_CMD_COMMANDS_ADDR				(0X7E)
+/***************************************************/
+/**\name PAGE REGISTERS*/
+/******************************************************/
+#define BMI160_CMD_EXT_MODE_ADDR				(0X7F)
+#define BMI160_COM_C_TRIM_FIVE_ADDR				(0X05)
+
+/****************************************************/
+/**\name	SHIFT VALUE DEFINITION       */
+/***************************************************/
+#define BMI160_SHIFT_BIT_POSITION_BY_01_BIT      (1)
+#define BMI160_SHIFT_BIT_POSITION_BY_02_BITS     (2)
+#define BMI160_SHIFT_BIT_POSITION_BY_03_BITS     (3)
+#define BMI160_SHIFT_BIT_POSITION_BY_04_BITS     (4)
+#define BMI160_SHIFT_BIT_POSITION_BY_05_BITS     (5)
+#define BMI160_SHIFT_BIT_POSITION_BY_06_BITS     (6)
+#define BMI160_SHIFT_BIT_POSITION_BY_07_BITS     (7)
+#define BMI160_SHIFT_BIT_POSITION_BY_08_BITS     (8)
+#define BMI160_SHIFT_BIT_POSITION_BY_09_BITS     (9)
+#define BMI160_SHIFT_BIT_POSITION_BY_12_BITS     (12)
+#define BMI160_SHIFT_BIT_POSITION_BY_13_BITS     (13)
+#define BMI160_SHIFT_BIT_POSITION_BY_14_BITS     (14)
+#define BMI160_SHIFT_BIT_POSITION_BY_15_BITS     (15)
+#define BMI160_SHIFT_BIT_POSITION_BY_16_BITS     (16)
+
+/****************************************************/
+/**\name	 DEFINITIONS USED FOR YAMAHA-YAS532 */
+/***************************************************/
+#define YAS532_MAG_STATE_NORMAL				(0)
+#define YAS532_MAG_STATE_INIT_COIL			(1)
+#define YAS532_MAG_STATE_MEASURE_OFFSET		(2)
+#define YAS532_MAG_INITCOIL_TIMEOUT			(1000)
+#define YAS532_MAG_NOTRANS_POSITION			(3)
+#define YAS532_DEFAULT_SENSOR_DELAY			(50)
+#define YAS532_DATA_OVERFLOW				(8190)
+#define YAS532_DATA_UNDERFLOW				(0)
+#define YAS532_MAG_LOG				(20)
+#define YAS532_MAG_TEMPERATURE_LOG			(10)
+#define YAS532_TEMP20DEGREE_TYPICAL			(390)
+#define YAS532_VERSION_AC_COEF_X			(850)
+#define YAS532_VERSION_AC_COEF_Y1			(750)
+#define YAS532_VERSION_AC_COEF_Y2			(750)
+#define YAS532_DATA_CENTER					(4096)
+/****************************************************/
+/**\name	YAMAHA-YAS532 OFFSET DEFINITION */
+/***************************************************/
+static const s8 INVALID_OFFSET[] = {0x7f, 0x7f, 0x7f};
+#define set_vector(to, from) \
+	{int _l; for (_l = 0; _l < 3; _l++) (to)[_l] = (from)[_l]; }
+#define is_valid_offset(a) \
+	(((a)[0] <= 31) && ((a)[1] <= 31) && ((a)[2] <= 31) \
+		&& (-31 <= (a)[0]) && (-31 <= (a)[1]) && (-31 <= (a)[2]))
+
+/**************************************************/
+/**\name	YAS532 CALIB DATA DEFINITIONS  */
+/*************************************************/
+
+
+/* register address of YAS532*/
+#define BMI160_YAS532_TESTR1			(0x88)
+#define BMI160_YAS532_TESTR2			(0x89)
+#define BMI160_YAS532_RCOIL				(0x81)
+#define BMI160_YAS532_COMMAND_REGISTER	(0x82)
+#define BMI160_YAS532_DATA_REGISTER		(0xB0)
+/* calib data register definition*/
+#define BMI160_YAS532_CALIB_CX	        (0x90)
+#define BMI160_YAS532_CALIB_CY1	        (0x91)
+#define BMI160_YAS532_CALIB_CY2	        (0x92)
+#define BMI160_YAS532_CALIB1	        (0x93)
+#define BMI160_YAS532_CALIB2	        (0x94)
+#define BMI160_YAS532_CALIB3	        (0x95)
+#define BMI160_YAS532_CALIB4	        (0x96)
+#define BMI160_YAS532_CALIB5	        (0x97)
+#define BMI160_YAS532_CLAIB6	        (0x98)
+#define BMI160_YAS532_CALIB7	        (0x99)
+#define BMI160_YAS532_CALIB8	        (0x9A)
+#define BMI160_YAS532_CALIIB9	        (0x9B)
+#define BMI160_YAS532_CALIB10	        (0x9C)
+#define BMI160_YAS532_CALIB11	        (0x9D)
+/* offset definition */
+#define BMI160_YAS532_OFFSET_X	        (0x85)
+#define BMI160_YAS532_OFFSET_Y	        (0x86)
+#define BMI160_YAS532_OFFSET_Z	        (0x87)
+/* data to write register for yas532*/
+#define BMI160_YAS532_WRITE_TESTR1	    (0x00)
+#define BMI160_YAS532_WRITE_TESTR2	    (0x00)
+#define BMI160_YAS532_WRITE_RCOIL       (0x00)
+/**************************************************/
+/**\name	YAS537 DEFINITION  */
+/*************************************************/
+
+#define	YAS537_SRSTR_DATA		        (0x02)
+#define	YAS537_WRITE_A_D_CONVERTER		(0x03)
+#define	YAS537_WRITE_A_D_CONVERTER2		(0xF8)
+#define	YAS537_WRITE_FILTER             (0x08)
+#define	YAS537_WRITE_CONFR              (0x08)
+#define	YAS537_WRITE_TEMP_CALIB         (0xFF)
+#define	YAS537_SET_COMMAND_REGISTER     (0x01)
+
+/**************************************************/
+/**\name	YAS537 REGISTER DEFINITION  */
+/*************************************************/
+#define	YAS537_REG_SRSTR				(0x90)
+#define	YAS537_REG_CALR_C0				(0xC0)
+#define	YAS537_REG_CALR_C1				(0xC1)
+#define	YAS537_REG_CALR_C2				(0xC2)
+#define	YAS537_REG_CALR_C3				(0xC3)
+#define	YAS537_REG_CALR_C4				(0xC4)
+#define	YAS537_REG_CALR_C5				(0xC5)
+#define	YAS537_REG_CALR_C6				(0xC6)
+#define	YAS537_REG_CALR_C7				(0xC7)
+#define	YAS537_REG_CALR_C8				(0xC8)
+#define	YAS537_REG_CALR_C9				(0xC9)
+#define	YAS537_REG_CALR_CA				(0xCA)
+#define	YAS537_REG_CALR_CB				(0xCB)
+#define	YAS537_REG_CALR_CC				(0xCC)
+#define	YAS537_REG_CALR_CD				(0xCD)
+#define	YAS537_REG_CALR_CE				(0xCE)
+#define	YAS537_REG_CALR_CF				(0xCF)
+#define	YAS537_REG_CALR_DO				(0xD0)
+#define	YAS537_REG_MTCR					(0x93)
+#define	YAS537_REG_CONFR				(0x82)
+#define	BMI160_REG_YAS537_CMDR			(0x81)
+#define	YAS537_REG_OXR					(0x84)
+#define	YAS537_REG_AVRR					(0x87)
+#define	YAS537_REG_HCKR					(0x88)
+#define	YAS537_REG_LCKR					(0x89)
+#define	YAS537_REG_ADCCALR				(0x91)
+#define	YAS537_REG_ADCCALR_ONE			(0x92)
+#define	YAS537_REG_OCR					(0x9E)
+#define	YAS537_REG_TRMR			        (0x9F)
+#define	YAS537_REG_TEMPERATURE_0		(0xB0)
+#define	YAS537_REG_TEMPERATURE_1		(0xB1)
+#define	YAS537_REG_DATA_X_0				(0xB2)
+#define	YAS537_REG_DATA_X_1				(0xB3)
+#define	YAS537_REG_DATA_Y1_0			(0xB4)
+#define	YAS537_REG_DATA_Y1_1			(0xB5)
+#define	YAS537_REG_DATA_Y2_0			(0xB6)
+#define	YAS537_REG_DATA_Y2_1			(0xB7)
+#define YAS537_MAG_STATE_NORMAL			(0)
+#define YAS537_MAG_STATE_INIT_COIL		(1)
+#define YAS537_MAG_STATE_RECORD_DATA	(2)
+#define YAS537_DATA_UNDERFLOW			(0)
+#define YAS537_DATA_OVERFLOW			(16383)
+/****************************************************/
+/**\name	YAS537_set vector */
+/***************************************************/
+#define yas537_set_vector(to, from) \
+	{int _l; for (_l = 0; _l < 3; _l++) (to)[_l] = (from)[_l]; }
+
+#ifndef ABS
+#define ABS(a)		((a) > 0 ? (a) : -(a)) /*!< Absolute value */
+#endif
+/****************************************************/
+/**\name	AKM09911 AND AKM09912 DEFINITION */
+/***************************************************/
+#define AKM09912_SENSITIVITY_DIV	(256)
+#define AKM09912_SENSITIVITY		(128)
+#define AKM09911_SENSITIVITY_DIV	(128)
+#define AKM_ASAX	(0)
+#define AKM_ASAY	(1)
+#define AKM_ASAZ	(2)
+#define AKM_POWER_DOWN_MODE_DATA		(0x00)
+#define AKM_FUSE_ROM_MODE				(0x1F)
+#define AKM_POWER_MODE_REG				(0x31)
+#define	AKM_SINGLE_MEASUREMENT_MODE		(0x01)
+#define AKM_DATA_REGISTER				(0x11)
+/*! AKM09912 Register definition */
+#define AKM09912_CHIP_ID_REG			(0x01)
+/****************************************************/
+/**\name	BMM150 DEFINITION */
+/***************************************************/
+#define BMI160_BMM150_SET_POWER_CONTROL	(0x01)
+#define BMI160_BMM150_MAX_RETRY_WAKEUP	(5)
+#define BMI160_BMM150_POWER_ON			(0x01)
+#define BMI160_BMM150_POWER_OFF			(0x00)
+#define BMI160_BMM150_FORCE_MODE		(0x02)
+#define BMI160_BMM150_POWER_ON_SUCCESS	(0)
+#define BMI160_BMM150_POWER_ON_FAIL		((s8)-1)
+
+#define	BMI160_BMM150_DIG_X1			(0)
+#define	BMI160_BMM150_DIG_Y1			(1)
+#define	BMI160_BMM150_DIG_X2			(2)
+#define	BMI160_BMM150_DIG_Y3			(3)
+#define	BMI160_BMM150_DIG_XY1			(4)
+#define	BMI160_BMM150_DIG_XY2			(5)
+#define	BMI160_BMM150_DIG_Z1_LSB		(6)
+#define	BMI160_BMM150_DIG_Z1_MSB		(7)
+#define	BMI160_BMM150_DIG_Z2_LSB		(8)
+#define	BMI160_BMM150_DIG_Z2_MSB		(9)
+#define	BMI160_BMM150_DIG_DIG_Z3_LSB	(10)
+#define	BMI160_BMM150_DIG_DIG_Z3_MSB	(11)
+#define	BMI160_BMM150_DIG_DIG_Z4_LSB	(12)
+#define	BMI160_BMM150_DIG_DIG_Z4_MSB	(13)
+#define	BMI160_BMM150_DIG_DIG_XYZ1_LSB	(14)
+#define	BMI160_BMM150_DIG_DIG_XYZ1_MSB	(15)
+
+/**************************************************************/
+/**\name	STRUCTURE DEFINITIONS                         */
+/**************************************************************/
+/*!
+*	@brief bmi160 structure
+*	This structure holds all relevant information about bmi160
+*/
+struct bmi160_t {
+u8 chip_id;/**< chip id of BMI160 */
+u8 dev_addr;/**< device address of BMI160 */
+s8 mag_manual_enable;/**< used to check the mag manual/auto mode status */
+BMI160_WR_FUNC_PTR;/**< bus write function pointer */
+BMI160_RD_FUNC_PTR;/**< bus read function pointer */
+BMI160_BRD_FUNC_PTR;/**< burst read function pointer */
+void (*delay_msec)(BMI160_MDELAY_DATA_TYPE);/**< delay function pointer */
+};
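+/*
+    A minimal sketch of how a platform might populate bmi160_t before
+    calling the API, assuming hypothetical glue routines my_bus_write(),
+    my_bus_read(), my_burst_read() and my_delay_msec() that match the
+    prototypes declared by the function-pointer macros above:
+
+	static struct bmi160_t my_bmi160 = {
+		.dev_addr = BMI160_I2C_ADDR1,
+		.bus_write = my_bus_write,
+		.bus_read = my_bus_read,
+		.burst_read = my_burst_read,
+		.delay_msec = my_delay_msec,
+	};
+*/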
+/*!
+ * @brief Structure containing bmm150 and akm09911
+ *	magnetometer values for x,y and
+ *	z-axis in s16
+ */
+struct bmi160_mag_t {
+s16 x;/**< BMM150 and AKM09911 and AKM09912 X raw data*/
+s16 y;/**< BMM150 and AKM09911 and AKM09912 Y raw data*/
+s16 z;/**< BMM150 and AKM09911 and AKM09912 Z raw data*/
+};
+/*!
+ * @brief Structure containing bmm150 xyz data and temperature
+ */
+struct bmi160_mag_xyzr_t {
+s16 x;/**< BMM150 X raw data*/
+s16 y;/**< BMM150 Y raw data*/
+s16 z;/**<BMM150 Z raw data*/
+u16 r;/**<BMM150 R raw data*/
+};
+/*!
+ * @brief Structure containing gyro xyz data
+ */
+struct bmi160_gyro_t {
+s16 x;/**<gyro X  data*/
+s16 y;/**<gyro Y  data*/
+s16 z;/**<gyro Z  data*/
+};
+/*!
+ * @brief Structure containing accel xyz data
+ */
+struct bmi160_accel_t {
+s16 x;/**<accel X  data*/
+s16 y;/**<accel Y  data*/
+s16 z;/**<accel Z  data*/
+};
+/*!
+ * @brief Structure bmm150 mag compensated data with s32 output
+ */
+struct bmi160_mag_xyz_s32_t {
+s16 x;/**<BMM150 X compensated data*/
+s16 y;/**<BMM150 Y compensated data*/
+s16 z;/**<BMM150 Z compensated data*/
+};
+/*!
+ * @brief Structure bmm150 mag trim data
+ */
+struct trim_data_t {
+s8 dig_x1;/**<BMM150 trim x1 data*/
+s8 dig_y1;/**<BMM150 trim y1 data*/
+
+s8 dig_x2;/**<BMM150 trim x2 data*/
+s8 dig_y2;/**<BMM150 trim y2 data*/
+
+u16 dig_z1;/**<BMM150 trim z1 data*/
+s16 dig_z2;/**<BMM150 trim z2 data*/
+s16 dig_z3;/**<BMM150 trim z3 data*/
+s16 dig_z4;/**<BMM150 trim z4 data*/
+
+u8 dig_xy1;/**<BMM150 trim xy1 data*/
+s8 dig_xy2;/**<BMM150 trim xy2 data*/
+
+u16 dig_xyz1;/**<BMM150 trim xyz1 data*/
+};
+
+/*!
+*	@brief Structure for reading AKM compensating data
+*/
+struct bst_akm_sensitivity_data_t {
+u8 asax;/**<AKM09911 and AKM09912 X sensitivity data*/
+u8 asay;/**<AKM09911 and AKM09912 Y sensitivity data*/
+u8 asaz;/**<AKM09911 and AKM09912 Z sensitivity data*/
+};
+/*!
+* @brief YAMAHA-YAS532 struct
+* Calibration YAS532 data struct
+*/
+struct bst_yas532_calib_data_t {
+s32 cx;/**<YAS532 calib cx data */
+s32 cy1;/**<YAS532 calib cy1 data */
+s32 cy2;/**<YAS532 calib cy2 data */
+s32 a2;/**<YAS532 calib a2 data */
+s32 a3;/**<YAS532 calib a3 data */
+s32 a4;/**<YAS532 calib a4 data */
+s32 a5;/**<YAS532 calib a5 data */
+s32 a6;/**<YAS532 calib a6 data */
+s32 a7;/**<YAS532 calib a7 data */
+s32 a8;/**<YAS532 calib a8 data */
+s32 a9;/**<YAS532 calib a9 data */
+s32 k;/**<YAS532 calib k data */
+s8 rxy1y2[3];/**<YAS532 calib rxy1y2 data */
+u8 fxy1y2[3];/**<YAS532 calib fxy1y2 data */
+};
+/*!
+* @brief YAS532 Temperature structure
+*/
+#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+struct yas_temp_filter_t {
+u16 log[YAS532_MAG_TEMPERATURE_LOG];/**<YAS532 temp log array */
+u8 num;/**< number of logged temperature samples */
+u8 idx;/**< current write index into the log */
+};
+#endif
+/*!
+* @brief YAS532 sensor initialization
+*/
+struct yas532_t {
+struct bst_yas532_calib_data_t calib_yas532;/**< calib data */
+s8 measure_state;/**< update measure state */
+s8 v_hard_offset_s8[3];/**< offset write array*/
+s32 coef[3];/**< coefficient data */
+s8 overflow;/**< overflow condition check */
+u8 dev_id;/**< device id information */
+const s8 *transform;/**< transform condition check  */
+#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+struct yas_temp_filter_t temp_data;/**< temp data */
+#endif
+u16 last_raw[4];/**< raw data */
+};
+/*!
+* @brief Used for reading the YAS532 XYZ data
+*/
+struct yas532_vector {
+s32 yas532_vector_xyz[3];/**< YAS532 compensated xyz data*/
+};
+/**
+ * @struct yas_vector
+ * @brief Stores the sensor data
+ */
+struct yas_vector {
+	s32 yas537_vector_xyz[3]; /*!< vector data */
+};
+/*!
+* @brief YAMAHA-YAS537 struct
+* Calibration YAS537 data struct
+*/
+struct bst_yas537_calib_data_t {
+s8 a2;/**<YAS537 calib a2 data */
+s8 a3;/**<YAS537 calib a3 data */
+s8 a4;/**<YAS537 calib a4 data */
+s16 a5;/**<YAS537 calib a5 data */
+s8 a6;/**<YAS537 calib a6 data */
+s8 a7;/**<YAS537 calib a7 data */
+s8 a8;/**<YAS537 calib a8 data */
+s16 a9;/**<YAS537 calib a9 data */
+u8 k;/**<YAS537 calib k data */
+u8 ver;/**<YAS537 calib ver data*/
+};
+/*!
+* @brief YAS537 sensor initialization
+*/
+struct yas537_t {
+struct bst_yas537_calib_data_t calib_yas537;/**< calib data */
+s8 measure_state;/**< update measure state */
+s8 hard_offset[3];/**< offset write array*/
+u16 last_after_rcoil[3];/**< rcoil write array*/
+s32 coef[3];/**< coefficient data */
+s8 overflow;/**< overflow condition check */
+u8 dev_id;/**< device id information */
+u8 average;/**<average selection for offset configuration*/
+const s8 *transform;/**< transform condition check  */
+u16 last_raw[4];/**< raw data */
+struct yas_vector xyz; /*!< X, Y, Z measurement data of the sensor */
+};
+/**************************************************************/
+/**\name	USER DATA REGISTERS DEFINITION START    */
+/**************************************************************/
+
+/**************************************************************/
+/**\name	CHIP ID LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Chip ID Description - Reg Addr --> (0x00), Bit --> 0...7 */
+#define BMI160_USER_CHIP_ID__POS             (0)
+#define BMI160_USER_CHIP_ID__MSK            (0xFF)
+#define BMI160_USER_CHIP_ID__LEN             (8)
+#define BMI160_USER_CHIP_ID__REG             (BMI160_USER_CHIP_ID_ADDR)
+/**************************************************************/
+/**\name	ERROR STATUS LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Error Description - Reg Addr --> (0x02), Bit --> 0 */
+#define BMI160_USER_ERR_STAT__POS               (0)
+#define BMI160_USER_ERR_STAT__LEN               (8)
+#define BMI160_USER_ERR_STAT__MSK               (0xFF)
+#define BMI160_USER_ERR_STAT__REG               (BMI160_USER_ERROR_ADDR)
+
+#define BMI160_USER_FATAL_ERR__POS               (0)
+#define BMI160_USER_FATAL_ERR__LEN               (1)
+#define BMI160_USER_FATAL_ERR__MSK               (0x01)
+#define BMI160_USER_FATAL_ERR__REG               (BMI160_USER_ERROR_ADDR)
+
+/* Error Description - Reg Addr --> (0x02), Bit --> 1...4 */
+#define BMI160_USER_ERR_CODE__POS               (1)
+#define BMI160_USER_ERR_CODE__LEN               (4)
+#define BMI160_USER_ERR_CODE__MSK               (0x1E)
+#define BMI160_USER_ERR_CODE__REG               (BMI160_USER_ERROR_ADDR)
+
+/* Error Description - Reg Addr --> (0x02), Bit --> 5 */
+#define BMI160_USER_I2C_FAIL_ERR__POS               (5)
+#define BMI160_USER_I2C_FAIL_ERR__LEN               (1)
+#define BMI160_USER_I2C_FAIL_ERR__MSK               (0x20)
+#define BMI160_USER_I2C_FAIL_ERR__REG               (BMI160_USER_ERROR_ADDR)
+
+/* Error Description - Reg Addr --> (0x02), Bit --> 6 */
+#define BMI160_USER_DROP_CMD_ERR__POS              (6)
+#define BMI160_USER_DROP_CMD_ERR__LEN              (1)
+#define BMI160_USER_DROP_CMD_ERR__MSK              (0x40)
+#define BMI160_USER_DROP_CMD_ERR__REG              (BMI160_USER_ERROR_ADDR)
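+/*
+    The __POS/__LEN/__MSK/__REG quadruples in this file follow one common
+    bit-slicing pattern; a hedged sketch of how a caller could extract a
+    field (v_data_u8 is assumed to hold the byte read from the __REG
+    address):
+
+	u8 v_err_code_u8 = (v_data_u8 & BMI160_USER_ERR_CODE__MSK)
+			>> BMI160_USER_ERR_CODE__POS;
+*/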
+/**************************************************************/
+/**\name	MAG DATA READY LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Error Description - Reg Addr --> (0x02), Bit --> 7 */
+#define BMI160_USER_MAG_DADA_RDY_ERR__POS               (7)
+#define BMI160_USER_MAG_DADA_RDY_ERR__LEN               (1)
+#define BMI160_USER_MAG_DADA_RDY_ERR__MSK               (0x80)
+#define BMI160_USER_MAG_DADA_RDY_ERR__REG               (BMI160_USER_ERROR_ADDR)
+/**************************************************************/
+/**\name	MAG POWER MODE LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* PMU_Status Description of MAG - Reg Addr --> (0x03), Bit --> 0...1 */
+#define BMI160_USER_MAG_POWER_MODE_STAT__POS		(0)
+#define BMI160_USER_MAG_POWER_MODE_STAT__LEN		(2)
+#define BMI160_USER_MAG_POWER_MODE_STAT__MSK		(0x03)
+#define BMI160_USER_MAG_POWER_MODE_STAT__REG		\
+(BMI160_USER_PMU_STAT_ADDR)
+/**************************************************************/
+/**\name	GYRO POWER MODE LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* PMU_Status Description of GYRO - Reg Addr --> (0x03), Bit --> 3...2 */
+#define BMI160_USER_GYRO_POWER_MODE_STAT__POS               (2)
+#define BMI160_USER_GYRO_POWER_MODE_STAT__LEN               (2)
+#define BMI160_USER_GYRO_POWER_MODE_STAT__MSK               (0x0C)
+#define BMI160_USER_GYRO_POWER_MODE_STAT__REG		      \
+(BMI160_USER_PMU_STAT_ADDR)
+/**************************************************************/
+/**\name	ACCEL POWER MODE LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* PMU_Status Description of ACCEL - Reg Addr --> (0x03), Bit --> 5...4 */
+#define BMI160_USER_ACCEL_POWER_MODE_STAT__POS               (4)
+#define BMI160_USER_ACCEL_POWER_MODE_STAT__LEN               (2)
+#define BMI160_USER_ACCEL_POWER_MODE_STAT__MSK               (0x30)
+#define BMI160_USER_ACCEL_POWER_MODE_STAT__REG		    \
+(BMI160_USER_PMU_STAT_ADDR)
+/**************************************************************/
+/**\name	MAG DATA XYZ LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Mag_X(LSB) Description - Reg Addr --> (0x04), Bit --> 0...7 */
+#define BMI160_USER_DATA_0_MAG_X_LSB__POS           (0)
+#define BMI160_USER_DATA_0_MAG_X_LSB__LEN           (8)
+#define BMI160_USER_DATA_0_MAG_X_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_0_MAG_X_LSB__REG          (BMI160_USER_DATA_0_ADDR)
+
+/* Mag_X(LSB) Description - Reg Addr --> (0x04), Bit --> 3...7 */
+#define BMI160_USER_DATA_MAG_X_LSB__POS           (3)
+#define BMI160_USER_DATA_MAG_X_LSB__LEN           (5)
+#define BMI160_USER_DATA_MAG_X_LSB__MSK          (0xF8)
+#define BMI160_USER_DATA_MAG_X_LSB__REG          (BMI160_USER_DATA_0_ADDR)
+
+/* Mag_X(MSB) Description - Reg Addr --> (0x05), Bit --> 0...7 */
+#define BMI160_USER_DATA_1_MAG_X_MSB__POS           (0)
+#define BMI160_USER_DATA_1_MAG_X_MSB__LEN           (8)
+#define BMI160_USER_DATA_1_MAG_X_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_1_MAG_X_MSB__REG          (BMI160_USER_DATA_1_ADDR)
+
+/* Mag_Y(LSB) Description - Reg Addr --> (0x06), Bit --> 0...7 */
+#define BMI160_USER_DATA_2_MAG_Y_LSB__POS           (0)
+#define BMI160_USER_DATA_2_MAG_Y_LSB__LEN           (8)
+#define BMI160_USER_DATA_2_MAG_Y_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_2_MAG_Y_LSB__REG          (BMI160_USER_DATA_2_ADDR)
+
+/* Mag_Y(LSB) Description - Reg Addr --> (0x06), Bit --> 3...7 */
+#define BMI160_USER_DATA_MAG_Y_LSB__POS           (3)
+#define BMI160_USER_DATA_MAG_Y_LSB__LEN           (5)
+#define BMI160_USER_DATA_MAG_Y_LSB__MSK          (0xF8)
+#define BMI160_USER_DATA_MAG_Y_LSB__REG          (BMI160_USER_DATA_2_ADDR)
+
+/* Mag_Y(MSB) Description - Reg Addr --> (0x07), Bit --> 0...7 */
+#define BMI160_USER_DATA_3_MAG_Y_MSB__POS           (0)
+#define BMI160_USER_DATA_3_MAG_Y_MSB__LEN           (8)
+#define BMI160_USER_DATA_3_MAG_Y_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_3_MAG_Y_MSB__REG          (BMI160_USER_DATA_3_ADDR)
+
+/* Mag_Z(LSB) Description - Reg Addr --> (0x08), Bit --> 0...7 */
+#define BMI160_USER_DATA_4_MAG_Z_LSB__POS           (0)
+#define BMI160_USER_DATA_4_MAG_Z_LSB__LEN           (8)
+#define BMI160_USER_DATA_4_MAG_Z_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_4_MAG_Z_LSB__REG          (BMI160_USER_DATA_4_ADDR)
+
+/* Mag_Z(LSB) Description - Reg Addr --> (0x08), Bit --> 1...7 */
+#define BMI160_USER_DATA_MAG_Z_LSB__POS           (1)
+#define BMI160_USER_DATA_MAG_Z_LSB__LEN           (7)
+#define BMI160_USER_DATA_MAG_Z_LSB__MSK          (0xFE)
+#define BMI160_USER_DATA_MAG_Z_LSB__REG          (BMI160_USER_DATA_4_ADDR)
+
+/* Mag_Z(MSB) Description - Reg Addr --> (0x09), Bit --> 0...7 */
+#define BMI160_USER_DATA_5_MAG_Z_MSB__POS           (0)
+#define BMI160_USER_DATA_5_MAG_Z_MSB__LEN           (8)
+#define BMI160_USER_DATA_5_MAG_Z_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_5_MAG_Z_MSB__REG          (BMI160_USER_DATA_5_ADDR)
+
+/* RHALL(LSB) Description - Reg Addr --> (0x0A), Bit --> 0...7 */
+#define BMI160_USER_DATA_6_RHALL_LSB__POS           (0)
+#define BMI160_USER_DATA_6_RHALL_LSB__LEN           (8)
+#define BMI160_USER_DATA_6_RHALL_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_6_RHALL_LSB__REG          (BMI160_USER_DATA_6_ADDR)
+
+/* Mag_R(LSB) Description - Reg Addr --> (0x0A), Bit --> 2...7 */
+#define BMI160_USER_DATA_MAG_R_LSB__POS           (2)
+#define BMI160_USER_DATA_MAG_R_LSB__LEN           (6)
+#define BMI160_USER_DATA_MAG_R_LSB__MSK          (0xFC)
+#define BMI160_USER_DATA_MAG_R_LSB__REG          (BMI160_USER_DATA_6_ADDR)
+
+/* RHALL(MSB) Description - Reg Addr --> (0x0B), Bit --> 0...7 */
+#define BMI160_USER_DATA_7_RHALL_MSB__POS           (0)
+#define BMI160_USER_DATA_7_RHALL_MSB__LEN           (8)
+#define BMI160_USER_DATA_7_RHALL_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_7_RHALL_MSB__REG          (BMI160_USER_DATA_7_ADDR)
+/**************************************************************/
+/**\name	GYRO DATA XYZ LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* GYR_X (LSB) Description - Reg Addr --> (0x0C), Bit --> 0...7 */
+#define BMI160_USER_DATA_8_GYRO_X_LSB__POS           (0)
+#define BMI160_USER_DATA_8_GYRO_X_LSB__LEN           (8)
+#define BMI160_USER_DATA_8_GYRO_X_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_8_GYRO_X_LSB__REG          (BMI160_USER_DATA_8_ADDR)
+
+/* GYR_X (MSB) Description - Reg Addr --> (0x0D), Bit --> 0...7 */
+#define BMI160_USER_DATA_9_GYRO_X_MSB__POS           (0)
+#define BMI160_USER_DATA_9_GYRO_X_MSB__LEN           (8)
+#define BMI160_USER_DATA_9_GYRO_X_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_9_GYRO_X_MSB__REG          (BMI160_USER_DATA_9_ADDR)
+
+/* GYR_Y (LSB) Description - Reg Addr --> 0x0E, Bit --> 0...7 */
+#define BMI160_USER_DATA_10_GYRO_Y_LSB__POS           (0)
+#define BMI160_USER_DATA_10_GYRO_Y_LSB__LEN           (8)
+#define BMI160_USER_DATA_10_GYRO_Y_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_10_GYRO_Y_LSB__REG          (BMI160_USER_DATA_10_ADDR)
+
+/* GYR_Y (MSB) Description - Reg Addr --> (0x0F), Bit --> 0...7 */
+#define BMI160_USER_DATA_11_GYRO_Y_MSB__POS           (0)
+#define BMI160_USER_DATA_11_GYRO_Y_MSB__LEN           (8)
+#define BMI160_USER_DATA_11_GYRO_Y_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_11_GYRO_Y_MSB__REG          (BMI160_USER_DATA_11_ADDR)
+
+/* GYR_Z (LSB) Description - Reg Addr --> (0x10), Bit --> 0...7 */
+#define BMI160_USER_DATA_12_GYRO_Z_LSB__POS           (0)
+#define BMI160_USER_DATA_12_GYRO_Z_LSB__LEN           (8)
+#define BMI160_USER_DATA_12_GYRO_Z_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_12_GYRO_Z_LSB__REG          (BMI160_USER_DATA_12_ADDR)
+
+/* GYR_Z (MSB) Description - Reg Addr --> (0x11), Bit --> 0...7 */
+#define BMI160_USER_DATA_13_GYRO_Z_MSB__POS           (0)
+#define BMI160_USER_DATA_13_GYRO_Z_MSB__LEN           (8)
+#define BMI160_USER_DATA_13_GYRO_Z_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_13_GYRO_Z_MSB__REG          (BMI160_USER_DATA_13_ADDR)
+/**************************************************************/
+/**\name	ACCEL DATA XYZ LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* ACC_X (LSB) Description - Reg Addr --> (0x12), Bit --> 0...7 */
+#define BMI160_USER_DATA_14_ACCEL_X_LSB__POS           (0)
+#define BMI160_USER_DATA_14_ACCEL_X_LSB__LEN           (8)
+#define BMI160_USER_DATA_14_ACCEL_X_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_14_ACCEL_X_LSB__REG          (BMI160_USER_DATA_14_ADDR)
+
+/* ACC_X (MSB) Description - Reg Addr --> 0x13, Bit --> 0...7 */
+#define BMI160_USER_DATA_15_ACCEL_X_MSB__POS           (0)
+#define BMI160_USER_DATA_15_ACCEL_X_MSB__LEN           (8)
+#define BMI160_USER_DATA_15_ACCEL_X_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_15_ACCEL_X_MSB__REG          (BMI160_USER_DATA_15_ADDR)
+
+/* ACC_Y (LSB) Description - Reg Addr --> (0x14), Bit --> 0...7 */
+#define BMI160_USER_DATA_16_ACCEL_Y_LSB__POS           (0)
+#define BMI160_USER_DATA_16_ACCEL_Y_LSB__LEN           (8)
+#define BMI160_USER_DATA_16_ACCEL_Y_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_16_ACCEL_Y_LSB__REG          (BMI160_USER_DATA_16_ADDR)
+
+/* ACC_Y (MSB) Description - Reg Addr --> (0x15), Bit --> 0...7 */
+#define BMI160_USER_DATA_17_ACCEL_Y_MSB__POS           (0)
+#define BMI160_USER_DATA_17_ACCEL_Y_MSB__LEN           (8)
+#define BMI160_USER_DATA_17_ACCEL_Y_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_17_ACCEL_Y_MSB__REG          (BMI160_USER_DATA_17_ADDR)
+
+/* ACC_Z (LSB) Description - Reg Addr --> 0x16, Bit --> 0...7 */
+#define BMI160_USER_DATA_18_ACCEL_Z_LSB__POS           (0)
+#define BMI160_USER_DATA_18_ACCEL_Z_LSB__LEN           (8)
+#define BMI160_USER_DATA_18_ACCEL_Z_LSB__MSK          (0xFF)
+#define BMI160_USER_DATA_18_ACCEL_Z_LSB__REG          (BMI160_USER_DATA_18_ADDR)
+
+/* ACC_Z (MSB) Description - Reg Addr --> (0x17), Bit --> 0...7 */
+#define BMI160_USER_DATA_19_ACCEL_Z_MSB__POS           (0)
+#define BMI160_USER_DATA_19_ACCEL_Z_MSB__LEN           (8)
+#define BMI160_USER_DATA_19_ACCEL_Z_MSB__MSK          (0xFF)
+#define BMI160_USER_DATA_19_ACCEL_Z_MSB__REG          (BMI160_USER_DATA_19_ADDR)
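+/*
+ * Sketch of how an LSB/MSB pair above is normally combined into one signed
+ * 16-bit axis sample (accel and gyro data are plain two's complement).  The
+ * helper name is hypothetical.
+ */
+static inline s16 bmi160_assemble_s16(u8 lsb, u8 msb)
+{
+	/* MSB carries the sign bit; OR-ing in the LSB completes the sample */
+	return (s16)(((u16)msb << 8) | lsb);
+}
+
+/* Example: acc_x = bmi160_assemble_s16(reg_0x12, reg_0x13); */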
+/**************************************************************/
+/**\name	SENSOR TIME LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* SENSORTIME_0 (LSB) Description - Reg Addr --> (0x18), Bit --> 0...7 */
+#define BMI160_USER_SENSORTIME_0_SENSOR_TIME_LSB__POS           (0)
+#define BMI160_USER_SENSORTIME_0_SENSOR_TIME_LSB__LEN           (8)
+#define BMI160_USER_SENSORTIME_0_SENSOR_TIME_LSB__MSK          (0xFF)
+#define BMI160_USER_SENSORTIME_0_SENSOR_TIME_LSB__REG          \
+		(BMI160_USER_SENSORTIME_0_ADDR)
+
+/* SENSORTIME_1 (MSB) Description - Reg Addr --> (0x19), Bit --> 0...7 */
+#define BMI160_USER_SENSORTIME_1_SENSOR_TIME_MSB__POS           (0)
+#define BMI160_USER_SENSORTIME_1_SENSOR_TIME_MSB__LEN           (8)
+#define BMI160_USER_SENSORTIME_1_SENSOR_TIME_MSB__MSK          (0xFF)
+#define BMI160_USER_SENSORTIME_1_SENSOR_TIME_MSB__REG          \
+		(BMI160_USER_SENSORTIME_1_ADDR)
+
+/* SENSORTIME_2 (MSB) Description - Reg Addr --> (0x1A), Bit --> 0...7 */
+#define BMI160_USER_SENSORTIME_2_SENSOR_TIME_MSB__POS           (0)
+#define BMI160_USER_SENSORTIME_2_SENSOR_TIME_MSB__LEN           (8)
+#define BMI160_USER_SENSORTIME_2_SENSOR_TIME_MSB__MSK          (0xFF)
+#define BMI160_USER_SENSORTIME_2_SENSOR_TIME_MSB__REG          \
+		(BMI160_USER_SENSORTIME_2_ADDR)
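+/*
+ * Sketch for the free-running 24-bit sensor-time counter spread over the
+ * three registers above (0x18..0x1A).  The 39 us-per-LSB resolution quoted
+ * below is an assumption taken from the BMI160 datasheet; the helper name
+ * is hypothetical.
+ */
+static inline u32 bmi160_sensor_time_ticks(u8 t0, u8 t1, u8 t2)
+{
+	/* t0 = LSB, t2 = MSB of the 24-bit counter */
+	return ((u32)t2 << 16) | ((u32)t1 << 8) | t0;
+}
+
+/* elapsed_us is approximately bmi160_sensor_time_ticks(t0, t1, t2) * 39 */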
+/**************************************************************/
+/**\name	GYRO SELF TEST LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 1 */
+#define BMI160_USER_STAT_GYRO_SELFTEST_OK__POS          (1)
+#define BMI160_USER_STAT_GYRO_SELFTEST_OK__LEN          (1)
+#define BMI160_USER_STAT_GYRO_SELFTEST_OK__MSK          (0x02)
+#define BMI160_USER_STAT_GYRO_SELFTEST_OK__REG         \
+		(BMI160_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	MAG MANUAL OPERATION LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 2 */
+#define BMI160_USER_STAT_MAG_MANUAL_OPERATION__POS          (2)
+#define BMI160_USER_STAT_MAG_MANUAL_OPERATION__LEN          (1)
+#define BMI160_USER_STAT_MAG_MANUAL_OPERATION__MSK          (0x04)
+#define BMI160_USER_STAT_MAG_MANUAL_OPERATION__REG          \
+		(BMI160_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	FOC STATUS LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 3 */
+#define BMI160_USER_STAT_FOC_RDY__POS          (3)
+#define BMI160_USER_STAT_FOC_RDY__LEN          (1)
+#define BMI160_USER_STAT_FOC_RDY__MSK          (0x08)
+#define BMI160_USER_STAT_FOC_RDY__REG          (BMI160_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	NVM READY LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 4 */
+#define BMI160_USER_STAT_NVM_RDY__POS           (4)
+#define BMI160_USER_STAT_NVM_RDY__LEN           (1)
+#define BMI160_USER_STAT_NVM_RDY__MSK           (0x10)
+#define BMI160_USER_STAT_NVM_RDY__REG           (BMI160_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	DATA READY LENGTH, POSITION AND MASK FOR ACCEL, MAG AND GYRO*/
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 5 */
+#define BMI160_USER_STAT_DATA_RDY_MAG__POS           (5)
+#define BMI160_USER_STAT_DATA_RDY_MAG__LEN           (1)
+#define BMI160_USER_STAT_DATA_RDY_MAG__MSK           (0x20)
+#define BMI160_USER_STAT_DATA_RDY_MAG__REG           (BMI160_USER_STAT_ADDR)
+
+/* Status Description - Reg Addr --> 0x1B, Bit --> 6 */
+#define BMI160_USER_STAT_DATA_RDY_GYRO__POS           (6)
+#define BMI160_USER_STAT_DATA_RDY_GYRO__LEN           (1)
+#define BMI160_USER_STAT_DATA_RDY_GYRO__MSK           (0x40)
+#define BMI160_USER_STAT_DATA_RDY_GYRO__REG           (BMI160_USER_STAT_ADDR)
+
+/* Status Description - Reg Addr --> 0x1B, Bit --> 7 */
+#define BMI160_USER_STAT_DATA_RDY_ACCEL__POS           (7)
+#define BMI160_USER_STAT_DATA_RDY_ACCEL__LEN           (1)
+#define BMI160_USER_STAT_DATA_RDY_ACCEL__MSK           (0x80)
+#define BMI160_USER_STAT_DATA_RDY_ACCEL__REG           (BMI160_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT STATUS LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 0 */
+#define BMI160_USER_INTR_STAT_0_STEP_INTR__POS           (0)
+#define BMI160_USER_INTR_STAT_0_STEP_INTR__LEN           (1)
+#define BMI160_USER_INTR_STAT_0_STEP_INTR__MSK          (0x01)
+#define BMI160_USER_INTR_STAT_0_STEP_INTR__REG          \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	SIGNIFICANT INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 1 */
+#define BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR__POS		(1)
+#define BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR__LEN		(1)
+#define BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR__MSK		(0x02)
+#define BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR__REG       \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	ANY_MOTION INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 2 */
+#define BMI160_USER_INTR_STAT_0_ANY_MOTION__POS           (2)
+#define BMI160_USER_INTR_STAT_0_ANY_MOTION__LEN           (1)
+#define BMI160_USER_INTR_STAT_0_ANY_MOTION__MSK          (0x04)
+#define BMI160_USER_INTR_STAT_0_ANY_MOTION__REG          \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	PMU TRIGGER INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 3 */
+#define BMI160_USER_INTR_STAT_0_PMU_TRIGGER__POS           (3)
+#define BMI160_USER_INTR_STAT_0_PMU_TRIGGER__LEN           (1)
+#define BMI160_USER_INTR_STAT_0_PMU_TRIGGER__MSK          (0x08)
+#define BMI160_USER_INTR_STAT_0_PMU_TRIGGER__REG          \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	DOUBLE TAP INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 4 */
+#define BMI160_USER_INTR_STAT_0_DOUBLE_TAP_INTR__POS           (4)
+#define BMI160_USER_INTR_STAT_0_DOUBLE_TAP_INTR__LEN           (1)
+#define BMI160_USER_INTR_STAT_0_DOUBLE_TAP_INTR__MSK          (0x10)
+#define BMI160_USER_INTR_STAT_0_DOUBLE_TAP_INTR__REG          \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	SINGLE TAP INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 5 */
+#define BMI160_USER_INTR_STAT_0_SINGLE_TAP_INTR__POS           (5)
+#define BMI160_USER_INTR_STAT_0_SINGLE_TAP_INTR__LEN           (1)
+#define BMI160_USER_INTR_STAT_0_SINGLE_TAP_INTR__MSK          (0x20)
+#define BMI160_USER_INTR_STAT_0_SINGLE_TAP_INTR__REG          \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 6 */
+#define BMI160_USER_INTR_STAT_0_ORIENT__POS           (6)
+#define BMI160_USER_INTR_STAT_0_ORIENT__LEN           (1)
+#define BMI160_USER_INTR_STAT_0_ORIENT__MSK          (0x40)
+#define BMI160_USER_INTR_STAT_0_ORIENT__REG          \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	FLAT INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 7 */
+#define BMI160_USER_INTR_STAT_0_FLAT__POS           (7)
+#define BMI160_USER_INTR_STAT_0_FLAT__LEN           (1)
+#define BMI160_USER_INTR_STAT_0_FLAT__MSK          (0x80)
+#define BMI160_USER_INTR_STAT_0_FLAT__REG          \
+		(BMI160_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	HIGH_G INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 2 */
+#define BMI160_USER_INTR_STAT_1_HIGH_G_INTR__POS               (2)
+#define BMI160_USER_INTR_STAT_1_HIGH_G_INTR__LEN               (1)
+#define BMI160_USER_INTR_STAT_1_HIGH_G_INTR__MSK              (0x04)
+#define BMI160_USER_INTR_STAT_1_HIGH_G_INTR__REG              \
+		(BMI160_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	LOW_G INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 3 */
+#define BMI160_USER_INTR_STAT_1_LOW_G_INTR__POS               (3)
+#define BMI160_USER_INTR_STAT_1_LOW_G_INTR__LEN               (1)
+#define BMI160_USER_INTR_STAT_1_LOW_G_INTR__MSK              (0x08)
+#define BMI160_USER_INTR_STAT_1_LOW_G_INTR__REG              \
+		(BMI160_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	DATA READY INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 4 */
+#define BMI160_USER_INTR_STAT_1_DATA_RDY_INTR__POS               (4)
+#define BMI160_USER_INTR_STAT_1_DATA_RDY_INTR__LEN               (1)
+#define BMI160_USER_INTR_STAT_1_DATA_RDY_INTR__MSK               (0x10)
+#define BMI160_USER_INTR_STAT_1_DATA_RDY_INTR__REG               \
+		(BMI160_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	FIFO FULL INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 5 */
+#define BMI160_USER_INTR_STAT_1_FIFO_FULL_INTR__POS               (5)
+#define BMI160_USER_INTR_STAT_1_FIFO_FULL_INTR__LEN               (1)
+#define BMI160_USER_INTR_STAT_1_FIFO_FULL_INTR__MSK               (0x20)
+#define BMI160_USER_INTR_STAT_1_FIFO_FULL_INTR__REG               \
+		(BMI160_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name FIFO WATERMARK INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 6 */
+#define BMI160_USER_INTR_STAT_1_FIFO_WM_INTR__POS               (6)
+#define BMI160_USER_INTR_STAT_1_FIFO_WM_INTR__LEN               (1)
+#define BMI160_USER_INTR_STAT_1_FIFO_WM_INTR__MSK               (0x40)
+#define BMI160_USER_INTR_STAT_1_FIFO_WM_INTR__REG               \
+		(BMI160_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	NO MOTION INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 7 */
+#define BMI160_USER_INTR_STAT_1_NOMOTION_INTR__POS               (7)
+#define BMI160_USER_INTR_STAT_1_NOMOTION_INTR__LEN               (1)
+#define BMI160_USER_INTR_STAT_1_NOMOTION_INTR__MSK               (0x80)
+#define BMI160_USER_INTR_STAT_1_NOMOTION_INTR__REG               \
+		(BMI160_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION-XYZ AXIS INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 0 */
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__POS               (0)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__MSK               (0x01)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 1 */
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__POS               (1)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__MSK               (0x02)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 2 */
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__POS               (2)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__MSK               (0x04)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION SIGN LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 3 */
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_SIGN__POS               (3)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_SIGN__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_SIGN__MSK               (0x08)
+#define BMI160_USER_INTR_STAT_2_ANY_MOTION_SIGN__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	TAP_XYZ AND SIGN LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 4 */
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_X__POS               (4)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_X__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_X__MSK               (0x10)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_X__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 5 */
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Y__POS               (5)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Y__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Y__MSK               (0x20)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Y__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 6 */
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Z__POS               (6)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Z__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Z__MSK               (0x40)
+#define BMI160_USER_INTR_STAT_2_TAP_FIRST_Z__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 7 */
+#define BMI160_USER_INTR_STAT_2_TAP_SIGN__POS               (7)
+#define BMI160_USER_INTR_STAT_2_TAP_SIGN__LEN               (1)
+#define BMI160_USER_INTR_STAT_2_TAP_SIGN__MSK               (0x80)
+#define BMI160_USER_INTR_STAT_2_TAP_SIGN__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT STATUS FOR WHOLE 0x1E LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 0...7 */
+#define BMI160_USER_INTR_STAT_2__POS               (0)
+#define BMI160_USER_INTR_STAT_2__LEN               (8)
+#define BMI160_USER_INTR_STAT_2__MSK               (0xFF)
+#define BMI160_USER_INTR_STAT_2__REG               \
+		(BMI160_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	HIGH_G-XYZ AND SIGN LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 0 */
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_X__POS               (0)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_X__LEN               (1)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_X__MSK               (0x01)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_X__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 1 */
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Y__POS               (1)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Y__LEN               (1)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Y__MSK               (0x02)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Y__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 2 */
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Z__POS               (2)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Z__LEN               (1)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Z__MSK               (0x04)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_FIRST_Z__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 3 */
+#define BMI160_USER_INTR_STAT_3_HIGH_G_SIGN__POS               (3)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_SIGN__LEN               (1)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_SIGN__MSK               (0x08)
+#define BMI160_USER_INTR_STAT_3_HIGH_G_SIGN__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	ORIENT XY and Z AXIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 4...5 */
+#define BMI160_USER_INTR_STAT_3_ORIENT_XY__POS               (4)
+#define BMI160_USER_INTR_STAT_3_ORIENT_XY__LEN               (2)
+#define BMI160_USER_INTR_STAT_3_ORIENT_XY__MSK               (0x30)
+#define BMI160_USER_INTR_STAT_3_ORIENT_XY__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 6 */
+#define BMI160_USER_INTR_STAT_3_ORIENT_Z__POS               (6)
+#define BMI160_USER_INTR_STAT_3_ORIENT_Z__LEN               (1)
+#define BMI160_USER_INTR_STAT_3_ORIENT_Z__MSK               (0x40)
+#define BMI160_USER_INTR_STAT_3_ORIENT_Z__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	FLAT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 7 */
+#define BMI160_USER_INTR_STAT_3_FLAT__POS               (7)
+#define BMI160_USER_INTR_STAT_3_FLAT__LEN               (1)
+#define BMI160_USER_INTR_STAT_3_FLAT__MSK               (0x80)
+#define BMI160_USER_INTR_STAT_3_FLAT__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT STATUS FOR WHOLE (0x1F) LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 0...7 */
+#define BMI160_USER_INTR_STAT_3__POS               (0)
+#define BMI160_USER_INTR_STAT_3__LEN               (8)
+#define BMI160_USER_INTR_STAT_3__MSK               (0xFF)
+#define BMI160_USER_INTR_STAT_3__REG               \
+		(BMI160_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	TEMPERATURE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Temperature Description - LSB Reg Addr --> (0x20), Bit --> 0...7 */
+#define BMI160_USER_TEMP_LSB_VALUE__POS               (0)
+#define BMI160_USER_TEMP_LSB_VALUE__LEN               (8)
+#define BMI160_USER_TEMP_LSB_VALUE__MSK               (0xFF)
+#define BMI160_USER_TEMP_LSB_VALUE__REG               \
+		(BMI160_USER_TEMPERATURE_0_ADDR)
+
+/* Temperature Description - MSB Reg Addr --> (0x21), Bit --> 0...7 */
+#define BMI160_USER_TEMP_MSB_VALUE__POS               (0)
+#define BMI160_USER_TEMP_MSB_VALUE__LEN               (8)
+#define BMI160_USER_TEMP_MSB_VALUE__MSK               (0xFF)
+#define BMI160_USER_TEMP_MSB_VALUE__REG               \
+		(BMI160_USER_TEMPERATURE_1_ADDR)
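+/*
+ * Sketch for converting the signed 16-bit temperature word in 0x20/0x21.
+ * The scaling (0x0000 = 23 degC, 1/512 degC per LSB) is an assumption taken
+ * from the BMI160 datasheet; the helper name is hypothetical.
+ */
+static inline s32 bmi160_temp_milli_degc(u8 lsb, u8 msb)
+{
+	s16 raw = (s16)(((u16)msb << 8) | lsb);
+
+	/* 23000 mdegC offset plus raw * (1000 / 512) mdegC per LSB */
+	return 23000 + ((s32)raw * 1000) / 512;
+}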
+/**************************************************************/
+/**\name	FIFO BYTE COUNTER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Length0 Description - Reg Addr --> 0x22, Bit --> 0...7 */
+#define BMI160_USER_FIFO_BYTE_COUNTER_LSB__POS           (0)
+#define BMI160_USER_FIFO_BYTE_COUNTER_LSB__LEN           (8)
+#define BMI160_USER_FIFO_BYTE_COUNTER_LSB__MSK          (0xFF)
+#define BMI160_USER_FIFO_BYTE_COUNTER_LSB__REG          \
+		(BMI160_USER_FIFO_LENGTH_0_ADDR)
+
+/* Fifo_Length1 Description - Reg Addr --> 0x23, Bit --> 0...2 */
+#define BMI160_USER_FIFO_BYTE_COUNTER_MSB__POS           (0)
+#define BMI160_USER_FIFO_BYTE_COUNTER_MSB__LEN           (3)
+#define BMI160_USER_FIFO_BYTE_COUNTER_MSB__MSK          (0x07)
+#define BMI160_USER_FIFO_BYTE_COUNTER_MSB__REG          \
+		(BMI160_USER_FIFO_LENGTH_1_ADDR)
+
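+/*
+ * Sketch for the 11-bit FIFO fill level split across 0x22/0x23; only the
+ * low three bits of FIFO_LENGTH_1 are valid, as the mask above indicates.
+ * The helper name is hypothetical.
+ */
+static inline u16 bmi160_fifo_fill_level(u8 len0, u8 len1)
+{
+	/* mask the MSB byte to its valid bits before shifting it up */
+	return (u16)(((u16)(len1 &
+		BMI160_USER_FIFO_BYTE_COUNTER_MSB__MSK) << 8) | len0);
+}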
+/**************************************************************/
+/**\name	FIFO DATA LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Data Description - Reg Addr --> 0x24, Bit --> 0...7 */
+#define BMI160_USER_FIFO_DATA__POS           (0)
+#define BMI160_USER_FIFO_DATA__LEN           (8)
+#define BMI160_USER_FIFO_DATA__MSK          (0xFF)
+#define BMI160_USER_FIFO_DATA__REG          (BMI160_USER_FIFO_DATA_ADDR)
+
+/**************************************************************/
+/**\name	ACCEL CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Acc_Conf Description - Reg Addr --> (0x40), Bit --> 0...3 */
+#define BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__POS               (0)
+#define BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__LEN               (4)
+#define BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__MSK               (0x0F)
+#define BMI160_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG		       \
+(BMI160_USER_ACCEL_CONFIG_ADDR)
+
+/* Acc_Conf Description - Reg Addr --> (0x40), Bit --> 4...6 */
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_BW__POS               (4)
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_BW__LEN               (3)
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_BW__MSK               (0x70)
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_BW__REG	(BMI160_USER_ACCEL_CONFIG_ADDR)
+
+/* Acc_Conf Description - Reg Addr --> (0x40), Bit --> 7 */
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__POS           (7)
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__LEN           (1)
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__MSK           (0x80)
+#define BMI160_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG	\
+(BMI160_USER_ACCEL_CONFIG_ADDR)
+
+/* Acc_Range Description - Reg Addr --> 0x41, Bit --> 0...3 */
+#define BMI160_USER_ACCEL_RANGE__POS               (0)
+#define BMI160_USER_ACCEL_RANGE__LEN               (4)
+#define BMI160_USER_ACCEL_RANGE__MSK               (0x0F)
+#define BMI160_USER_ACCEL_RANGE__REG              \
+(BMI160_USER_ACCEL_RANGE_ADDR)
+/**************************************************************/
+/**\name	GYRO CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Gyro_Conf Description - Reg Addr --> (0x42), Bit --> 0...3 */
+#define BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__POS               (0)
+#define BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__LEN               (4)
+#define BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__MSK               (0x0F)
+#define BMI160_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG               \
+(BMI160_USER_GYRO_CONFIG_ADDR)
+
+/* Gyro_Conf Description - Reg Addr --> (0x42), Bit --> 4...5 */
+#define BMI160_USER_GYRO_CONFIG_BW__POS               (4)
+#define BMI160_USER_GYRO_CONFIG_BW__LEN               (2)
+#define BMI160_USER_GYRO_CONFIG_BW__MSK               (0x30)
+#define BMI160_USER_GYRO_CONFIG_BW__REG               \
+(BMI160_USER_GYRO_CONFIG_ADDR)
+
+/* Gyr_Range Description - Reg Addr --> 0x43, Bit --> 0...2 */
+#define BMI160_USER_GYRO_RANGE__POS               (0)
+#define BMI160_USER_GYRO_RANGE__LEN               (3)
+#define BMI160_USER_GYRO_RANGE__MSK               (0x07)
+#define BMI160_USER_GYRO_RANGE__REG               (BMI160_USER_GYRO_RANGE_ADDR)
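+/*
+ * Sketch mapping the ACC_RANGE (0x41) and GYR_RANGE (0x43) fields above to
+ * full-scale values.  The encodings used here (0x3/0x5/0x8/0xC -> 2/4/8/16 g
+ * and 0..4 -> 2000..125 dps) are assumptions taken from the BMI160
+ * datasheet; the helper names are hypothetical.
+ */
+static inline u8 bmi160_accel_range_g(u8 range_bits)
+{
+	switch (range_bits & BMI160_USER_ACCEL_RANGE__MSK) {
+	case 0x3:
+		return 2;
+	case 0x5:
+		return 4;
+	case 0x8:
+		return 8;
+	case 0xC:
+		return 16;
+	default:
+		return 0;	/* reserved encoding */
+	}
+}
+
+static inline u16 bmi160_gyro_range_dps(u8 range_bits)
+{
+	/* 0 -> 2000 dps; each valid step (up to 4) halves the full scale */
+	return (u16)(2000 >> (range_bits & BMI160_USER_GYRO_RANGE__MSK));
+}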
+/**************************************************************/
+/**\name	MAG CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Mag_Conf Description - Reg Addr --> (0x44), Bit --> 0...3 */
+#define BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE__POS               (0)
+#define BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE__LEN               (4)
+#define BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE__MSK               (0x0F)
+#define BMI160_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG               \
+(BMI160_USER_MAG_CONFIG_ADDR)
+/**************************************************************/
+/**\name	FIFO DOWNS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Downs Description - Reg Addr --> 0x45, Bit --> 0...2 */
+#define BMI160_USER_FIFO_DOWN_GYRO__POS               (0)
+#define BMI160_USER_FIFO_DOWN_GYRO__LEN               (3)
+#define BMI160_USER_FIFO_DOWN_GYRO__MSK               (0x07)
+#define BMI160_USER_FIFO_DOWN_GYRO__REG	(BMI160_USER_FIFO_DOWN_ADDR)
+/**************************************************************/
+/**\name	FIFO FILTER FOR ACCEL AND GYRO LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_filt Description - Reg Addr --> 0x45, Bit --> 3 */
+#define BMI160_USER_FIFO_FILTER_GYRO__POS               (3)
+#define BMI160_USER_FIFO_FILTER_GYRO__LEN               (1)
+#define BMI160_USER_FIFO_FILTER_GYRO__MSK               (0x08)
+#define BMI160_USER_FIFO_FILTER_GYRO__REG	  (BMI160_USER_FIFO_DOWN_ADDR)
+
+/* Fifo_Downs Description - Reg Addr --> 0x45, Bit --> 4...6 */
+#define BMI160_USER_FIFO_DOWN_ACCEL__POS               (4)
+#define BMI160_USER_FIFO_DOWN_ACCEL__LEN               (3)
+#define BMI160_USER_FIFO_DOWN_ACCEL__MSK               (0x70)
+#define BMI160_USER_FIFO_DOWN_ACCEL__REG	(BMI160_USER_FIFO_DOWN_ADDR)
+
+/* Fifo_FILT Description - Reg Addr --> 0x45, Bit --> 7 */
+#define BMI160_USER_FIFO_FILTER_ACCEL__POS               (7)
+#define BMI160_USER_FIFO_FILTER_ACCEL__LEN               (1)
+#define BMI160_USER_FIFO_FILTER_ACCEL__MSK               (0x80)
+#define BMI160_USER_FIFO_FILTER_ACCEL__REG	(BMI160_USER_FIFO_DOWN_ADDR)
+/**************************************************************/
+/**\name	FIFO WATER MARK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_0 Description - Reg Addr --> 0x46, Bit --> 0...7 */
+#define BMI160_USER_FIFO_WM__POS               (0)
+#define BMI160_USER_FIFO_WM__LEN               (8)
+#define BMI160_USER_FIFO_WM__MSK               (0xFF)
+#define BMI160_USER_FIFO_WM__REG	(BMI160_USER_FIFO_CONFIG_0_ADDR)
+/**************************************************************/
+/**\name	FIFO TIME LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 1 */
+#define BMI160_USER_FIFO_TIME_ENABLE__POS               (1)
+#define BMI160_USER_FIFO_TIME_ENABLE__LEN               (1)
+#define BMI160_USER_FIFO_TIME_ENABLE__MSK               (0x02)
+#define BMI160_USER_FIFO_TIME_ENABLE__REG	(BMI160_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO TAG INTERRUPT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 2 */
+#define BMI160_USER_FIFO_TAG_INTR2_ENABLE__POS               (2)
+#define BMI160_USER_FIFO_TAG_INTR2_ENABLE__LEN               (1)
+#define BMI160_USER_FIFO_TAG_INTR2_ENABLE__MSK               (0x04)
+#define BMI160_USER_FIFO_TAG_INTR2_ENABLE__REG	(BMI160_USER_FIFO_CONFIG_1_ADDR)
+
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 3 */
+#define BMI160_USER_FIFO_TAG_INTR1_ENABLE__POS               (3)
+#define BMI160_USER_FIFO_TAG_INTR1_ENABLE__LEN               (1)
+#define BMI160_USER_FIFO_TAG_INTR1_ENABLE__MSK               (0x08)
+#define BMI160_USER_FIFO_TAG_INTR1_ENABLE__REG	(BMI160_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO HEADER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 4 */
+#define BMI160_USER_FIFO_HEADER_ENABLE__POS               (4)
+#define BMI160_USER_FIFO_HEADER_ENABLE__LEN               (1)
+#define BMI160_USER_FIFO_HEADER_ENABLE__MSK               (0x10)
+#define BMI160_USER_FIFO_HEADER_ENABLE__REG		         \
+(BMI160_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO MAG ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 5 */
+#define BMI160_USER_FIFO_MAG_ENABLE__POS               (5)
+#define BMI160_USER_FIFO_MAG_ENABLE__LEN               (1)
+#define BMI160_USER_FIFO_MAG_ENABLE__MSK               (0x20)
+#define BMI160_USER_FIFO_MAG_ENABLE__REG		     \
+(BMI160_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO ACCEL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 6 */
+#define BMI160_USER_FIFO_ACCEL_ENABLE__POS               (6)
+#define BMI160_USER_FIFO_ACCEL_ENABLE__LEN               (1)
+#define BMI160_USER_FIFO_ACCEL_ENABLE__MSK               (0x40)
+#define BMI160_USER_FIFO_ACCEL_ENABLE__REG		        \
+(BMI160_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO GYRO ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 7 */
+#define BMI160_USER_FIFO_GYRO_ENABLE__POS               (7)
+#define BMI160_USER_FIFO_GYRO_ENABLE__LEN               (1)
+#define BMI160_USER_FIFO_GYRO_ENABLE__MSK               (0x80)
+#define BMI160_USER_FIFO_GYRO_ENABLE__REG		       \
+(BMI160_USER_FIFO_CONFIG_1_ADDR)
+
+/**************************************************************/
+/**\name	MAG I2C ADDRESS SELECTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+
+/* Mag_IF_0 Description - Reg Addr --> 0x4b, Bit --> 1...7 */
+#define BMI160_USER_I2C_DEVICE_ADDR__POS               (1)
+#define BMI160_USER_I2C_DEVICE_ADDR__LEN               (7)
+#define BMI160_USER_I2C_DEVICE_ADDR__MSK               (0xFE)
+#define BMI160_USER_I2C_DEVICE_ADDR__REG	(BMI160_USER_MAG_IF_0_ADDR)
+/**************************************************************/
+/**\name MAG CONFIGURATION FOR SECONDARY
+	INTERFACE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Mag_IF_1 Description - Reg Addr --> 0x4c, Bit --> 0...1 */
+#define BMI160_USER_MAG_BURST__POS               (0)
+#define BMI160_USER_MAG_BURST__LEN               (2)
+#define BMI160_USER_MAG_BURST__MSK               (0x03)
+#define BMI160_USER_MAG_BURST__REG               (BMI160_USER_MAG_IF_1_ADDR)
+
+/* Mag_IF_1 Description - Reg Addr --> 0x4c, Bit --> 2...5 */
+#define BMI160_USER_MAG_OFFSET__POS               (2)
+#define BMI160_USER_MAG_OFFSET__LEN               (4)
+#define BMI160_USER_MAG_OFFSET__MSK               (0x3C)
+#define BMI160_USER_MAG_OFFSET__REG               (BMI160_USER_MAG_IF_1_ADDR)
+
+/* Mag_IF_1 Description - Reg Addr --> 0x4c, Bit --> 7 */
+#define BMI160_USER_MAG_MANUAL_ENABLE__POS               (7)
+#define BMI160_USER_MAG_MANUAL_ENABLE__LEN               (1)
+#define BMI160_USER_MAG_MANUAL_ENABLE__MSK               (0x80)
+#define BMI160_USER_MAG_MANUAL_ENABLE__REG               \
+(BMI160_USER_MAG_IF_1_ADDR)
+
+/* Mag_IF_2 Description - Reg Addr --> 0x4d, Bit --> 0...7 */
+#define BMI160_USER_READ_ADDR__POS               (0)
+#define BMI160_USER_READ_ADDR__LEN               (8)
+#define BMI160_USER_READ_ADDR__MSK               (0xFF)
+#define BMI160_USER_READ_ADDR__REG               (BMI160_USER_MAG_IF_2_ADDR)
+
+/* Mag_IF_3 Description - Reg Addr --> 0x4e, Bit --> 0...7 */
+#define BMI160_USER_WRITE_ADDR__POS               (0)
+#define BMI160_USER_WRITE_ADDR__LEN               (8)
+#define BMI160_USER_WRITE_ADDR__MSK               (0xFF)
+#define BMI160_USER_WRITE_ADDR__REG               (BMI160_USER_MAG_IF_3_ADDR)
+
+/* Mag_IF_4 Description - Reg Addr --> 0x4f, Bit --> 0...7 */
+#define BMI160_USER_WRITE_DATA__POS               (0)
+#define BMI160_USER_WRITE_DATA__LEN               (8)
+#define BMI160_USER_WRITE_DATA__MSK               (0xFF)
+#define BMI160_USER_WRITE_DATA__REG               (BMI160_USER_MAG_IF_4_ADDR)
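+/*
+ * Sketch of the indirect magnetometer write implied by MAG_IF_3/MAG_IF_4
+ * above: the data byte is loaded into 0x4F first, and writing the target
+ * mag register address to 0x4E then triggers the transfer.  This sequencing
+ * is an assumption based on the BMI160 secondary-interface description and
+ * requires MAG_MANUAL_ENABLE to be set beforehand; bus_write() is a
+ * hypothetical byte-write callback.
+ */
+typedef int (*bmi160_bus_write_t)(u8 reg_addr, u8 val);
+
+static inline int bmi160_mag_indirect_write(bmi160_bus_write_t bus_write,
+					u8 mag_reg, u8 mag_val)
+{
+	int ret;
+
+	ret = bus_write(BMI160_USER_WRITE_DATA__REG, mag_val);
+	if (ret)
+		return ret;
+	/* address write triggers the actual secondary-interface transfer */
+	return bus_write(BMI160_USER_WRITE_ADDR__REG, mag_reg);
+}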
+/**************************************************************/
+/**\name	ANY MOTION XYZ AXIS ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->0 */
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__POS               (0)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__MSK               (0x01)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG	              \
+(BMI160_USER_INTR_ENABLE_0_ADDR)
+
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->1 */
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__POS               (1)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__MSK               (0x02)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG	          \
+(BMI160_USER_INTR_ENABLE_0_ADDR)
+
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->2 */
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__POS               (2)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__MSK               (0x04)
+#define BMI160_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG	            \
+(BMI160_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	DOUBLE TAP ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->4 */
+#define BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__POS               (4)
+#define BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__MSK               (0x10)
+#define BMI160_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG	        \
+(BMI160_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	SINGLE TAP ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->5 */
+#define BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__POS               (5)
+#define BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__MSK               (0x20)
+#define BMI160_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG	       \
+(BMI160_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->6 */
+#define BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE__POS               (6)
+#define BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE__MSK               (0x40)
+#define BMI160_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG	           \
+(BMI160_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	FLAT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->7 */
+#define BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE__POS               (7)
+#define BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE__MSK               (0x80)
+#define BMI160_USER_INTR_ENABLE_0_FLAT_ENABLE__REG	           \
+(BMI160_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	HIGH_G XYZ ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->0 */
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__POS               (0)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__MSK               (0x01)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG	           \
+(BMI160_USER_INTR_ENABLE_1_ADDR)
+
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->1 */
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__POS               (1)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__MSK               (0x02)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG	           \
+(BMI160_USER_INTR_ENABLE_1_ADDR)
+
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->2 */
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__POS               (2)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__MSK               (0x04)
+#define BMI160_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG	           \
+(BMI160_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	LOW_G ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->3 */
+#define BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__POS               (3)
+#define BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__MSK               (0x08)
+#define BMI160_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG	          \
+(BMI160_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	DATA READY ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->4 */
+#define BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__POS               (4)
+#define BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__MSK               (0x10)
+#define BMI160_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG	            \
+(BMI160_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	FIFO FULL AND WATER MARK ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->5 */
+#define BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__POS               (5)
+#define BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__MSK               (0x20)
+#define BMI160_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG	              \
+(BMI160_USER_INTR_ENABLE_1_ADDR)
+
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->6 */
+#define BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__POS               (6)
+#define BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__MSK               (0x40)
+#define BMI160_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG	           \
+(BMI160_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	NO MOTION XYZ ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->0 */
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__POS               (0)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__MSK               (0x01)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG	  \
+(BMI160_USER_INTR_ENABLE_2_ADDR)
+
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->1 */
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__POS               (1)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__MSK               (0x02)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG	  \
+(BMI160_USER_INTR_ENABLE_2_ADDR)
+
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->2 */
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__POS               (2)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__MSK               (0x04)
+#define BMI160_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG	  \
+(BMI160_USER_INTR_ENABLE_2_ADDR)
+/**************************************************************/
+/**\name	STEP DETECTOR ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->3 */
+#define BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__POS               (3)
+#define BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__LEN               (1)
+#define BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__MSK               (0x08)
+#define BMI160_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG	  \
+(BMI160_USER_INTR_ENABLE_2_ADDR)
+/**************************************************************/
+/**\name	EDGE CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->0 */
+#define BMI160_USER_INTR1_EDGE_CTRL__POS               (0)
+#define BMI160_USER_INTR1_EDGE_CTRL__LEN               (1)
+#define BMI160_USER_INTR1_EDGE_CTRL__MSK               (0x01)
+#define BMI160_USER_INTR1_EDGE_CTRL__REG		\
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	LEVEL CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->1 */
+#define BMI160_USER_INTR1_LEVEL__POS               (1)
+#define BMI160_USER_INTR1_LEVEL__LEN               (1)
+#define BMI160_USER_INTR1_LEVEL__MSK               (0x02)
+#define BMI160_USER_INTR1_LEVEL__REG               \
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	OUTPUT TYPE ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->2 */
+#define BMI160_USER_INTR1_OUTPUT_TYPE__POS               (2)
+#define BMI160_USER_INTR1_OUTPUT_TYPE__LEN               (1)
+#define BMI160_USER_INTR1_OUTPUT_TYPE__MSK               (0x04)
+#define BMI160_USER_INTR1_OUTPUT_TYPE__REG               \
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	OUTPUT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->3 */
+#define BMI160_USER_INTR1_OUTPUT_ENABLE__POS               (3)
+#define BMI160_USER_INTR1_OUTPUT_ENABLE__LEN               (1)
+#define BMI160_USER_INTR1_OUTPUT_ENABLE__MSK               (0x08)
+#define BMI160_USER_INTR1_OUTPUT_ENABLE__REG		\
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	EDGE CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->4 */
+#define BMI160_USER_INTR2_EDGE_CTRL__POS               (4)
+#define BMI160_USER_INTR2_EDGE_CTRL__LEN               (1)
+#define BMI160_USER_INTR2_EDGE_CTRL__MSK               (0x10)
+#define BMI160_USER_INTR2_EDGE_CTRL__REG		\
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	LEVEL CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->5 */
+#define BMI160_USER_INTR2_LEVEL__POS               (5)
+#define BMI160_USER_INTR2_LEVEL__LEN               (1)
+#define BMI160_USER_INTR2_LEVEL__MSK               (0x20)
+#define BMI160_USER_INTR2_LEVEL__REG               \
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	OUTPUT TYPE ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->6 */
+#define BMI160_USER_INTR2_OUTPUT_TYPE__POS               (6)
+#define BMI160_USER_INTR2_OUTPUT_TYPE__LEN               (1)
+#define BMI160_USER_INTR2_OUTPUT_TYPE__MSK               (0x40)
+#define BMI160_USER_INTR2_OUTPUT_TYPE__REG               \
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->7 */
+#define BMI160_USER_INTR2_OUTPUT_EN__POS               (7)
+#define BMI160_USER_INTR2_OUTPUT_EN__LEN               (1)
+#define BMI160_USER_INTR2_OUTPUT_EN__MSK               (0x80)
+#define BMI160_USER_INTR2_OUTPUT_EN__REG		\
+(BMI160_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	LATCH INTERRUPT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Latch Description - Reg Addr --> 0x54, Bit -->0...3 */
+#define BMI160_USER_INTR_LATCH__POS               (0)
+#define BMI160_USER_INTR_LATCH__LEN               (4)
+#define BMI160_USER_INTR_LATCH__MSK               (0x0F)
+#define BMI160_USER_INTR_LATCH__REG               (BMI160_USER_INTR_LATCH_ADDR)
+/**************************************************************/
+/**\name	INPUT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Latch Description - Reg Addr --> 0x54, Bit -->4 */
+#define BMI160_USER_INTR1_INPUT_ENABLE__POS               (4)
+#define BMI160_USER_INTR1_INPUT_ENABLE__LEN               (1)
+#define BMI160_USER_INTR1_INPUT_ENABLE__MSK               (0x10)
+#define BMI160_USER_INTR1_INPUT_ENABLE__REG               \
+(BMI160_USER_INTR_LATCH_ADDR)
+
+/* Int_Latch Description - Reg Addr --> 0x54, Bit -->5*/
+#define BMI160_USER_INTR2_INPUT_ENABLE__POS               (5)
+#define BMI160_USER_INTR2_INPUT_ENABLE__LEN               (1)
+#define BMI160_USER_INTR2_INPUT_ENABLE__MSK               (0x20)
+#define BMI160_USER_INTR2_INPUT_ENABLE__REG              \
+(BMI160_USER_INTR_LATCH_ADDR)
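+
+/*
+ * Usage sketch (not part of the register map): the __POS/__LEN/__MSK/__REG
+ * quadruplets above are meant to be consumed through bit-slice helpers,
+ * assumed here to have the usual form
+ * BMI160_GET_BITSLICE(reg_val, BITNAME) and
+ * BMI160_SET_BITSLICE(reg_val, BITNAME, field_val), defined elsewhere in
+ * this driver. With bus_read()/bus_write() standing in for the real bus
+ * access callbacks, enabling the INT1 output driver and reading back the
+ * latch setting would look roughly like this:
+ *
+ *	u8 v_data_u8 = 0;
+ *	u8 v_latch_u8 = 0;
+ *
+ *	bus_read(dev_addr, BMI160_USER_INTR1_OUTPUT_ENABLE__REG,
+ *		&v_data_u8, 1);
+ *	v_data_u8 = BMI160_SET_BITSLICE(v_data_u8,
+ *		BMI160_USER_INTR1_OUTPUT_ENABLE, 1);
+ *	bus_write(dev_addr, BMI160_USER_INTR1_OUTPUT_ENABLE__REG,
+ *		&v_data_u8, 1);
+ *
+ *	bus_read(dev_addr, BMI160_USER_INTR_LATCH__REG, &v_data_u8, 1);
+ *	v_latch_u8 = BMI160_GET_BITSLICE(v_data_u8, BMI160_USER_INTR_LATCH);
+ */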
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF LOW_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->0 */
+#define BMI160_USER_INTR_MAP_0_INTR1_LOW_G__POS               (0)
+#define BMI160_USER_INTR_MAP_0_INTR1_LOW_G__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_LOW_G__MSK               (0x01)
+#define BMI160_USER_INTR_MAP_0_INTR1_LOW_G__REG	(BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF HIGH_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->1 */
+#define BMI160_USER_INTR_MAP_0_INTR1_HIGH_G__POS               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_HIGH_G__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_HIGH_G__MSK               (0x02)
+#define BMI160_USER_INTR_MAP_0_INTR1_HIGH_G__REG	\
+(BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF ANY MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->2 */
+#define BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__POS               (2)
+#define BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__MSK               (0x04)
+#define BMI160_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG            \
+(BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF NO MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->3 */
+#define BMI160_USER_INTR_MAP_0_INTR1_NOMOTION__POS               (3)
+#define BMI160_USER_INTR_MAP_0_INTR1_NOMOTION__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_NOMOTION__MSK               (0x08)
+#define BMI160_USER_INTR_MAP_0_INTR1_NOMOTION__REG (BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF DOUBLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->4 */
+#define BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__POS               (4)
+#define BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__MSK               (0x10)
+#define BMI160_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG	\
+(BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF SINGLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->5 */
+#define BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP__POS               (5)
+#define BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP__MSK               (0x20)
+#define BMI160_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG	      \
+(BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF ORIENT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->6 */
+#define BMI160_USER_INTR_MAP_0_INTR1_ORIENT__POS               (6)
+#define BMI160_USER_INTR_MAP_0_INTR1_ORIENT__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_ORIENT__MSK               (0x40)
+#define BMI160_USER_INTR_MAP_0_INTR1_ORIENT__REG	          \
+(BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF FLAT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->7 */
+#define BMI160_USER_INTR_MAP_0_INTR1_FLAT__POS               (7)
+#define BMI160_USER_INTR_MAP_0_INTR1_FLAT__LEN               (1)
+#define BMI160_USER_INTR_MAP_0_INTR1_FLAT__MSK               (0x80)
+#define BMI160_USER_INTR_MAP_0_INTR1_FLAT__REG	(BMI160_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF PMU TRIGGER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->0 */
+#define BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG__POS               (0)
+#define BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG__MSK               (0x01)
+#define BMI160_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG (BMI160_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF FIFO FULL AND
+	WATER MARK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->1 */
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL__POS               (1)
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL__MSK               (0x02)
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG	         \
+(BMI160_USER_INTR_MAP_1_ADDR)
+
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->2 */
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM__POS               (2)
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM__MSK               (0x04)
+#define BMI160_USER_INTR_MAP_1_INTR2_FIFO_WM__REG	         \
+(BMI160_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF DATA READY LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->3 */
+#define BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY__POS               (3)
+#define BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY__MSK               (0x08)
+#define BMI160_USER_INTR_MAP_1_INTR2_DATA_RDY__REG	      \
+(BMI160_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF PMU TRIGGER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->4 */
+#define BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG__POS               (4)
+#define BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG__MSK               (0x10)
+#define BMI160_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG (BMI160_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF FIFO FULL AND
+	WATER MARK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->5 */
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL__POS               (5)
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL__MSK               (0x20)
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG	       \
+(BMI160_USER_INTR_MAP_1_ADDR)
+
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->6 */
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM__POS               (6)
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM__MSK               (0x40)
+#define BMI160_USER_INTR_MAP_1_INTR1_FIFO_WM__REG	\
+(BMI160_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF DATA READY LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->7 */
+#define BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY__POS               (7)
+#define BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY__LEN               (1)
+#define BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY__MSK               (0x80)
+#define BMI160_USER_INTR_MAP_1_INTR1_DATA_RDY__REG	\
+(BMI160_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF LOW_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->0 */
+#define BMI160_USER_INTR_MAP_2_INTR2_LOW_G__POS               (0)
+#define BMI160_USER_INTR_MAP_2_INTR2_LOW_G__LEN               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_LOW_G__MSK               (0x01)
+#define BMI160_USER_INTR_MAP_2_INTR2_LOW_G__REG	(BMI160_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF HIGH_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->1 */
+#define BMI160_USER_INTR_MAP_2_INTR2_HIGH_G__POS               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_HIGH_G__LEN               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_HIGH_G__MSK               (0x02)
+#define BMI160_USER_INTR_MAP_2_INTR2_HIGH_G__REG	\
+(BMI160_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF ANY MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->2 */
+#define BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__POS      (2)
+#define BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__LEN      (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__MSK     (0x04)
+#define BMI160_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG     \
+(BMI160_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF NO MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->3 */
+#define BMI160_USER_INTR_MAP_2_INTR2_NOMOTION__POS               (3)
+#define BMI160_USER_INTR_MAP_2_INTR2_NOMOTION__LEN               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_NOMOTION__MSK               (0x08)
+#define BMI160_USER_INTR_MAP_2_INTR2_NOMOTION__REG (BMI160_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF DOUBLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->4 */
+#define BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__POS               (4)
+#define BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__LEN               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__MSK               (0x10)
+#define BMI160_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG	\
+(BMI160_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF SINGLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->5 */
+#define BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP__POS               (5)
+#define BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP__LEN               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP__MSK               (0x20)
+#define BMI160_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG	\
+(BMI160_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF ORIENT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->6 */
+#define BMI160_USER_INTR_MAP_2_INTR2_ORIENT__POS               (6)
+#define BMI160_USER_INTR_MAP_2_INTR2_ORIENT__LEN               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_ORIENT__MSK               (0x40)
+#define BMI160_USER_INTR_MAP_2_INTR2_ORIENT__REG	\
+(BMI160_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF FLAT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->7 */
+#define BMI160_USER_INTR_MAP_2_INTR2_FLAT__POS               (7)
+#define BMI160_USER_INTR_MAP_2_INTR2_FLAT__LEN               (1)
+#define BMI160_USER_INTR_MAP_2_INTR2_FLAT__MSK               (0x80)
+#define BMI160_USER_INTR_MAP_2_INTR2_FLAT__REG	(BMI160_USER_INTR_MAP_2_ADDR)
+
+/**************************************************************/
+/**\name	TAP SOURCE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Data_0 Description - Reg Addr --> 0x58, Bit --> 3 */
+#define BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE__POS               (3)
+#define BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE__LEN               (1)
+#define BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE__MSK               (0x08)
+#define BMI160_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG	           \
+(BMI160_USER_INTR_DATA_0_ADDR)
+
+/**************************************************************/
+/**\name	LOW/HIGH SOURCE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Data_0 Description - Reg Addr --> 0x58, Bit --> 7 */
+#define BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__POS           (7)
+#define BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__LEN           (1)
+#define BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__MSK           (0x80)
+#define BMI160_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG            \
+(BMI160_USER_INTR_DATA_0_ADDR)
+
+/**************************************************************/
+/**\name	MOTION SOURCE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Data_1 Description - Reg Addr --> 0x59, Bit --> 7 */
+#define BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE__POS               (7)
+#define BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE__LEN               (1)
+#define BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE__MSK               (0x80)
+#define BMI160_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG               \
+		(BMI160_USER_INTR_DATA_1_ADDR)
+/**************************************************************/
+/**\name	LOW_G DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_0 Description - Reg Addr --> 0x5a, Bit --> 0...7 */
+#define BMI160_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__POS               (0)
+#define BMI160_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__LEN               (8)
+#define BMI160_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__MSK               (0xFF)
+#define BMI160_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__REG               \
+		(BMI160_USER_INTR_LOWHIGH_0_ADDR)
+/**************************************************************/
+/**\name	LOW THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_1 Description - Reg Addr --> 0x5b, Bit --> 0...7 */
+#define BMI160_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__POS               (0)
+#define BMI160_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__LEN               (8)
+#define BMI160_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__MSK               (0xFF)
+#define BMI160_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__REG               \
+		(BMI160_USER_INTR_LOWHIGH_1_ADDR)
+/**************************************************************/
+/**\name	LOW HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_2 Description - Reg Addr --> 0x5c, Bit --> 0...1 */
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__POS               (0)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__LEN               (2)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__MSK               (0x03)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG               \
+		(BMI160_USER_INTR_LOWHIGH_2_ADDR)
+/**************************************************************/
+/**\name	LOW MODE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_2 Description - Reg Addr --> 0x5c, Bit --> 2 */
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__POS               (2)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__LEN               (1)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__MSK               (0x04)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG               \
+		(BMI160_USER_INTR_LOWHIGH_2_ADDR)
+/**************************************************************/
+/**\name	HIGH_G HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_2 Description - Reg Addr --> 0x5c, Bit --> 6...7 */
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__POS               (6)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__LEN               (2)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__MSK               (0xC0)
+#define BMI160_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG               \
+		(BMI160_USER_INTR_LOWHIGH_2_ADDR)
+/**************************************************************/
+/**\name	HIGH_G DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_3 Description - Reg Addr --> 0x5d, Bit --> 0...7 */
+#define BMI160_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__POS               (0)
+#define BMI160_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__LEN               (8)
+#define BMI160_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__MSK               (0xFF)
+#define BMI160_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__REG               \
+		(BMI160_USER_INTR_LOWHIGH_3_ADDR)
+/**************************************************************/
+/**\name	HIGH_G THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_4 Description - Reg Addr --> 0x5e, Bit --> 0...7 */
+#define BMI160_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__POS               (0)
+#define BMI160_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__LEN               (8)
+#define BMI160_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__MSK               (0xFF)
+#define BMI160_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__REG               \
+		(BMI160_USER_INTR_LOWHIGH_4_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_0 Description - Reg Addr --> 0x5f, Bit --> 0...1 */
+#define BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__POS               (0)
+#define BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__LEN               (2)
+#define BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__MSK               (0x03)
+#define BMI160_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG               \
+		(BMI160_USER_INTR_MOTION_0_ADDR)
+/**************************************************************/
+/**\name	SLOW/NO MOTION DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_0 Description - Reg Addr --> 0x5f, Bit --> 2...7 */
+#define BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__POS      (2)
+#define BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__LEN      (6)
+#define BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__MSK      (0xFC)
+#define BMI160_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG       \
+		(BMI160_USER_INTR_MOTION_0_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_1 Description - Reg Addr --> (0x60), Bit --> 0...7 */
+#define BMI160_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__POS      (0)
+#define BMI160_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__LEN      (8)
+#define BMI160_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__MSK      (0xFF)
+#define BMI160_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__REG               \
+		(BMI160_USER_INTR_MOTION_1_ADDR)
+/**************************************************************/
+/**\name	SLOW/NO MOTION THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_2 Description - Reg Addr --> 0x61, Bit --> 0...7 */
+#define BMI160_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__POS       (0)
+#define BMI160_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__LEN       (8)
+#define BMI160_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__MSK       (0xFF)
+#define BMI160_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__REG       \
+		(BMI160_USER_INTR_MOTION_2_ADDR)
+/**************************************************************/
+/**\name	SLOW/NO MOTION SELECT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 0 */
+#define BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__POS	(0)
+#define BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__LEN	(1)
+#define BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__MSK	(0x01)
+#define BMI160_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG   \
+(BMI160_USER_INTR_MOTION_3_ADDR)
+/**************************************************************/
+/**\name	SIGNIFICANT MOTION SELECT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 1 */
+#define BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT__POS		(1)
+#define BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT__LEN		(1)
+#define BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT__MSK		(0x02)
+#define BMI160_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG		\
+		(BMI160_USER_INTR_MOTION_3_ADDR)
+
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 3..2 */
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP__POS		(2)
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP__LEN		(2)
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP__MSK		(0x0C)
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG		\
+		(BMI160_USER_INTR_MOTION_3_ADDR)
+
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 5..4 */
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF__POS		(4)
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF__LEN		(2)
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF__MSK		(0x30)
+#define BMI160_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG		\
+		(BMI160_USER_INTR_MOTION_3_ADDR)
+/**************************************************************/
+/**\name	TAP DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* INT_TAP_0 Description - Reg Addr --> (0x63), Bit --> 0..2*/
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_DURN__POS               (0)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_DURN__LEN               (3)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_DURN__MSK               (0x07)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_DURN__REG	\
+(BMI160_USER_INTR_TAP_0_ADDR)
+/**************************************************************/
+/**\name	TAP SHOCK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Tap_0 Description - Reg Addr --> (0x63), Bit --> 6 */
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK__POS               (6)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK__LEN               (1)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK__MSK               (0x40)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG (BMI160_USER_INTR_TAP_0_ADDR)
+/**************************************************************/
+/**\name	TAP QUIET LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Tap_0 Description - Reg Addr --> (0x63), Bit --> 7 */
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET__POS               (7)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET__LEN               (1)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET__MSK               (0x80)
+#define BMI160_USER_INTR_TAP_0_INTR_TAP_QUIET__REG (BMI160_USER_INTR_TAP_0_ADDR)
+/**************************************************************/
+/**\name	TAP THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Tap_1 Description - Reg Addr --> (0x64), Bit --> 0...4 */
+#define BMI160_USER_INTR_TAP_1_INTR_TAP_THRES__POS               (0)
+#define BMI160_USER_INTR_TAP_1_INTR_TAP_THRES__LEN               (5)
+#define BMI160_USER_INTR_TAP_1_INTR_TAP_THRES__MSK               (0x1F)
+#define BMI160_USER_INTR_TAP_1_INTR_TAP_THRES__REG (BMI160_USER_INTR_TAP_1_ADDR)
+/**************************************************************/
+/**\name	ORIENT MODE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_0 Description - Reg Addr --> (0x65), Bit --> 0...1 */
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__POS               (0)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__LEN               (2)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__MSK               (0x03)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG               \
+		(BMI160_USER_INTR_ORIENT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT BLOCKING LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_0 Description - Reg Addr --> (0x65), Bit --> 2...3 */
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__POS               (2)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__LEN               (2)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__MSK               (0x0C)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG               \
+		(BMI160_USER_INTR_ORIENT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_0 Description - Reg Addr --> (0x65), Bit --> 4...7 */
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__POS               (4)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__LEN               (4)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__MSK               (0xF0)
+#define BMI160_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG               \
+		(BMI160_USER_INTR_ORIENT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT THETA LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_1 Description - Reg Addr --> 0x66, Bit --> 0...5 */
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__POS               (0)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__LEN               (6)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__MSK               (0x3F)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG               \
+		(BMI160_USER_INTR_ORIENT_1_ADDR)
+/**************************************************************/
+/**\name	ORIENT UD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_1 Description - Reg Addr --> 0x66, Bit --> 6 */
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__POS         (6)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__LEN         (1)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__MSK         (0x40)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG          \
+		(BMI160_USER_INTR_ORIENT_1_ADDR)
+/**************************************************************/
+/**\name	ORIENT AXIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_1 Description - Reg Addr --> 0x66, Bit --> 7 */
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__POS               (7)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__LEN               (1)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__MSK               (0x80)
+#define BMI160_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG               \
+		(BMI160_USER_INTR_ORIENT_1_ADDR)
+/**************************************************************/
+/**\name	FLAT THETA LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Flat_0 Description - Reg Addr --> 0x67, Bit --> 0...5 */
+#define BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA__POS               (0)
+#define BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA__LEN               (6)
+#define BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA__MSK               (0x3F)
+#define BMI160_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG  \
+		(BMI160_USER_INTR_FLAT_0_ADDR)
+/**************************************************************/
+/**\name	FLAT HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Flat_1 Description - Reg Addr --> (0x68), Bit --> 0...3 */
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST__POS		(0)
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST__LEN		(4)
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST__MSK		(0x0F)
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG	 \
+(BMI160_USER_INTR_FLAT_1_ADDR)
+/**************************************************************/
+/**\name	FLAT HOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Flat_1 Description - Reg Addr --> (0x68), Bit --> 4...5 */
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD__POS                (4)
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD__LEN                (2)
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD__MSK                (0x30)
+#define BMI160_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG  \
+(BMI160_USER_INTR_FLAT_1_ADDR)
+/**************************************************************/
+/**\name	FOC ACCEL XYZ LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 0...1 */
+#define BMI160_USER_FOC_ACCEL_Z__POS               (0)
+#define BMI160_USER_FOC_ACCEL_Z__LEN               (2)
+#define BMI160_USER_FOC_ACCEL_Z__MSK               (0x03)
+#define BMI160_USER_FOC_ACCEL_Z__REG               (BMI160_USER_FOC_CONFIG_ADDR)
+
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 2...3 */
+#define BMI160_USER_FOC_ACCEL_Y__POS               (2)
+#define BMI160_USER_FOC_ACCEL_Y__LEN               (2)
+#define BMI160_USER_FOC_ACCEL_Y__MSK               (0x0C)
+#define BMI160_USER_FOC_ACCEL_Y__REG               (BMI160_USER_FOC_CONFIG_ADDR)
+
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 4...5 */
+#define BMI160_USER_FOC_ACCEL_X__POS               (4)
+#define BMI160_USER_FOC_ACCEL_X__LEN               (2)
+#define BMI160_USER_FOC_ACCEL_X__MSK               (0x30)
+#define BMI160_USER_FOC_ACCEL_X__REG               (BMI160_USER_FOC_CONFIG_ADDR)
+/**************************************************************/
+/**\name	FOC GYRO LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 6 */
+#define BMI160_USER_FOC_GYRO_ENABLE__POS               (6)
+#define BMI160_USER_FOC_GYRO_ENABLE__LEN               (1)
+#define BMI160_USER_FOC_GYRO_ENABLE__MSK               (0x40)
+#define BMI160_USER_FOC_GYRO_ENABLE__REG               \
+(BMI160_USER_FOC_CONFIG_ADDR)
+/**************************************************************/
+/**\name	NVM PROGRAM LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* CONF Description - Reg Addr --> (0x6A), Bit --> 1 */
+#define BMI160_USER_CONFIG_NVM_PROG_ENABLE__POS               (1)
+#define BMI160_USER_CONFIG_NVM_PROG_ENABLE__LEN               (1)
+#define BMI160_USER_CONFIG_NVM_PROG_ENABLE__MSK               (0x02)
+#define BMI160_USER_CONFIG_NVM_PROG_ENABLE__REG               \
+(BMI160_USER_CONFIG_ADDR)
+
+/*IF_CONF Description - Reg Addr --> (0x6B), Bit --> 0 */
+
+#define BMI160_USER_IF_CONFIG_SPI3__POS               (0)
+#define BMI160_USER_IF_CONFIG_SPI3__LEN               (1)
+#define BMI160_USER_IF_CONFIG_SPI3__MSK               (0x01)
+#define BMI160_USER_IF_CONFIG_SPI3__REG               \
+(BMI160_USER_IF_CONFIG_ADDR)
+
+/*IF_CONF Description - Reg Addr --> (0x6B), Bit --> 5..4 */
+#define BMI160_USER_IF_CONFIG_IF_MODE__POS               (4)
+#define BMI160_USER_IF_CONFIG_IF_MODE__LEN               (2)
+#define BMI160_USER_IF_CONFIG_IF_MODE__MSK               (0x30)
+#define BMI160_USER_IF_CONFIG_IF_MODE__REG		\
+(BMI160_USER_IF_CONFIG_ADDR)
+/**************************************************************/
+/**\name	GYRO SLEEP CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 0...2 */
+#define BMI160_USER_GYRO_SLEEP_TRIGGER__POS               (0)
+#define BMI160_USER_GYRO_SLEEP_TRIGGER__LEN               (3)
+#define BMI160_USER_GYRO_SLEEP_TRIGGER__MSK               (0x07)
+#define BMI160_USER_GYRO_SLEEP_TRIGGER__REG	(BMI160_USER_PMU_TRIGGER_ADDR)
+
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 3...4 */
+#define BMI160_USER_GYRO_WAKEUP_TRIGGER__POS               (3)
+#define BMI160_USER_GYRO_WAKEUP_TRIGGER__LEN               (2)
+#define BMI160_USER_GYRO_WAKEUP_TRIGGER__MSK               (0x18)
+#define BMI160_USER_GYRO_WAKEUP_TRIGGER__REG	(BMI160_USER_PMU_TRIGGER_ADDR)
+
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 5 */
+#define BMI160_USER_GYRO_SLEEP_STATE__POS               (5)
+#define BMI160_USER_GYRO_SLEEP_STATE__LEN               (1)
+#define BMI160_USER_GYRO_SLEEP_STATE__MSK               (0x20)
+#define BMI160_USER_GYRO_SLEEP_STATE__REG	(BMI160_USER_PMU_TRIGGER_ADDR)
+
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 6 */
+#define BMI160_USER_GYRO_WAKEUP_INTR__POS               (6)
+#define BMI160_USER_GYRO_WAKEUP_INTR__LEN               (1)
+#define BMI160_USER_GYRO_WAKEUP_INTR__MSK               (0x40)
+#define BMI160_USER_GYRO_WAKEUP_INTR__REG	(BMI160_USER_PMU_TRIGGER_ADDR)
+/**************************************************************/
+/**\name	ACCEL SELF TEST LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 0...1 */
+#define BMI160_USER_ACCEL_SELFTEST_AXIS__POS               (0)
+#define BMI160_USER_ACCEL_SELFTEST_AXIS__LEN               (2)
+#define BMI160_USER_ACCEL_SELFTEST_AXIS__MSK               (0x03)
+#define BMI160_USER_ACCEL_SELFTEST_AXIS__REG	(BMI160_USER_SELF_TEST_ADDR)
+
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 2 */
+#define BMI160_USER_ACCEL_SELFTEST_SIGN__POS               (2)
+#define BMI160_USER_ACCEL_SELFTEST_SIGN__LEN               (1)
+#define BMI160_USER_ACCEL_SELFTEST_SIGN__MSK               (0x04)
+#define BMI160_USER_ACCEL_SELFTEST_SIGN__REG	(BMI160_USER_SELF_TEST_ADDR)
+
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 3 */
+#define BMI160_USER_SELFTEST_AMP__POS               (3)
+#define BMI160_USER_SELFTEST_AMP__LEN               (1)
+#define BMI160_USER_SELFTEST_AMP__MSK               (0x08)
+#define BMI160_USER_SELFTEST_AMP__REG		(BMI160_USER_SELF_TEST_ADDR)
+/**************************************************************/
+/**\name	GYRO SELF TEST LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 4 */
+#define BMI160_USER_GYRO_SELFTEST_START__POS               (4)
+#define BMI160_USER_GYRO_SELFTEST_START__LEN               (1)
+#define BMI160_USER_GYRO_SELFTEST_START__MSK               (0x10)
+#define BMI160_USER_GYRO_SELFTEST_START__REG		    \
+(BMI160_USER_SELF_TEST_ADDR)
+/**************************************************************/
+/**\name	NV_CONFIG LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 0 */
+#define BMI160_USER_NV_CONFIG_SPI_ENABLE__POS               (0)
+#define BMI160_USER_NV_CONFIG_SPI_ENABLE__LEN               (1)
+#define BMI160_USER_NV_CONFIG_SPI_ENABLE__MSK               (0x01)
+#define BMI160_USER_NV_CONFIG_SPI_ENABLE__REG	 (BMI160_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 1 */
+#define BMI160_USER_IF_CONFIG_I2C_WDT_SELECT__POS               (1)
+#define BMI160_USER_IF_CONFIG_I2C_WDT_SELECT__LEN               (1)
+#define BMI160_USER_IF_CONFIG_I2C_WDT_SELECT__MSK               (0x02)
+#define BMI160_USER_IF_CONFIG_I2C_WDT_SELECT__REG		\
+(BMI160_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 2 */
+#define BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE__POS               (2)
+#define BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE__LEN               (1)
+#define BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE__MSK               (0x04)
+#define BMI160_USER_IF_CONFIG_I2C_WDT_ENABLE__REG		\
+(BMI160_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 3 */
+#define BMI160_USER_NV_CONFIG_SPARE0__POS               (3)
+#define BMI160_USER_NV_CONFIG_SPARE0__LEN               (1)
+#define BMI160_USER_NV_CONFIG_SPARE0__MSK               (0x08)
+#define BMI160_USER_NV_CONFIG_SPARE0__REG	(BMI160_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 4...7 */
+#define BMI160_USER_NV_CONFIG_NVM_COUNTER__POS               (4)
+#define BMI160_USER_NV_CONFIG_NVM_COUNTER__LEN               (4)
+#define BMI160_USER_NV_CONFIG_NVM_COUNTER__MSK               (0xF0)
+#define BMI160_USER_NV_CONFIG_NVM_COUNTER__REG	(BMI160_USER_NV_CONFIG_ADDR)
+/**************************************************************/
+/**\name	ACCEL MANUAL OFFSET LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_0 Description - Reg Addr --> (0x71), Bit --> 0...7 */
+#define BMI160_USER_OFFSET_0_ACCEL_OFF_X__POS               (0)
+#define BMI160_USER_OFFSET_0_ACCEL_OFF_X__LEN               (8)
+#define BMI160_USER_OFFSET_0_ACCEL_OFF_X__MSK               (0xFF)
+#define BMI160_USER_OFFSET_0_ACCEL_OFF_X__REG	(BMI160_USER_OFFSET_0_ADDR)
+
+/* Offset_1 Description - Reg Addr --> 0x72, Bit --> 0...7 */
+#define BMI160_USER_OFFSET_1_ACCEL_OFF_Y__POS               (0)
+#define BMI160_USER_OFFSET_1_ACCEL_OFF_Y__LEN               (8)
+#define BMI160_USER_OFFSET_1_ACCEL_OFF_Y__MSK               (0xFF)
+#define BMI160_USER_OFFSET_1_ACCEL_OFF_Y__REG	(BMI160_USER_OFFSET_1_ADDR)
+
+/* Offset_2 Description - Reg Addr --> 0x73, Bit --> 0...7 */
+#define BMI160_USER_OFFSET_2_ACCEL_OFF_Z__POS               (0)
+#define BMI160_USER_OFFSET_2_ACCEL_OFF_Z__LEN               (8)
+#define BMI160_USER_OFFSET_2_ACCEL_OFF_Z__MSK               (0xFF)
+#define BMI160_USER_OFFSET_2_ACCEL_OFF_Z__REG	(BMI160_USER_OFFSET_2_ADDR)
+/**************************************************************/
+/**\name	GYRO MANUAL OFFSET LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_3 Description - Reg Addr --> 0x74, Bit --> 0...7 */
+#define BMI160_USER_OFFSET_3_GYRO_OFF_X__POS               (0)
+#define BMI160_USER_OFFSET_3_GYRO_OFF_X__LEN               (8)
+#define BMI160_USER_OFFSET_3_GYRO_OFF_X__MSK               (0xFF)
+#define BMI160_USER_OFFSET_3_GYRO_OFF_X__REG	(BMI160_USER_OFFSET_3_ADDR)
+
+/* Offset_4 Description - Reg Addr --> 0x75, Bit --> 0...7 */
+#define BMI160_USER_OFFSET_4_GYRO_OFF_Y__POS               (0)
+#define BMI160_USER_OFFSET_4_GYRO_OFF_Y__LEN               (8)
+#define BMI160_USER_OFFSET_4_GYRO_OFF_Y__MSK               (0xFF)
+#define BMI160_USER_OFFSET_4_GYRO_OFF_Y__REG	(BMI160_USER_OFFSET_4_ADDR)
+
+/* Offset_5 Description - Reg Addr --> 0x76, Bit --> 0...7 */
+#define BMI160_USER_OFFSET_5_GYRO_OFF_Z__POS               (0)
+#define BMI160_USER_OFFSET_5_GYRO_OFF_Z__LEN               (8)
+#define BMI160_USER_OFFSET_5_GYRO_OFF_Z__MSK               (0xFF)
+#define BMI160_USER_OFFSET_5_GYRO_OFF_Z__REG	(BMI160_USER_OFFSET_5_ADDR)
+
+
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 0..1 */
+#define BMI160_USER_OFFSET_6_GYRO_OFF_X__POS               (0)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_X__LEN               (2)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_X__MSK               (0x03)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_X__REG	(BMI160_USER_OFFSET_6_ADDR)
+
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 2...3 */
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Y__POS               (2)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Y__LEN               (2)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Y__MSK               (0x0C)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Y__REG	(BMI160_USER_OFFSET_6_ADDR)
+
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 4...5 */
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Z__POS               (4)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Z__LEN               (2)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Z__MSK               (0x30)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_Z__REG	 (BMI160_USER_OFFSET_6_ADDR)
+/**************************************************************/
+/**\name	ACCEL OFFSET ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 6 */
+#define BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE__POS               (6)
+#define BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE__LEN               (1)
+#define BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE__MSK               (0x40)
+#define BMI160_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG	 \
+(BMI160_USER_OFFSET_6_ADDR)
+/**************************************************************/
+/**\name	GYRO OFFSET ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_6 Description - Reg Addr --> 0x77, Bit -->  7 */
+#define BMI160_USER_OFFSET_6_GYRO_OFF_EN__POS               (7)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_EN__LEN               (1)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_EN__MSK               (0x80)
+#define BMI160_USER_OFFSET_6_GYRO_OFF_EN__REG	 (BMI160_USER_OFFSET_6_ADDR)
+/**************************************************************/
+/**\name	STEP COUNTER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* STEP_CNT_0  Description - Reg Addr --> 0x78, Bit -->  0 to 7 */
+#define BMI160_USER_STEP_COUNT_LSB__POS               (0)
+#define BMI160_USER_STEP_COUNT_LSB__LEN               (8)
+#define BMI160_USER_STEP_COUNT_LSB__MSK               (0xFF)
+#define BMI160_USER_STEP_COUNT_LSB__REG	 (BMI160_USER_STEP_COUNT_0_ADDR)
+
+/* STEP_CNT_1  Description - Reg Addr --> 0x79, Bit -->  0 to 7 */
+#define BMI160_USER_STEP_COUNT_MSB__POS               (0)
+#define BMI160_USER_STEP_COUNT_MSB__LEN               (8)
+#define BMI160_USER_STEP_COUNT_MSB__MSK               (0xFF)
+#define BMI160_USER_STEP_COUNT_MSB__REG	 (BMI160_USER_STEP_COUNT_1_ADDR)
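+
+/*
+ * Usage sketch (illustrative only): the step counter value is a 16-bit
+ * quantity split across STEP_CNT_0 (LSB) and STEP_CNT_1 (MSB); it is
+ * normally read as two consecutive bytes and recombined. bus_read() is a
+ * placeholder for the real bus access callback.
+ *
+ *	u8 v_data_u8[2] = {0, 0};
+ *	u16 step_count;
+ *
+ *	bus_read(dev_addr, BMI160_USER_STEP_COUNT_LSB__REG, v_data_u8, 2);
+ *	step_count = (u16)((v_data_u8[1] << 8) | v_data_u8[0]);
+ */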
+/**************************************************************/
+/**\name	STEP COUNTER CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* STEP_CONFIG_0  Description - Reg Addr --> 0x7A, Bit -->  0 to 7 */
+#define BMI160_USER_STEP_CONFIG_ZERO__POS               (0)
+#define BMI160_USER_STEP_CONFIG_ZERO__LEN               (8)
+#define BMI160_USER_STEP_CONFIG_ZERO__MSK               (0xFF)
+#define BMI160_USER_STEP_CONFIG_ZERO__REG	 \
+(BMI160_USER_STEP_CONFIG_0_ADDR)
+
+
+/* STEP_CONFIG_1  Description - Reg Addr --> 0x7B, Bit -->  0 to 2 and
+4 to 7 */
+#define BMI160_USER_STEP_CONFIG_ONE_CNF1__POS               (0)
+#define BMI160_USER_STEP_CONFIG_ONE_CNF1__LEN               (3)
+#define BMI160_USER_STEP_CONFIG_ONE_CNF1__MSK               (0x07)
+#define BMI160_USER_STEP_CONFIG_ONE_CNF1__REG	 \
+(BMI160_USER_STEP_CONFIG_1_ADDR)
+
+#define BMI160_USER_STEP_CONFIG_ONE_CNF2__POS               (4)
+#define BMI160_USER_STEP_CONFIG_ONE_CNF2__LEN               (4)
+#define BMI160_USER_STEP_CONFIG_ONE_CNF2__MSK               (0xF0)
+#define BMI160_USER_STEP_CONFIG_ONE_CNF2__REG	 \
+(BMI160_USER_STEP_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	STEP COUNTER ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* STEP_CONFIG_1  Description - Reg Addr --> 0x7B, Bit -->  3 */
+#define BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__POS		(3)
+#define BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__LEN		(1)
+#define BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__MSK		(0x08)
+#define BMI160_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG	\
+(BMI160_USER_STEP_CONFIG_1_ADDR)
+
+/* USER REGISTERS DEFINITION END */
+/**************************************************************************/
+/* CMD REGISTERS DEFINITION START */
+/**************************************************************/
+/**\name	COMMAND REGISTER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Command description address - Reg Addr --> 0x7E, Bit -->  0....7 */
+#define BMI160_CMD_COMMANDS__POS              (0)
+#define BMI160_CMD_COMMANDS__LEN              (8)
+#define BMI160_CMD_COMMANDS__MSK              (0xFF)
+#define BMI160_CMD_COMMANDS__REG	 (BMI160_CMD_COMMANDS_ADDR)
+/**************************************************************/
+/**\name	PAGE ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Target page address - Reg Addr --> 0x7F, Bit -->  4....5 */
+#define BMI160_CMD_TARGET_PAGE__POS           (4)
+#define BMI160_CMD_TARGET_PAGE__LEN           (2)
+#define BMI160_CMD_TARGET_PAGE__MSK           (0x30)
+#define BMI160_CMD_TARGET_PAGE__REG	 (BMI160_CMD_EXT_MODE_ADDR)
+
+/* Paging enable - Reg Addr --> 0x7F, Bit -->  7 */
+#define BMI160_CMD_PAGING_EN__POS           (7)
+#define BMI160_CMD_PAGING_EN__LEN           (1)
+#define BMI160_CMD_PAGING_EN__MSK           (0x80)
+#define BMI160_CMD_PAGING_EN__REG		(BMI160_CMD_EXT_MODE_ADDR)
+
+/* Com_C_Trim_Five Description - Bit -->  0...7 */
+#define BMI160_COM_C_TRIM_FIVE__POS           (0)
+#define BMI160_COM_C_TRIM_FIVE__LEN           (8)
+#define BMI160_COM_C_TRIM_FIVE__MSK           (0xFF)
+#define BMI160_COM_C_TRIM_FIVE__REG		(BMI160_COM_C_TRIM_FIVE_ADDR)
+
+/**************************************************************************/
+/* CMD REGISTERS DEFINITION END */
+
+/**************************************************/
+/**\name	FIFO FRAME COUNT DEFINITION           */
+/*************************************************/
+#define FIFO_FRAME				(1024)
+#define FIFO_CONFIG_CHECK1		(0x00)
+#define FIFO_CONFIG_CHECK2		(0x80)
+/**************************************************/
+/**\name	MAG SENSOR SELECT          */
+/*************************************************/
+#define BST_BMM		(0)
+#define BST_AKM		(1)
+#define BMI160_YAS537_I2C_ADDRESS	(0x2E)
+/**************************************************/
+/**\name	ACCEL RANGE          */
+/*************************************************/
+#define BMI160_ACCEL_RANGE_2G           (0X03)
+#define BMI160_ACCEL_RANGE_4G           (0X05)
+#define BMI160_ACCEL_RANGE_8G           (0X08)
+#define BMI160_ACCEL_RANGE_16G          (0X0C)
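+
+/*
+ * Usage sketch (illustrative only): the range codes above select a full
+ * scale of +/-2g, +/-4g, +/-8g or +/-16g. Assuming the usual signed 16-bit
+ * accel data registers, a raw sample can be scaled to milli-g as below;
+ * shown here for the +/-2g setting, with "raw" holding one signed sample:
+ *
+ *	s16 raw;
+ *	s32 milli_g;
+ *
+ *	milli_g = ((s32)raw * 2000) / 32768;
+ */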
+/**************************************************/
+/**\name	ACCEL ODR          */
+/*************************************************/
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED       (0x00)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_0_78HZ         (0x01)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_1_56HZ         (0x02)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_3_12HZ         (0x03)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_6_25HZ         (0x04)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_12_5HZ         (0x05)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_25HZ           (0x06)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_50HZ           (0x07)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_100HZ          (0x08)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_200HZ          (0x09)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_400HZ          (0x0A)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_800HZ          (0x0B)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_1600HZ         (0x0C)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED0      (0x0D)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED1      (0x0E)
+#define BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED2      (0x0F)
+/**************************************************/
+/**\name	ACCEL BANDWIDTH PARAMETER         */
+/*************************************************/
+#define BMI160_ACCEL_OSR4_AVG1			(0x00)
+#define BMI160_ACCEL_OSR2_AVG2			(0x01)
+#define BMI160_ACCEL_NORMAL_AVG4		(0x02)
+#define BMI160_ACCEL_CIC_AVG8			(0x03)
+#define BMI160_ACCEL_RES_AVG16			(0x04)
+#define BMI160_ACCEL_RES_AVG32			(0x05)
+#define BMI160_ACCEL_RES_AVG64			(0x06)
+#define BMI160_ACCEL_RES_AVG128			(0x07)
+/**************************************************/
+/**\name	GYRO ODR         */
+/*************************************************/
+#define BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED		(0x00)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_25HZ			(0x06)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_50HZ			(0x07)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_100HZ			(0x08)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_200HZ			(0x09)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_400HZ			(0x0A)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_800HZ			(0x0B)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_1600HZ			(0x0C)
+#define BMI160_GYRO_OUTPUT_DATA_RATE_3200HZ			(0x0D)
+/**************************************************/
+/**\name	GYRO BANDWIDTH PARAMETER         */
+/*************************************************/
+#define BMI160_GYRO_OSR4_MODE		(0x00)
+#define BMI160_GYRO_OSR2_MODE		(0x01)
+#define BMI160_GYRO_NORMAL_MODE		(0x02)
+#define BMI160_GYRO_CIC_MODE		(0x03)
+/**************************************************/
+/**\name	GYROSCOPE RANGE PARAMETER         */
+/*************************************************/
+#define BMI160_GYRO_RANGE_2000_DEG_SEC	(0x00)
+#define BMI160_GYRO_RANGE_1000_DEG_SEC	(0x01)
+#define BMI160_GYRO_RANGE_500_DEG_SEC	(0x02)
+#define BMI160_GYRO_RANGE_250_DEG_SEC	(0x03)
+#define BMI160_GYRO_RANGE_125_DEG_SEC	(0x04)
+/**************************************************/
+/**\name	MAG ODR         */
+/*************************************************/
+#define BMI160_MAG_OUTPUT_DATA_RATE_RESERVED       (0x00)
+#define BMI160_MAG_OUTPUT_DATA_RATE_0_78HZ         (0x01)
+#define BMI160_MAG_OUTPUT_DATA_RATE_1_56HZ         (0x02)
+#define BMI160_MAG_OUTPUT_DATA_RATE_3_12HZ         (0x03)
+#define BMI160_MAG_OUTPUT_DATA_RATE_6_25HZ         (0x04)
+#define BMI160_MAG_OUTPUT_DATA_RATE_12_5HZ         (0x05)
+#define BMI160_MAG_OUTPUT_DATA_RATE_25HZ           (0x06)
+#define BMI160_MAG_OUTPUT_DATA_RATE_50HZ           (0x07)
+#define BMI160_MAG_OUTPUT_DATA_RATE_100HZ          (0x08)
+#define BMI160_MAG_OUTPUT_DATA_RATE_200HZ          (0x09)
+#define BMI160_MAG_OUTPUT_DATA_RATE_400HZ          (0x0A)
+#define BMI160_MAG_OUTPUT_DATA_RATE_800HZ          (0x0B)
+#define BMI160_MAG_OUTPUT_DATA_RATE_1600HZ         (0x0C)
+#define BMI160_MAG_OUTPUT_DATA_RATE_RESERVED0      (0x0D)
+#define BMI160_MAG_OUTPUT_DATA_RATE_RESERVED1      (0x0E)
+#define BMI160_MAG_OUTPUT_DATA_RATE_RESERVED2      (0x0F)
+
+/**************************************************/
+/**\name	ENABLE/DISABLE SELECTIONS        */
+/*************************************************/
+
+/* Enable accel and gyro offset */
+#define ACCEL_OFFSET_ENABLE		(0x01)
+#define GYRO_OFFSET_ENABLE		(0x01)
+
+/* command register definition */
+#define START_FOC_ACCEL_GYRO	(0X03)
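+
+/*
+ * Usage sketch (illustrative only): a fast offset compensation run is
+ * typically started by programming the per-axis FOC targets in FOC_CONF
+ * (0x69, fields defined earlier in this header) and then writing
+ * START_FOC_ACCEL_GYRO to the command register. bus_write() is a
+ * placeholder for the real bus access callback.
+ *
+ *	u8 v_cmd_u8 = START_FOC_ACCEL_GYRO;
+ *
+ *	bus_write(dev_addr, BMI160_CMD_COMMANDS__REG, &v_cmd_u8, 1);
+ */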
+
+/* INT ENABLE 0 */
+#define BMI160_ANY_MOTION_X_ENABLE       (0)
+#define BMI160_ANY_MOTION_Y_ENABLE       (1)
+#define BMI160_ANY_MOTION_Z_ENABLE       (2)
+#define BMI160_DOUBLE_TAP_ENABLE         (4)
+#define BMI160_SINGLE_TAP_ENABLE         (5)
+#define BMI160_ORIENT_ENABLE             (6)
+#define BMI160_FLAT_ENABLE               (7)
+
+/* INT ENABLE 1 */
+#define BMI160_HIGH_G_X_ENABLE       (0)
+#define BMI160_HIGH_G_Y_ENABLE       (1)
+#define BMI160_HIGH_G_Z_ENABLE       (2)
+#define BMI160_LOW_G_ENABLE          (3)
+#define BMI160_DATA_RDY_ENABLE       (4)
+#define BMI160_FIFO_FULL_ENABLE      (5)
+#define BMI160_FIFO_WM_ENABLE        (6)
+
+/* INT ENABLE 2 */
+#define  BMI160_NOMOTION_X_ENABLE	(0)
+#define  BMI160_NOMOTION_Y_ENABLE	(1)
+#define  BMI160_NOMOTION_Z_ENABLE	(2)
+#define  BMI160_STEP_DETECTOR_EN	(3)
+
+/* FOC axis selection for accel*/
+#define	FOC_X_AXIS		(0)
+#define	FOC_Y_AXIS		(1)
+#define	FOC_Z_AXIS		(2)
+
+/* IN OUT CONTROL */
+#define BMI160_INTR1_EDGE_CTRL			(0)
+#define BMI160_INTR2_EDGE_CTRL			(1)
+#define BMI160_INTR1_LEVEL				(0)
+#define BMI160_INTR2_LEVEL				(1)
+#define BMI160_INTR1_OUTPUT_TYPE		(0)
+#define BMI160_INTR2_OUTPUT_TYPE		(1)
+#define BMI160_INTR1_OUTPUT_ENABLE		(0)
+#define BMI160_INTR2_OUTPUT_ENABLE		(1)
+
+#define BMI160_INTR1_INPUT_ENABLE	(0)
+#define BMI160_INTR2_INPUT_ENABLE	(1)
+
+/*  INTERRUPT MAPS    */
+#define BMI160_INTR1_MAP_LOW_G			(0)
+#define BMI160_INTR2_MAP_LOW_G			(1)
+#define BMI160_INTR1_MAP_HIGH_G			(0)
+#define BMI160_INTR2_MAP_HIGH_G			(1)
+#define BMI160_INTR1_MAP_ANY_MOTION		(0)
+#define BMI160_INTR2_MAP_ANY_MOTION		(1)
+#define BMI160_INTR1_MAP_NOMO			(0)
+#define BMI160_INTR2_MAP_NOMO			(1)
+#define BMI160_INTR1_MAP_DOUBLE_TAP		(0)
+#define BMI160_INTR2_MAP_DOUBLE_TAP		(1)
+#define BMI160_INTR1_MAP_SINGLE_TAP		(0)
+#define BMI160_INTR2_MAP_SINGLE_TAP		(1)
+#define BMI160_INTR1_MAP_ORIENT			(0)
+#define BMI160_INTR2_MAP_ORIENT			(1)
+#define BMI160_INTR1_MAP_FLAT			(0)
+#define BMI160_INTR2_MAP_FLAT			(1)
+#define BMI160_INTR1_MAP_DATA_RDY		(0)
+#define BMI160_INTR2_MAP_DATA_RDY		(1)
+#define BMI160_INTR1_MAP_FIFO_WM		(0)
+#define BMI160_INTR2_MAP_FIFO_WM		(1)
+#define BMI160_INTR1_MAP_FIFO_FULL      (0)
+#define BMI160_INTR2_MAP_FIFO_FULL      (1)
+#define BMI160_INTR1_MAP_PMUTRIG        (0)
+#define BMI160_INTR2_MAP_PMUTRIG		(1)
+
+/* Interrupt mapping*/
+#define	BMI160_MAP_INTR1		(0)
+#define	BMI160_MAP_INTR2		(1)
+/**************************************************/
+/**\name	 TAP DURATION         */
+/*************************************************/
+#define BMI160_TAP_DURN_50MS     (0x00)
+#define BMI160_TAP_DURN_100MS    (0x01)
+#define BMI160_TAP_DURN_150MS    (0x02)
+#define BMI160_TAP_DURN_200MS    (0x03)
+#define BMI160_TAP_DURN_250MS    (0x04)
+#define BMI160_TAP_DURN_375MS    (0x05)
+#define BMI160_TAP_DURN_500MS    (0x06)
+#define BMI160_TAP_DURN_700MS    (0x07)
+/**************************************************/
+/**\name	TAP SHOCK         */
+/*************************************************/
+#define BMI160_TAP_SHOCK_50MS	(0x00)
+#define BMI160_TAP_SHOCK_75MS	(0x01)
+/**************************************************/
+/**\name	TAP QUIET        */
+/*************************************************/
+#define BMI160_TAP_QUIET_30MS	(0x00)
+#define BMI160_TAP_QUIET_20MS	(0x01)
+/**************************************************/
+/**\name	STEP DETECTION SELECTION MODES      */
+/*************************************************/
+#define	BMI160_STEP_NORMAL_MODE			(0)
+#define	BMI160_STEP_SENSITIVE_MODE		(1)
+#define	BMI160_STEP_ROBUST_MODE			(2)
+/**************************************************/
+/**\name	STEP CONFIGURATION SELECT MODE    */
+/*************************************************/
+#define	STEP_CONFIG_NORMAL		(0X315)
+#define	STEP_CONFIG_SENSITIVE	(0X2D)
+#define	STEP_CONFIG_ROBUST		(0X71D)
+/**************************************************/
+/**\name	BMM150 TRIM DATA DEFINITIONS      */
+/*************************************************/
+#define BMI160_MAG_DIG_X1                      (0x5D)
+#define BMI160_MAG_DIG_Y1                      (0x5E)
+#define BMI160_MAG_DIG_Z4_LSB                  (0x62)
+#define BMI160_MAG_DIG_Z4_MSB                  (0x63)
+#define BMI160_MAG_DIG_X2                      (0x64)
+#define BMI160_MAG_DIG_Y2                      (0x65)
+#define BMI160_MAG_DIG_Z2_LSB                  (0x68)
+#define BMI160_MAG_DIG_Z2_MSB                  (0x69)
+#define BMI160_MAG_DIG_Z1_LSB                  (0x6A)
+#define BMI160_MAG_DIG_Z1_MSB                  (0x6B)
+#define BMI160_MAG_DIG_XYZ1_LSB                (0x6C)
+#define BMI160_MAG_DIG_XYZ1_MSB                (0x6D)
+#define BMI160_MAG_DIG_Z3_LSB                  (0x6E)
+#define BMI160_MAG_DIG_Z3_MSB                  (0x6F)
+#define BMI160_MAG_DIG_XY2                     (0x70)
+#define BMI160_MAG_DIG_XY1                     (0x71)
+/**************************************************/
+/**\name	BMM150 PRE-SET MODE DEFINITIONS     */
+/*************************************************/
+#define BMI160_MAG_PRESETMODE_LOWPOWER                 (1)
+#define BMI160_MAG_PRESETMODE_REGULAR                  (2)
+#define BMI160_MAG_PRESETMODE_HIGHACCURACY             (3)
+#define BMI160_MAG_PRESETMODE_ENHANCED                 (4)
+/**************************************************/
+/**\name	BMM150 PRESET MODES - DATA RATES    */
+/*************************************************/
+#define BMI160_MAG_LOWPOWER_DR                       (0x02)
+#define BMI160_MAG_REGULAR_DR                        (0x02)
+#define BMI160_MAG_HIGHACCURACY_DR                   (0x2A)
+#define BMI160_MAG_ENHANCED_DR                       (0x02)
+/**************************************************/
+/**\name	BMM150 PRESET MODES - REPETITIONS-XY RATES */
+/*************************************************/
+#define BMI160_MAG_LOWPOWER_REPXY                    (1)
+#define BMI160_MAG_REGULAR_REPXY                     (4)
+#define BMI160_MAG_HIGHACCURACY_REPXY                (23)
+#define BMI160_MAG_ENHANCED_REPXY                    (7)
+/**************************************************/
+/**\name	BMM150 PRESET MODES - REPETITIONS-Z RATES */
+/*************************************************/
+#define BMI160_MAG_LOWPOWER_REPZ                     (2)
+#define BMI160_MAG_REGULAR_REPZ                      (14)
+#define BMI160_MAG_HIGHACCURACY_REPZ                 (82)
+#define BMI160_MAG_ENHANCED_REPZ                     (26)
+#define BMI160_MAG_NOAMRL_SWITCH_TIMES               (5)
+#define MAG_INTERFACE_PMU_ENABLE                     (1)
+#define MAG_INTERFACE_PMU_DISABLE                    (0)
+/**************************************************/
+/**\name	USED FOR MAG OVERFLOW CHECK FOR BMM150  */
+/*************************************************/
+#define BMI160_MAG_OVERFLOW_OUTPUT			((s16)-32768)
+#define BMI160_MAG_OVERFLOW_OUTPUT_S32		((s32)(-2147483647-1))
+#define BMI160_MAG_NEGATIVE_SATURATION_Z   ((s16)-32767)
+#define BMI160_MAG_POSITIVE_SATURATION_Z   ((u16)32767)
+#define BMI160_MAG_FLIP_OVERFLOW_ADCVAL		((s16)-4096)
+#define BMI160_MAG_HALL_OVERFLOW_ADCVAL		((s16)-16384)
+/**************************************************/
+/**\name	BMM150 REGISTER DEFINITION */
+/*************************************************/
+#define BMI160_BMM150_CHIP_ID           (0x40)
+#define BMI160_BMM150_POWE_CONTROL_REG	(0x4B)
+#define BMI160_BMM150_POWE_MODE_REG		(0x4C)
+#define BMI160_BMM150_DATA_REG			(0x42)
+#define BMI160_BMM150_XY_REP			(0x51)
+#define BMI160_BMM150_Z_REP				(0x52)
+/**************************************************/
+/**\name	AKM COMPENSATING DATA REGISTERS     */
+/*************************************************/
+#define BMI160_BST_AKM_ASAX		(0x60)
+#define BMI160_BST_AKM_ASAY		(0x61)
+#define BMI160_BST_AKM_ASAZ		(0x62)
+/**************************************************/
+/**\name	AKM POWER MODE SELECTION     */
+/*************************************************/
+#define AKM_POWER_DOWN_MODE			(0)
+#define AKM_SINGLE_MEAS_MODE		(1)
+#define FUSE_ROM_MODE				(2)
+/**************************************************/
+/**\name	SECONDARY_MAG POWER MODE SELECTION    */
+/*************************************************/
+#define BMI160_MAG_FORCE_MODE		(0)
+#define BMI160_MAG_SUSPEND_MODE		(1)
+/**************************************************/
+/**\name	MAG POWER MODE SELECTION    */
+/*************************************************/
+#define	FORCE_MODE		(0)
+#define	SUSPEND_MODE	(1)
+#define	NORMAL_MODE		(2)
+#define MAG_SUSPEND_MODE (1)
+/**************************************************/
+/**\name	FIFO CONFIGURATIONS    */
+/*************************************************/
+#define FIFO_HEADER_ENABLE			(0x01)
+#define FIFO_MAG_ENABLE				(0x01)
+#define FIFO_ACCEL_ENABLE			(0x01)
+#define FIFO_GYRO_ENABLE			(0x01)
+#define FIFO_TIME_ENABLE			(0x01)
+#define FIFO_STOPONFULL_ENABLE		(0x01)
+#define FIFO_WM_INTERRUPT_ENABLE	(0x01)
+#define	BMI160_FIFO_INDEX_LENGTH	(1)
+#define	BMI160_FIFO_TAG_INTR_MASK	(0xFC)
+
+/**************************************************/
+/**\name	ACCEL POWER MODE    */
+/*************************************************/
+#define ACCEL_MODE_NORMAL	(0x11)
+#define	ACCEL_LOWPOWER		(0X12)
+#define	ACCEL_SUSPEND		(0X10)
+/**************************************************/
+/**\name	GYRO POWER MODE    */
+/*************************************************/
+#define GYRO_MODE_SUSPEND		(0x14)
+#define GYRO_MODE_NORMAL		(0x15)
+#define GYRO_MODE_FASTSTARTUP	(0x17)
+/**************************************************/
+/**\name	MAG POWER MODE    */
+/*************************************************/
+#define MAG_MODE_SUSPEND	(0x18)
+#define MAG_MODE_NORMAL		(0x19)
+#define MAG_MODE_LOWPOWER	(0x1A)
+/**************************************************/
+/**\name	ENABLE/DISABLE BIT VALUES    */
+/*************************************************/
+#define BMI160_ENABLE	(0x01)
+#define BMI160_DISABLE	(0x00)
+/**************************************************/
+/**\name	INTERRUPT EDGE TRIGGER ENABLE    */
+/*************************************************/
+#define BMI160_EDGE		(0x01)
+#define BMI160_LEVEL	(0x00)
+/**************************************************/
+/**\name	INTERRUPT LEVEL ENABLE    */
+/*************************************************/
+#define BMI160_LEVEL_LOW		(0x00)
+#define BMI160_LEVEL_HIGH		(0x01)
+/**************************************************/
+/**\name	INTERRUPT OUTPUT ENABLE    */
+/*************************************************/
+#define BMI160_OPEN_DRAIN	(0x01)
+#define BMI160_PUSH_PULL	(0x00)
+
+/* interrupt output enable*/
+#define BMI160_INPUT	(0x01)
+#define BMI160_OUTPUT	(0x00)
+
+/**************************************************/
+/**\name	INTERRUPT TAP SOURCE ENABLE    */
+/*************************************************/
+#define FILTER_DATA		(0x00)
+#define UNFILTER_DATA	(0x01)
+/**************************************************/
+/**\name	SLOW MOTION/ NO MOTION SELECT   */
+/*************************************************/
+#define SLOW_MOTION		(0x00)
+#define NO_MOTION		(0x01)
+/**************************************************/
+/**\name	SIGNIFICANT MOTION SELECTION   */
+/*************************************************/
+#define ANY_MOTION			(0x00)
+#define SIGNIFICANT_MOTION	(0x01)
+/**************************************************/
+/**\name	LATCH DURATION   */
+/*************************************************/
+#define BMI160_LATCH_DUR_NONE				(0x00)
+#define BMI160_LATCH_DUR_312_5_MICRO_SEC	(0x01)
+#define BMI160_LATCH_DUR_625_MICRO_SEC		(0x02)
+#define BMI160_LATCH_DUR_1_25_MILLI_SEC		(0x03)
+#define BMI160_LATCH_DUR_2_5_MILLI_SEC		(0x04)
+#define BMI160_LATCH_DUR_5_MILLI_SEC		(0x05)
+#define BMI160_LATCH_DUR_10_MILLI_SEC		(0x06)
+#define BMI160_LATCH_DUR_20_MILLI_SEC		(0x07)
+#define BMI160_LATCH_DUR_40_MILLI_SEC		(0x08)
+#define BMI160_LATCH_DUR_80_MILLI_SEC		(0x09)
+#define BMI160_LATCH_DUR_160_MILLI_SEC		(0x0A)
+#define BMI160_LATCH_DUR_320_MILLI_SEC		(0x0B)
+#define BMI160_LATCH_DUR_640_MILLI_SEC		(0x0C)
+#define BMI160_LATCH_DUR_1_28_SEC			(0x0D)
+#define BMI160_LATCH_DUR_2_56_SEC			(0x0E)
+#define BMI160_LATCHED						(0x0F)
+/**************************************************/
+/**\name	GYRO OFFSET MASK DEFINITION   */
+/*************************************************/
+#define BMI160_GYRO_MANUAL_OFFSET_0_7	(0x00FF)
+#define BMI160_GYRO_MANUAL_OFFSET_8_9	(0x0300)
+/**************************************************/
+/**\name	STEP CONFIGURATION MASK DEFINITION   */
+/*************************************************/
+#define BMI160_STEP_CONFIG_0_7		(0x00FF)
+#define BMI160_STEP_CONFIG_8_10		(0x0700)
+#define BMI160_STEP_CONFIG_11_14	(0xF000)
+/**************************************************/
+/**\name	DEFINITION USED FOR DIFFERENT WRITE   */
+/*************************************************/
+#define	BMI160_WRITE_TARGET_PAGE0	(0x00)
+#define	BMI160_WRITE_TARGET_PAGE1	(0x01)
+#define	BMI160_WRITE_ENABLE_PAGE1	(0x01)
+#define	BMI160_MANUAL_DISABLE	    (0x00)
+#define	BMI160_MANUAL_ENABLE	    (0x01)
+#define	BMI160_YAS_DISABLE_RCOIL	(0x00)
+#define	BMI160_ENABLE_MAG_IF_MODE	(0x02)
+#define	BMI160_ENABLE_ANY_MOTION_INTR1	(0x04)
+#define	BMI160_ENABLE_ANY_MOTION_INTR2	(0x04)
+#define	BMI160_MAG_DATA_READ_REG        (0x04)
+#define BMI160_BMM_POWER_MODE_REG		(0x06)
+#define	BMI160_ENABLE_ANY_MOTION_AXIS	(0x07)
+#define	BMI160_ENABLE_LOW_G             (0x08)
+#define	BMI160_YAS532_ACQ_START         (0x11)
+#define	BMI160_YAS_DEVICE_ID_REG        (0x80)
+#define	BMI160_FIFO_GYRO_ENABLE         (0x80)
+#define	BMI160_SIG_MOTION_INTR_ENABLE   (0x01)
+#define	BMI160_STEP_DETECT_INTR_ENABLE  (0x01)
+#define	BMI160_LOW_G_INTR_STAT          (0x01)
+#define BMI160_PULL_UP_DATA             (0x30)
+#define BMI160_FIFO_M_G_A_ENABLE        (0xE0)
+#define BMI160_FIFO_M_G_ENABLE          (0xA0)
+#define BMI160_FIFO_M_A_ENABLE          (0x60)
+#define BMI160_FIFO_G_A_ENABLE          (0xC0)
+#define BMI160_FIFO_A_ENABLE            (0x40)
+#define BMI160_FIFO_M_ENABLE            (0x20)
+/**************************************************/
+/**\name	MAG INIT DEFINITION  */
+/*************************************************/
+#define BMI160_COMMAND_REG_ONE		(0x37)
+#define BMI160_COMMAND_REG_TWO		(0x9A)
+#define BMI160_COMMAND_REG_THREE	(0xC0)
+#define	RESET_STEP_COUNTER			(0xB2)
+/**************************************************/
+/**\name	BIT SLICE GET AND SET FUNCTIONS  */
+/*************************************************/
+#define BMI160_GET_BITSLICE(regvar, bitname)\
+		((regvar & bitname##__MSK) >> bitname##__POS)
+
+
+#define BMI160_SET_BITSLICE(regvar, bitname, val)\
+		((regvar & ~bitname##__MSK) | \
+		((val<<bitname##__POS)&bitname##__MSK))
+
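+/*
+ * Illustrative usage sketch for the bit-slice helpers above.  The
+ * FOO__POS/FOO__MSK names are hypothetical placeholders standing in
+ * for the real bitname__POS/bitname__MSK pairs defined by the driver;
+ * the block is guarded by "#if 0" so it is documentation only.
+ */
+#if 0
+#define FOO__POS	(4)
+#define FOO__MSK	(0x30)
+
+static u8 example_update_bitslice(u8 regval)
+{
+	/* extract the two-bit field located at bits 5:4 of regval */
+	u8 field = BMI160_GET_BITSLICE(regval, FOO);
+
+	/* write the field back incremented, leaving the other bits intact */
+	return BMI160_SET_BITSLICE(regval, FOO, (u8)(field + 1));
+}
+#endif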
+/**************************************************/
+/**\name	 FUNCTION DECLARATIONS  */
+/*************************************************/
+/**************************************************/
+/**\name	 FUNCTION FOR BMI160 INITIALIZE  */
+/*************************************************/
+/*!
+ *	@brief
+ *	This function is used to initialize the
+ *	bus read and bus write functions and
+ *	assign the chip id and device address.
+ *	The chip id is read from register 0x00, bits 0 to 7.
+ *
+ *	@param bmi160 : structure pointer
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	While changing the parameters of the bmi160_t structure,
+ *	consider the following point:
+ *	changing the referenced value of a parameter
+ *	will change the local copy or local reference,
+ *	so make sure your changes do not
+ *	affect the referenced value of the parameter
+ *	(preferably, do not change the referenced value of the parameter).
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_init(struct bmi160_t *bmi160);
+/**************************************************/
+/**\name	 FUNCTION FOR READ AND WRITE REGISTERS  */
+/*************************************************/
+/*!
+ * @brief
+ *	This API write the data to
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> The data from the register
+ *	@param v_len_u8 -> no of bytes to read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_write_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8);
+/*!
+ * @brief
+ *	This API reads the data from
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> The data from the register
+ *	@param v_len_u8 -> no of bytes to read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8);
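+/*
+ * Illustrative sketch: raw register access through the generic
+ * read/write helpers declared above.  Register 0x00 holds the chip
+ * id, as noted in the bmi160_init() description; the snippet is
+ * compiled out and is a usage example only.
+ */
+#if 0
+static u8 example_read_chip_id(void)
+{
+	u8 chip_id = 0;
+
+	/* one-byte read from register 0x00 (chip id) */
+	bmi160_read_reg(0x00, &chip_id, 1);
+	return chip_id;
+}
+#endif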
+/**************************************************/
+/**\name	 FUNCTION FOR ERROR CODES  */
+/*************************************************/
+/*!
+ *	@brief This API is used to read the fatal error
+ *	from the register 0x02 bit 0.
+ *	This flag will be reset only by power-on-reset and soft reset
+ *
+ *
+ *  @param v_fatal_err_u8 : The status of fatal error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fatal_err(u8
+*v_fatal_err_u8);
+/*!
+ *	@brief This API is used to read the error code
+ *	from register 0x02 bits 1 to 4
+ *
+ *
+ *  @param v_err_code_u8 : The status of error codes
+ *	error_code  |    description
+ *  ------------|---------------
+ *	0x00        |no error
+ *	0x01        |ACC_CONF error (accel ODR and bandwidth not compatible)
+ *	0x02        |GYR_CONF error (Gyroscope ODR and bandwidth not compatible)
+ *	0x03        |Under sampling mode and interrupt uses pre filtered data
+ *	0x04        |reserved
+ *	0x05        |Selected trigger-readout offset in
+ *    -         |MAG_IF greater than selected ODR
+ *	0x06        |FIFO configuration error for header less mode
+ *	0x07        |Under sampling mode and pre filtered data as FIFO source
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_err_code(u8
+*v_error_code_u8);
+/*!
+ *	@brief This API reads the I2C error code from the
+ *	register 0x02 bit 5.
+ *	This error is detected in the I2C master.
+ *
+ *  @param v_i2c_err_code_u8 : The status of i2c fail error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_fail_err(u8
+*v_i2c_error_code_u8);
+ /*!
+ *	@brief This API Reads the dropped command error
+ *	from the register 0x02 bit 6
+ *
+ *
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_drop_cmd_err(u8
+*v_drop_cmd_err_u8);
+/*!
+ *	@brief This API reads the magnetometer data ready error
+ *	(mag data ready interrupt not active).
+ *	It reads from the error register 0x02 bit 7
+ *
+ *
+ *
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_dada_rdy_err(u8
+*v_mag_data_rdy_err_u8);
+/*!
+ *	@brief This API reads the error status
+ *	from the error register 0x02 bit 0 to 7
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *  @param v_fatal_er_u8r : The status of fatal error
+ *  @param v_err_code_u8 : The status of error code
+ *  @param v_i2c_fail_err_u8 : The status of I2C fail error
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_error_status(u8 *v_fatal_er_u8r,
+u8 *v_err_code_u8, u8 *v_i2c_fail_err_u8,
+u8 *v_drop_cmd_err_u8, u8 *v_mag_data_rdy_err_u8);
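+/*
+ * Illustrative sketch: fetching all error flags of register 0x02 in
+ * a single call through the API declared above.  Compiled out; error
+ * handling of the bus return code is elided.
+ */
+#if 0
+static void example_dump_error_status(void)
+{
+	u8 fatal = 0, err_code = 0, i2c_fail = 0, drop_cmd = 0, mag_drdy = 0;
+
+	if (bmi160_get_error_status(&fatal, &err_code, &i2c_fail,
+		&drop_cmd, &mag_drdy) == 0) {
+		/* err_code values are listed in bmi160_get_err_code() above */
+	}
+}
+#endif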
+/******************************************************************/
+/**\name	 FUNCTIONS FOR MAG,ACCEL AND GYRO POWER MODE STATUS  */
+/*****************************************************************/
+/*!
+ *	@brief This API reads the magnetometer power mode from
+ *	PMU status register 0x03 bit 0 and 1
+ *
+ *  @param v_mag_power_mode_stat_u8 : The value of mag power mode
+ *	mag_powermode    |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   LOW POWER       |   0x02
+ *
+ *
+ * @note The power mode of the mag is set by the 0x7E command register
+ * @note using the function "bmi160_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x18    | MAG_MODE_SUSPEND
+ *   0x19    | MAG_MODE_NORMAL
+ *   0x1A    | MAG_MODE_LOWPOWER
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_power_mode_stat(u8
+*v_mag_power_mode_stat_u8);
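+/*
+ * Illustrative sketch: switching the mag to normal mode via the 0x7E
+ * command register and reading back the PMU status, as described in
+ * the notes above.  bmi160_set_command_register() is only referenced
+ * here; its exact prototype (taking the command byte) is an
+ * assumption.  Compiled out.
+ */
+#if 0
+static void example_mag_normal_mode(void)
+{
+	u8 mag_pmu = 0;
+
+	bmi160_set_command_register(MAG_MODE_NORMAL);	/* 0x19 */
+	bmi160_get_mag_power_mode_stat(&mag_pmu);	/* expect 0x01 (NORMAL) */
+}
+#endif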
+/*!
+ *	@brief This API reads the gyroscope power mode from
+ *	PMU status register 0x03 bit 2 and 3
+ *
+ *  @param v_gyro_power_mode_stat_u8 :	The value of gyro power mode
+ *	gyro_powermode   |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   FAST POWER UP   |   0x03
+ *
+ * @note The power mode of the gyro is set by the 0x7E command register
+ * @note using the function "bmi160_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x14    | GYRO_MODE_SUSPEND
+ *   0x15    | GYRO_MODE_NORMAL
+ *   0x17    | GYRO_MODE_FASTSTARTUP
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_power_mode_stat(u8
+*v_gyro_power_mode_stat_u8);
+/*!
+ *	@brief This API reads the accelerometer power mode from
+ *	PMU status register 0x03 bit 4 and 5
+ *
+ *
+ *  @param v_accel_power_mode_stat_u8 :	The value of accel power mode
+ *	accel_powermode  |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *  LOW POWER        |   0x03
+ *
+ * @note The power mode of the accel is set by the 0x7E command register
+ * @note using the function "bmi160_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x11    | ACCEL_MODE_NORMAL
+ *   0x12    | ACCEL_LOWPOWER
+ *   0x10    | ACCEL_SUSPEND
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_power_mode_stat(u8
+*v_accel_power_mode_stat_u8);
+/*!
+ *	@brief This API switches the mag interface to normal mode
+ *	and confirms whether the mode switching was done successfully or not
+*
+ *	@return results of bus communication function and current MAG_PMU result
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_interface_normal(void);
+/**************************************************/
+/**\name	 FUNCTION FOR Mag XYZ data read */
+/*************************************************/
+/*!
+ *	@brief This API reads magnetometer data X values
+ *	from the register 0x04 and 0x05
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param v_mag_x_s16 : The value of mag x
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_x(s16 *v_mag_x_s16,
+u8 v_sensor_select_u8);
+/*!
+ *	@brief This API reads magnetometer data Y values
+ *	from the register 0x06 and 0x07
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param v_mag_y_s16 : The value of mag y
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_y(s16 *v_mag_y_s16,
+u8 v_sensor_select_u8);
+/*!
+ *	@brief This API reads magnetometer data Z values
+ *	from the register 0x08 and 0x09
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param v_mag_z_s16 : The value of mag z
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_z(s16 *v_mag_z_s16,
+u8 v_sensor_select_u8);
+/*!
+ *	@brief This API reads magnetometer data RHALL values
+ *	from the register 0x0A and 0x0B
+ *
+ *
+ *  @param v_mag_r_s16 : The value of BMM150 r data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_r(
+s16 *v_mag_r_s16);
+/*!
+ *	@brief This API reads magnetometer data X,Y,Z values
+ *	from the register 0x04 to 0x09
+ *
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param mag : The value of mag xyz data
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_xyz(
+struct bmi160_mag_t *mag, u8 v_sensor_select_u8);
+/*!
+ *	@brief This API reads magnetometer data X,Y,Z,r
+ *	values from the register 0x04 to 0x0B
+ *
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param mag : The value of mag-BMM150 xyzr data
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note bmi160_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_mag_xyzr(
+struct bmi160_mag_xyzr_t *mag);
+/**************************************************/
+/**\name	 FUNCTION FOR GYRO XYZ DATA READ  */
+/*************************************************/
+/*!
+ *	@brief This API reads gyro data X values
+ *	from the register 0x0C and 0x0D
+ *
+ *
+ *
+ *
+ *  @param v_gyro_x_s16 : The value of gyro x data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_x(
+s16 *v_gyro_x_s16);
+/*!
+ *	@brief This API reads gyro data Y values
+ *	from the register 0x0E and 0x0F
+ *
+ *
+ *
+ *
+ *  @param v_gyro_y_s16 : The value of gyro y data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_y(
+s16 *v_gyro_y_s16);
+/*!
+ *	@brief This API reads gyro data Z values
+ *	from the register 0x10 and 0x11
+ *
+ *
+ *
+ *
+ *  @param v_gyro_z_s16 : The value of gyro z data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_z(
+s16 *v_gyro_z_s16);
+/*!
+ *	@brief This API reads gyro data X,Y,Z values
+ *	from the register 0x0C to 0x11
+ *
+ *
+ *
+ *
+ *  @param gyro : The value of gyro xyz
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note bmi160_set_gyro_output_data_rate()
+ *	@note bmi160_set_gyro_bw()
+ *	@note bmi160_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_gyro_xyz(
+struct bmi160_gyro_t *gyro);
+/**************************************************/
+/**\name	 FUNCTION FOR ACCEL XYZ DATA READ  */
+/*************************************************/
+/*!
+ *	@brief This API reads accelerometer data X values
+ *	from the register 0x12 and 0x13
+ *
+ *
+ *
+ *
+ *  @param v_accel_x_s16 : The value of accel x
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_x(
+s16 *v_accel_x_s16);
+/*!
+ *	@brief This API reads accelerometer data Y values
+ *	from the register 0x14 and 0x15
+ *
+ *
+ *
+ *
+ *  @param v_accel_y_s16 : The value of accel y
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_y(
+s16 *v_accel_y_s16);
+/*!
+ *	@brief This API reads accelerometer data Z values
+ *	from the register 0x16 and 0x17
+ *
+ *
+ *
+ *
+ *  @param v_accel_z_s16 : The value of accel z
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_z(
+s16 *v_accel_z_s16);
+/*!
+ *	@brief This API reads accelerometer data X,Y,Z values
+ *	from the register 0x12 to 0x17
+ *
+ *
+ *
+ *
+ *  @param accel :The value of accel xyz
+ *
+ *	@note For accel configuration use the following functions
+ *	@note bmi160_set_accel_output_data_rate()
+ *	@note bmi160_set_accel_bw()
+ *	@note bmi160_set_accel_under_sampling_parameter()
+ *	@note bmi160_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_accel_xyz(
+struct bmi160_accel_t *accel);
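+/*
+ * Illustrative sketch: polling one accel sample via the burst read
+ * declared above.  The x/y/z member names of struct bmi160_accel_t
+ * are assumed for illustration.  Compiled out.
+ */
+#if 0
+static void example_read_accel_sample(void)
+{
+	struct bmi160_accel_t accel;
+
+	if (bmi160_read_accel_xyz(&accel) == 0)
+		pr_info("accel x=%d y=%d z=%d\n",
+			accel.x, accel.y, accel.z);
+}
+#endif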
+/**************************************************/
+/**\name	 FUNCTION FOR SENSOR TIME */
+/*************************************************/
+/*!
+ *	@brief This API reads sensor_time from the register
+ *	0x18 to 0x1A
+ *
+ *
+ *  @param v_sensor_time_u32 : The value of sensor time
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_sensor_time(
+u32 *v_sensor_time_u32);
+/**************************************************/
+/**\name	 FUNCTION FOR GYRO SELF TEST  */
+/*************************************************/
+/*!
+ *	@brief This API reads the Gyroscope self test
+ *	status from the register 0x1B bit 1
+ *
+ *
+ *  @param v_gyro_selftest_u8 : The value of gyro self test status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | Gyroscope self test is running or failed
+ *   1       | Gyroscope self test completed successfully
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_selftest(u8
+*v_gyro_selftest_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR MANUAL INTERFACE  */
+/*************************************************/
+/*!
+ *	@brief This API reads the status of
+ *	mag manual interface operation from the register 0x1B bit 2
+ *
+ *
+ *
+ *  @param v_mag_manual_stat_u8 : The value of mag manual operation status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | Indicates no manual magnetometer
+ *   -       | interface operation is ongoing
+ *   1       | Indicates manual magnetometer
+ *   -       | interface operation is ongoing
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_manual_operation_stat(u8
+*v_mag_manual_stat_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR FAST OFFSET READY  */
+/*************************************************/
+/*!
+ *	@brief This API reads the fast offset compensation
+ *	status from the register 0x1B bit 3
+ *
+ *
+ *  @param v_foc_rdy_u8 : The status of fast compensation
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_rdy(u8
+*v_foc_rdy_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR NVM READY  */
+/*************************************************/
+/*!
+ * @brief This API reads the nvm_rdy status from the
+ *	register 0x1B bit 4
+ *
+ *
+ *  @param v_nvm_rdy_u8 : The value of NVM ready status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | NVM write operation in progress
+ *   1       | NVM is ready to accept a new write trigger
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_nvm_rdy(u8
+*v_nvm_rdy_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR DATA READY FOR MAG, GYRO, AND ACCEL */
+/*************************************************/
+/*!
+ *	@brief This API reads the status of mag data ready
+ *	from the register 0x1B bit 5
+ *	The status is reset when one mag data register is read out
+ *
+ *  @param v_data_rdy_u8 : The value of mag data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_data_rdy_mag(u8
+*v_data_rdy_u8);
+/*!
+ *	@brief This API reads the status of gyro data ready from the
+ *	register 0x1B bit 6
+ *	The status is reset when the gyro data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of gyro data ready
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_data_rdy(u8
+*v_data_rdy_u8);
+/*!
+ *	@brief This API reads the status of accel data ready from the
+ *	register 0x1B bit 7
+ *	The status is reset when the accel data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of accel data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_data_rdy(u8
+*drdy_acc);
+/**************************************************/
+/**\name	 FUNCTION FOR STEP INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the step detector interrupt status
+ *	from the register 0x1C bit 0.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the step detector interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_step_intr_u8 : The status of step detector interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_step_intr(u8
+*v_step_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR SIGNIFICANT INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the
+ *	significant motion interrupt status
+ *	from the register 0x1C bit 1.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the significant motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_significant_intr_u8 : The status of significant
+ *	motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_significant_intr(u8
+*sigmot_intr);
+/**************************************************/
+/**\name	 FUNCTION FOR ANY MOTION INTERRUPT STATUS  */
+/*************************************************/
+ /*!
+ *	@brief This API reads the any motion interrupt status
+ *	from the register 0x1C bit 2.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the any motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *  @param v_any_motion_intr_u8 : The status of any-motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_any_motion_intr(u8
+*v_any_motion_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR PMU TRIGGER INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the power mode trigger interrupt status
+ *	from the register 0x1C bit 3.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the power mode trigger interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_pmu_trigger_intr_u8 : The status of power mode trigger interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_pmu_trigger_intr(u8
+*v_pmu_trigger_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR DOUBLE TAP STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the double tap status
+ *	from the register 0x1C bit 4.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the double tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_double_tap_intr_u8 :The status of double tap interrupt
+ *
+ *	@note Double tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_double_tap()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat2_tap_first_x()
+ *	@note bmi160_get_stat2_tap_first_y()
+ *	@note bmi160_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note bmi160_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note bmi160_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note bmi160_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note bmi160_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_double_tap_intr(u8
+*v_double_tap_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR SINGLE TAP STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the single tap status
+ *	from the register 0x1C bit 5.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the single tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_single_tap_intr_u8 :The status of single tap interrupt
+ *
+ *	@note Single tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_single_tap()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat2_tap_first_x()
+ *	@note bmi160_get_stat2_tap_first_y()
+ *	@note bmi160_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note bmi160_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note bmi160_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note bmi160_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note bmi160_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_single_tap_intr(u8
+*v_single_tap_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR ORIENT INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the orient status
+ *	from the register 0x1C bit 6.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the orient interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_orient_intr_u8 : The status of orient interrupt
+ *
+ *	@note For orient interrupt configuration use the following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat0_orient_intr()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat3_orient_xy()
+ *	@note bmi160_get_stat3_orient_z()
+ *	@note bmi160_set_intr_orient_axes_enable()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_orient()
+ *	@note INTERRUPT OUTPUT
+ *	@note bmi160_set_intr_orient_ud_enable()
+ *	@note THETA
+ *	@note bmi160_set_intr_orient_theta()
+ *	@note HYSTERESIS
+ *	@note bmi160_set_intr_orient_hyst()
+ *	@note BLOCKING
+ *	@note bmi160_set_intr_orient_blocking()
+ *	@note MODE
+ *	@note bmi160_set_intr_orient_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_orient_intr(u8
+*v_orient_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR FLAT INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the flat interrupt status
+ *	from the register 0x1C bit 7.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the flat interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_flat_intr_u8 : The status of  flat interrupt
+ *
+ *	@note For flat configuration use the following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat0_flat_intr()
+ *	@note bmi160_get_stat3_flat()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_flat()
+ *	@note THETA
+ *	@note bmi160_set_intr_flat_theta()
+ *	@note HOLD TIME
+ *	@note bmi160_set_intr_flat_hold()
+ *	@note HYSTERESIS
+ *	@note bmi160_set_intr_flat_hyst()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat0_flat_intr(u8
+*v_flat_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR HIGH_G INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the high_g interrupt status
+ *	from the register 0x1D bit 2.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the high g  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_high_g_intr_u8 : The status of high_g interrupt
+ *
+ *	@note High_g interrupt configured by following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat1_high_g_intr()
+ *	@note AXIS MAPPING
+ *	@note bmi160_get_stat3_high_g_first_x()
+ *	@note bmi160_get_stat3_high_g_first_y()
+ *	@note bmi160_get_stat3_high_g_first_z()
+ *	@note SIGN MAPPING
+ *	@note bmi160_get_stat3_high_g_first_sign()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_high_g()
+  *	@note HYSTERESIS
+ *	@note bmi160_set_intr_high_g_hyst()
+ *	@note DURATION
+ *	@note bmi160_set_intr_high_g_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_high_g_thres()
+ *	@note SOURCE
+ *	@note bmi160_set_intr_low_high_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_high_g_intr(u8
+*v_high_g_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR LOW_G INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the low g interrupt status
+ *	from the register 0x1D bit 3.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the low g  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_low_g_intr_u8 : The status of low_g interrupt
+ *
+ *	@note Low_g interrupt configured by following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat1_low_g_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_low_g()
+ *	@note SOURCE
+ *	@note bmi160_set_intr_low_high_source()
+ *	@note DURATION
+ *	@note bmi160_set_intr_low_g_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_low_g_thres()
+ *	@note HYSTERESIS
+ *	@note bmi160_set_intr_low_g_hyst()
+ *	@note MODE
+ *	@note bmi160_set_intr_low_g_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_low_g_intr(u8
+*v_low_g_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR DATA READY INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads data ready interrupt status
+ *	from the register 0x1D bit 4.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the  data ready  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_data_rdy_intr_u8 : The status of data ready interrupt
+ *
+ *	@note Data ready interrupt configured by following functions
+ *	@note STATUS
+ *	@note bmi160_get_stat1_data_rdy_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_data_rdy()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_data_rdy_intr(u8
+*v_data_rdy_intr_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR FIFO FULL AND WATER MARK INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads data ready FIFO full interrupt status
+ *	from the register 0x1D bit 5.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the FIFO full interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will
+ *	be permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_full_intr_u8 : The status of fifo full interrupt
+ *
+ *	@note FIFO full interrupt can be configured by following functions
+ *	@note bmi160_set_intr_fifo_full()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_fifo_full_intr(u8
+*v_fifo_full_intr_u8);
+/*!
+ *	@brief This API reads data
+ *	 ready FIFO watermark interrupt status
+ *	from the register 0x1D bit 6.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the FIFO watermark interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_wm_intr_u8 : The status of fifo water mark interrupt
+ *
+ *	@note FIFO watermark interrupt can be configured by following functions
+ *	@note bmi160_set_intr_fifo_wm()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_fifo_wm_intr(u8
+*v_fifo_wm_intr_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR NO MOTION INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads data ready no motion interrupt status
+ *	from the register 0x1D bit 7.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the no motion  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_nomotion_intr_u8 : The status of no motion interrupt
+ *
+ *	@note No motion interrupt can be configured by following function
+ *	@note STATUS
+ *	@note bmi160_get_stat1_nomotion_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note bmi160_set_intr_nomotion()
+ *	@note DURATION
+ *	@note bmi160_set_intr_slow_no_motion_durn()
+ *	@note THRESHOLD
+ *	@note bmi160_set_intr_slow_no_motion_thres()
+ *	@note SLOW/NO MOTION SELECT
+ *	@note bmi160_set_intr_slow_no_motion_select()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat1_nomotion_intr(u8
+*nomo_intr);
+/**************************************************/
+/**\name	 FUNCTIONS FOR ANY MOTION FIRST XYZ AND SIGN INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the status of any motion first x
+ *	from the register 0x1E bit 0
+ *
+ *
+ *  @param v_anymotion_first_x_u8 : The status of any motion first x interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_first_x(u8
+*v_anymotion_first_x_u8);
+/*!
+ *	@brief This API reads the status of any motion first y interrupt
+ *	from the register 0x1E bit 1
+ *
+ *
+ *
+ *@param v_any_motion_first_y_u8 : The status of any motion first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_first_y(u8
+*v_any_motion_first_y_u8);
+/*!
+ *	@brief This API reads the status of any motion first z interrupt
+ *	from the register 0x1E bit 2
+ *
+ *
+ *
+ *
+ *@param v_any_motion_first_z_u8 : The status of any motion first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_first_z(u8
+*v_any_motion_first_z_u8);
+/*!
+ *	@brief This API reads the any motion sign status from the
+ *	register 0x1E bit 3
+ *
+ *
+ *
+ *
+ *  @param v_anymotion_sign_u8 : The status of any motion sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_any_motion_sign(u8
+*v_anymotion_sign_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR TAP FIRST XYZ AND SIGN INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the tap first x status from the
+ *	register 0x1E bit 4
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_x_u8 :The status of tap first x
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_first_x(u8
+*v_tap_first_x_u8);
+/*!
+ *	@brief This API reads the tap first y interrupt status from the
+ *	register 0x1E bit 5
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_y_u8 :The status of tap first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_first_y(u8
+*v_tap_first_y_u8);
+/*!
+ *	@brief This API reads the tap first z interrupt status  from the
+ *	register 0x1E bit 6
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_z_u8 :The status of tap first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_first_z(u8
+*v_tap_first_z_u8);
+/*!
+ *	@brief This API reads the tap sign status from the
+ *	register 0x1E bit 7
+ *
+ *
+ *
+ *
+ *  @param v_tap_sign_u8 : The status of tap sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat2_tap_sign(u8
+*tap_sign);
+/**************************************************/
+/**\name	 FUNCTIONS FOR HIGH_G FIRST XYZ AND SIGN INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the high_g first x status from the
+ *	register 0x1F bit 0
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_x_u8 :The status of high_g first x
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_first_x(u8
+*v_high_g_first_x_u8);
+/*!
+ *	@brief This API reads the high_g first y status from the
+ *	register 0x1F bit 1
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_y_u8 : The status of high_g first y
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_first_y(u8
+*v_high_g_first_y_u8);
+/*!
+ *	@brief This API reads the high_g first z status from the
+ *	register 0x1F bit 2
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_z_u8 : The status of high_g first z
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_first_z(u8
+*v_high_g_first_z_u8);
+/*!
+ *	@brief This API reads the high_g sign status from the
+ *	register 0x1F bit 3
+ *
+ *
+ *
+ *
+ *  @param v_high_g_sign_u8 :The status of high_g sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_high_g_sign(u8
+*v_high_g_sign_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR ORIENT XY AND Z INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the status of orient_xy plane
+ *	from the register 0x1F bit 4 and 5
+ *
+ *
+ *  @param v_orient_xy_u8 :The status of orient_xy plane
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | portrait upright
+ *   0x01     | portrait upside down
+ *   0x02     | landscape left
+ *   0x03     | landscape right
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_orient_xy(u8
+*v_orient_xy_u8);
+/*!
+ *	@brief This API reads the status of orient z plane
+ *	from the register 0x1F bit 6
+ *
+ *
+ *  @param v_orient_z_u8 :The status of orient z
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | upward looking
+ *   0x01     | downward looking
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_orient_z(u8
+*v_orient_z_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR FLAT INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the flat status from the register
+ *	0x1F bit 7
+ *
+ *
+ *  @param v_flat_u8 : The status of flat interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | non flat
+ *   0x01     | flat position
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_stat3_flat(u8
+*flat);
+/**************************************************/
+/**\name	 FUNCTION FOR TEMPERATURE READ */
+/*************************************************/
+/*!
+ *	@brief This API reads the temperature of the sensor
+ *	from the register 0x21 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_temp_s16 : The value of temperature
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_temp(s16
+*v_temp_s16);
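+/*
+ * Illustrative usage sketch: reading the raw temperature word. Bus setup
+ * (e.g. via an earlier driver init routine) is assumed to have been done
+ * elsewhere, and error handling is reduced to a return-value check.
+ *
+ *	s16 raw_temp = 0;
+ *
+ *	if (bmi160_get_temp(&raw_temp) == 0)
+ *		pr_info("bmi160 raw temperature: %d\n", raw_temp);
+ */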
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO LENGTH AND FIFO DATA READ */
+/*************************************************/
+/*!
+ *	@brief This API reads the fifo length (byte counter) of the sensor
+ *	from the register 0x23 and 0x24 bit 0 to 7 and 0 to 2
+ *	@brief this byte counter is updated each time a complete frame
+ *	was read or written
+ *
+ *
+ *  @param v_fifo_length_u32 : The value of fifo byte counter
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_fifo_length(
+u32 *v_fifo_length_u32);
+/*!
+ *	@brief This API reads the fifo data of the sensor
+ *	from the register 0x24
+ *	@brief Data format depends on the setting of register FIFO_CONFIG
+ *
+ *
+ *
+ *  @param v_fifodata_u8 : Pointer holding the fifo data
+ *
+ *	@note For reading FIFO data, use bmi160_fifo_length() to get the
+ *	number of bytes to read
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_fifo_data(
+u8 *v_fifodata_u8, u16 v_fifo_length_u16);
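+/*
+ * Illustrative usage sketch: draining the FIFO. The byte counter from
+ * bmi160_fifo_length() sizes the read; the 1024-byte buffer and the
+ * clamping policy are assumptions of this example, not requirements of
+ * the API.
+ *
+ *	u8 fifo_buf[1024];
+ *	u32 fifo_len = 0;
+ *
+ *	if (bmi160_fifo_length(&fifo_len) == 0) {
+ *		if (fifo_len > sizeof(fifo_buf))
+ *			fifo_len = sizeof(fifo_buf);
+ *		bmi160_fifo_data(fifo_buf, (u16)fifo_len);
+ *	}
+ */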
+/**************************************************/
+/**\name	 FUNCTION FOR ACCEL CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API is used to get the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	BMI160_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | BMI160_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_output_data_rate(
+u8 *v_output_data_rate_u8);
+/*!
+ *	@brief This API is used to set the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	BMI160_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	BMI160_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | BMI160_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	BMI160_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_output_data_rate(u8 odr);
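+/*
+ * Illustrative usage sketch: selecting a 100 Hz accel output data rate
+ * using the macro listed in the table above. Whether the part accepts the
+ * write in its current power mode is not checked here.
+ *
+ *	if (bmi160_set_accel_output_data_rate(
+ *		BMI160_ACCEL_OUTPUT_DATA_RATE_100HZ) != 0)
+ *		pr_err("bmi160: failed to set accel ODR\n");
+ */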
+/*!
+ *	@brief This API is used to get the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on under sampling parameter
+ *	@note under sampling parameter can be set by the function
+ *	"BMI160_SET_ACCEL_UNDER_SAMPLING_PARAMETER"
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_bw(u8 *v_bw_u8);
+/*!
+ *	@brief This API is used to set the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on under sampling parameter
+ *	@note under sampling parameter can be set by the function
+ *	"BMI160_SET_ACCEL_UNDER_SAMPLING_PARAMETER"
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_bw(u8 v_bw_u8);
+/*!
+ *	@brief This API is used to get the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_under_sampling_parameter(
+u8 *v_accel_under_sampling_u8);
+/*!
+ *	@brief This API is used to set the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_under_sampling_parameter(
+u8 v_accel_under_sampling_u8);
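+/*
+ * Illustrative usage sketch: since the bandwidth field is interpreted
+ * against the under sampling bit (see the notes above), the two are
+ * typically written together. Here under sampling is disabled and the
+ * filter is put in normal mode (0x02 per the table above); the call order
+ * is an assumption of this example.
+ *
+ *	bmi160_set_accel_under_sampling_parameter(BMI160_DISABLE);
+ *	bmi160_set_accel_bw(0x02);
+ */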
+/*!
+ *	@brief This API is used to get the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | BMI160_ACCEL_RANGE_2G
+ *   0x05    | BMI160_ACCEL_RANGE_4G
+ *   0x08    | BMI160_ACCEL_RANGE_8G
+ *   0x0C    | BMI160_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_range(
+u8 *v_range_u8);
+/*!
+ *	@brief This API is used to set the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | BMI160_ACCEL_RANGE_2G
+ *   0x05    | BMI160_ACCEL_RANGE_4G
+ *   0x08    | BMI160_ACCEL_RANGE_8G
+ *   0x0C    | BMI160_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_range(
+u8 v_range_u8);
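+/*
+ * Illustrative usage sketch: read-modify of the accel range using the
+ * macros from the table above. The fallback to 4 g is only an example
+ * policy.
+ *
+ *	u8 range = 0;
+ *
+ *	if (bmi160_get_accel_range(&range) == 0 &&
+ *	    range != BMI160_ACCEL_RANGE_4G)
+ *		bmi160_set_accel_range(BMI160_ACCEL_RANGE_4G);
+ */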
+/**************************************************/
+/**\name	 FUNCTION FOR GYRO CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API is used to get the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | BMI160_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | BMI160_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | BMI160_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | BMI160_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | BMI160_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | BMI160_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | BMI160_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | BMI160_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_output_data_rate(
+u8 *gyro_output_typer);
+/*!
+ *	@brief This API is used to set the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | BMI160_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | BMI160_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | BMI160_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | BMI160_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | BMI160_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | BMI160_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | BMI160_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | BMI160_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | BMI160_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_output_data_rate(
+u8 gyro_output_typer);
+/*!
+ *	@brief This API is used to get the
+ *	bandwidth of gyro from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | BMI160_GYRO_OSR4_MODE
+ *   0x01     | BMI160_GYRO_OSR2_MODE
+ *   0x02     | BMI160_GYRO_NORMAL_MODE
+ *   0x03     | BMI160_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_bw(u8 *v_bw_u8);
+/*!
+ *	@brief This API is used to set the
+ *	bandwidth of gyro from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | BMI160_GYRO_OSR4_MODE
+ *   0x01     | BMI160_GYRO_OSR2_MODE
+ *   0x02     | BMI160_GYRO_NORMAL_MODE
+ *   0x03     | BMI160_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_bw(u8 v_bw_u8);
+/*!
+ *	@brief This API reads the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | BMI160_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | BMI160_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | BMI160_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | BMI160_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | BMI160_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_range(
+u8 *v_range_u8);
+/*!
+ *	@brief This API set the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | BMI160_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | BMI160_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | BMI160_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | BMI160_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | BMI160_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_range(
+u8 v_range_u8);
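+/*
+ * Illustrative usage sketch: a typical gyro configuration built from the
+ * setters above, using macros from the preceding tables (100 Hz ODR,
+ * normal filter mode, 1000 deg/s range). The chosen values are examples
+ * only.
+ *
+ *	bmi160_set_gyro_output_data_rate(BMI160_GYRO_OUTPUT_DATA_RATE_100HZ);
+ *	bmi160_set_gyro_bw(BMI160_GYRO_NORMAL_MODE);
+ *	bmi160_set_gyro_range(BMI160_GYRO_RANGE_1000_DEG_SEC);
+ */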
+/**************************************************/
+/**\name	 FUNCTION FOR MAG CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API is used to get the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |BMI160_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |BMI160_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |BMI160_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |BMI160_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |BMI160_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |BMI160_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |BMI160_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |BMI160_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |BMI160_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |BMI160_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |BMI160_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |BMI160_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_output_data_rate(u8 *odr);
+/*!
+ *	@brief This API is used to set the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |BMI160_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |BMI160_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |BMI160_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |BMI160_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |BMI160_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |BMI160_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |BMI160_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |BMI160_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |BMI160_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |BMI160_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |BMI160_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |BMI160_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |BMI160_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_output_data_rate(u8 odr);
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO CONFIGURATIONS */
+/*************************************************/
+ /*!
+ *	@brief This API is used to read Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_down_gyro(
+u8 *v_fifo_down_gyro_u8);
+ /*!
+ *	@brief This API is used to set Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_down_gyro(
+u8 v_fifo_down_gyro_u8);
+/*!
+ *	@brief This API is used to read gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_fifo_filter_data(
+u8 *v_gyro_fifo_filter_data_u8);
+/*!
+ *	@brief This API is used to set gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_fifo_filter_data(
+u8 v_gyro_fifo_filter_data_u8);
+/*!
+ *	@brief This API is used to read Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_down_accel(
+u8 *v_fifo_down_u8);
+ /*!
+ *	@brief This API is used to set Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_down_accel(
+u8 v_fifo_down_u8);
+/*!
+ *	@brief This API is used to read accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_fifo_filter_data(
+u8 *v_accel_fifo_filter_u8);
+/*!
+ *	@brief This API is used to set accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_fifo_filter_data(
+u8 v_accel_fifo_filter_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO WATER MARK ENABLE */
+/*************************************************/
+/*!
+ *	@brief This API is used to read the FIFO water mark level from the
+ *	register 0x46 bit 0 to 7; an interrupt is triggered when the FIFO
+ *	fill level reaches the water mark
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_wm(
+u8 *v_fifo_wm_u8);
+/*!
+ *	@brief This API is used to set the FIFO water mark level from the
+ *	register 0x46 bit 0 to 7; an interrupt is triggered when the FIFO
+ *	fill level reaches the water mark
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_wm(
+u8 v_fifo_wm_u8);
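+/*
+ * Illustrative usage sketch: arming the water mark interrupt source. The
+ * value 100 is arbitrary; consult the datasheet for how the 8-bit water
+ * mark maps to a FIFO fill level in bytes.
+ *
+ *	bmi160_set_fifo_wm(100);
+ */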
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API reads the fifo sensor time enable (return a sensortime
+ *	frame after the last valid data frame) from the register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_time_enable(
+u8 *v_fifo_time_enable_u8);
+/*!
+ *	@brief This API sets the fifo sensor time enable (return a sensortime
+ *	frame after the last valid data frame) from the register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_time_enable(
+u8 v_fifo_time_enable_u8);
+/*!
+ *	@brief This API reads FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_tag_intr2_enable(
+u8 *v_fifo_tag_intr2_u8);
+/*!
+ *	@brief This API sets the FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_tag_intr2_enable(
+u8 v_fifo_tag_intr2_u8);
+/*!
+ *	@brief This API gets the FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_tag_intr1_enable(
+u8 *v_fifo_tag_intr1_u8);
+/*!
+ *	@brief This API sets the FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_tag_intr1_enable(
+u8 v_fifo_tag_intr1_u8);
+/*!
+ *	@brief This API reads FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_header_enable(
+u8 *v_fifo_header_u8);
+/*!
+ *	@brief This API sets the FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_header_enable(
+u8 v_fifo_header_u8);
+/*!
+ *	@brief This API is used to read whether
+ *	magnetometer data (all 3 axes) is stored in FIFO, from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_mag_enable(
+u8 *v_fifo_mag_u8);
+/*!
+ *	@brief This API is used to set whether
+ *	magnetometer data (all 3 axes) is stored in FIFO, from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_mag_enable(
+u8 v_fifo_mag_u8);
+/*!
+ *	@brief This API is used to read whether
+ *	accel data (all 3 axes) is stored in FIFO, from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_accel_enable(
+u8 *v_fifo_accel_u8);
+/*!
+ *	@brief This API is used to set whether
+ *	accel data (all 3 axes) is stored in FIFO, from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_accel_enable(
+u8 v_fifo_accel_u8);
+/*!
+ *	@brief This API is used to read whether
+ *	gyro data (all 3 axes) is stored in FIFO, from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_fifo_gyro_enable(
+u8 *v_fifo_gyro_u8);
+/*!
+ *	@brief This API is used to set whether
+ *	gyro data (all 3 axes) is stored in FIFO, from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_fifo_gyro_enable(
+u8 v_fifo_gyro_u8);
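+/*
+ * Illustrative usage sketch: selecting which sources go into the FIFO via
+ * the enable bits of register 0x47 documented above (header frames on,
+ * accel and gyro stored, mag not stored). The combination shown is just an
+ * example.
+ *
+ *	bmi160_set_fifo_header_enable(BMI160_ENABLE);
+ *	bmi160_set_fifo_accel_enable(0x01);
+ *	bmi160_set_fifo_gyro_enable(0x01);
+ *	bmi160_set_fifo_mag_enable(0x00);
+ */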
+/***************************************************************/
+/**\name	FUNCTION FOR MAG I2C ADDRESS SELECTION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_device_addr(
+u8 *v_i2c_device_addr_u8);
+/*!
+ *	@brief This API is used to set
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_i2c_device_addr(
+u8 v_i2c_device_addr_u8);
+/*!
+ *	@brief This API is used to read
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The data of mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_burst(
+u8 *v_mag_burst_u8);
+/*!
+ *	@brief This API is used to set
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The data of mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_burst(
+u8 v_mag_burst_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR MAG OFFSET         */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read
+ *	trigger-readout offset in units of 2.5 ms. If set to zero,
+ *	the offset is maximum, i.e. after readout a trigger
+ *	is issued immediately, from the register 0x4C bit 2 to 5
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_offset(
+u8 *v_mag_offset_u8);
+/*!
+ *	@brief This API is used to set
+ *	trigger-readout offset in units of 2.5 ms. If set to zero,
+ *	the offset is maximum, i.e. after readout a trigger
+ *	is issued immediately, from the register 0x4C bit 2 to 5
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_offset(
+u8 v_mag_offset_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR MAG MANUAL/AUTO MODE SELECTION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read the mag manual enable bit, which
+ *	enables register access on MAG_IF[2] or MAG_IF[3] writes.
+ *	This implies that the DATA registers are not updated with
+ *	magnetometer values. Accessing magnetometer requires
+ *	the magnetometer in normal mode in PMU_STATUS.
+ *	from the register 0x4C bit 7
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_manual_enable(
+u8 *v_mag_manual_u8);
+/*!
+ *	@brief This API is used to set the mag manual enable bit, which
+ *	enables register access on MAG_IF[2] or MAG_IF[3] writes.
+ *	This implies that the DATA registers are not updated with
+ *	magnetometer values. Accessing magnetometer requires
+ *	the magnetometer in normal mode in PMU_STATUS.
+ *	from the register 0x4C bit 7
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_manual_enable(
+u8 v_mag_manual_u8);
+/***************************************************************/
+/**\name	FUNCTIONS FOR MAG READ, WRITE AND WRITE DATA ADDRESS  */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read the
+ *	magnetometer read address from the register 0x4D bit 0 to 7
+ *	@brief It is used to provide the read address of the auxiliary mag
+ *
+ *
+ *
+ *
+ *  @param  v_mag_read_addr_u8 : The value of address need to be read
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_read_addr(
+u8 *v_mag_read_addr_u8);
+/*!
+ *	@brief This API is used to set the
+ *	magnetometer read address from the register 0x4D bit 0 to 7
+ *	@brief mag read address selects the address of the auxiliary mag to read
+ *
+ *
+ *
+ *  @param v_mag_read_addr_u8:
+ *	The address of the auxiliary mag register to read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_read_addr(
+u8 v_mag_read_addr_u8);
+/*!
+ *	@brief This API is used to read
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief mag write address selects the address of the auxiliary mag to write
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The data of auxiliary mag address to write data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_write_addr(
+u8 *v_mag_write_addr_u8);
+/*!
+ *	@brief This API is used to set
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief mag write address selects the address of the auxiliary mag to write
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The data of auxiliary mag address to write data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_write_addr(
+u8 v_mag_write_addr_u8);
+/*!
+ *	@brief This API is used to read the magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_mag_write_data(
+u8 *v_mag_write_data_u8);
+/*!
+ *	@brief This API is used to set the magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_mag_write_data(
+u8 v_mag_write_data_u8);
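+/*
+ * Illustrative usage sketch: one manual-mode transaction with the auxiliary
+ * mag, built from the accessors above. MAG_REG and MAG_VAL are placeholders
+ * for this example; any polling of the mag interface between steps, and
+ * when to return to automatic mode, are left out of the sketch.
+ *
+ *	bmi160_set_mag_manual_enable(BMI160_ENABLE);
+ *	bmi160_set_mag_write_data(MAG_VAL);
+ *	bmi160_set_mag_write_addr(MAG_REG);
+ *	bmi160_set_mag_read_addr(MAG_REG);
+ *	bmi160_set_mag_manual_enable(BMI160_DISABLE);
+ */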
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+ANY-MOTION XYZ, DOUBLE AND SINGLE TAP, ORIENT AND FLAT         */
+/***************************************************************/
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_ANY_MOTION_X_ENABLE
+ *       1         | BMI160_ANY_MOTION_Y_ENABLE
+ *       2         | BMI160_ANY_MOTION_Z_ENABLE
+ *       3         | BMI160_DOUBLE_TAP_ENABLE
+ *       4         | BMI160_SINGLE_TAP_ENABLE
+ *       5         | BMI160_ORIENT_ENABLE
+ *       6         | BMI160_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_enable_0(
+u8 enable, u8 *v_intr_enable_zero_u8);
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_ANY_MOTION_X_ENABLE
+ *       1         | BMI160_ANY_MOTION_Y_ENABLE
+ *       2         | BMI160_ANY_MOTION_Z_ENABLE
+ *       3         | BMI160_DOUBLE_TAP_ENABLE
+ *       4         | BMI160_SINGLE_TAP_ENABLE
+ *       5         | BMI160_ORIENT_ENABLE
+ *       6         | BMI160_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_enable_0(
+u8 enable, u8 v_intr_enable_zero_u8);
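+/*
+ * Illustrative usage sketch: enabling the single tap interrupt through the
+ * selector/value pair described above. The selector macro comes from the
+ * table in the comment.
+ *
+ *	bmi160_set_intr_enable_0(BMI160_SINGLE_TAP_ENABLE, BMI160_ENABLE);
+ */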
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+HIGH_G XYZ, LOW_G, DATA READY, FIFO FULL AND FIFO WATER MARK  */
+/***************************************************************/
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It reads the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo water mark enables.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_HIGH_G_X_ENABLE
+ *       1         | BMI160_HIGH_G_Y_ENABLE
+ *       2         | BMI160_HIGH_G_Z_ENABLE
+ *       3         | BMI160_LOW_G_ENABLE
+ *       4         | BMI160_DATA_RDY_ENABLE
+ *       5         | BMI160_FIFO_FULL_ENABLE
+ *       6         | BMI160_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_enable_1(
+u8 enable, u8 *v_intr_enable_1_u8);
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It sets the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo water mark enables.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_HIGH_G_X_ENABLE
+ *       1         | BMI160_HIGH_G_Y_ENABLE
+ *       2         | BMI160_HIGH_G_Z_ENABLE
+ *       3         | BMI160_LOW_G_ENABLE
+ *       4         | BMI160_DATA_RDY_ENABLE
+ *       5         | BMI160_FIFO_FULL_ENABLE
+ *       6         | BMI160_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_enable_1(
+u8 enable, u8 v_intr_enable_1_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+NO MOTION XYZ  */
+/***************************************************************/
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable byte2 from the register 0x52 bit 0 to 2
+ *	@brief It reads no motion x,y and z
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_NOMOTION_X_ENABLE
+ *       1         | BMI160_NOMOTION_Y_ENABLE
+ *       2         | BMI160_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_enable_2(
+u8 enable, u8 *v_intr_enable_2_u8);
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable byte2 from the register 0x52 bit 0 to 2
+ *	@brief It sets no motion x,y and z
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_NOMOTION_X_ENABLE
+ *       1         | BMI160_NOMOTION_Y_ENABLE
+ *       2         | BMI160_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_enable_2(
+u8 enable, u8 v_intr_enable_2_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+  STEP DETECTOR */
+/***************************************************************/
+ /*!
+ *	@brief This API is used to read the
+ *	step detector interrupt enable from
+ *	the register 0x52 bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_step_detector_enable(
+u8 *v_step_intr_u8);
+ /*!
+ *	@brief This API is used to set the
+ *	step detector interrupt enable from
+ *	the register 0x52 bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_detector_enable(
+u8 v_step_intr_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT CONTROL */
+/***************************************************************/
+/*!
+ *	@brief  API used to get the trigger condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_EDGE_CTRL
+ *       1         | BMI160_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_EDGE
+ *  0x00     |  BMI160_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_edge_ctrl(
+u8 v_channel_u8, u8 *v_intr_edge_ctrl_u8);
+/*!
+ *	@brief  API used to set the trigger condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_EDGE_CTRL
+ *       1         | BMI160_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_EDGE
+ *  0x00     |  BMI160_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_edge_ctrl(
+u8 v_channel_u8, u8 v_intr_edge_ctrl_u8);
+/*!
+ *	@brief  API used to get the configured level condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_LEVEL
+ *       1         | BMI160_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_LEVEL_HIGH
+ *  0x00     |  BMI160_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_level(
+u8 v_channel_u8, u8 *v_intr_level_u8);
+/*!
+ *	@brief  API used to set the level condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_LEVEL
+ *       1         | BMI160_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_LEVEL_HIGH
+ *  0x00     |  BMI160_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_level(
+u8 v_channel_u8, u8 v_intr_level_u8);
+/*!
+ *	@brief  API used to get the configured output type of interrupt1
+ *	and interrupt2 from the register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_OUTPUT_TYPE
+ *       1         | BMI160_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_OPEN_DRAIN
+ *  0x00     |  BMI160_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_output_type(
+u8 v_channel_u8, u8 *v_intr_output_type_u8);
+/*!
+ *	@brief  API used to set the output type of interrupt1
+ *	and interrupt2 from the register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_OUTPUT_TYPE
+ *       1         | BMI160_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_OPEN_DRAIN
+ *  0x00     |  BMI160_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_output_type(
+u8 v_channel_u8, u8 v_intr_output_type_u8);
+ /*!
+ *	@brief API used to get the Output enable for interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_OUTPUT_TYPE
+ *       1         | BMI160_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_output_enable(
+u8 v_channel_u8, u8 *v_output_enable_u8);
+ /*!
+ *	@brief API used to set the Output enable for interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_OUTPUT_TYPE
+ *       1         | BMI160_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_output_enable(
+u8 v_channel_u8, u8 v_output_enable_u8);
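+/*
+ * Illustrative usage sketch: configuring the INT1 pin as an edge-triggered,
+ * active-high, push-pull line with the channel/value macros from the tables
+ * above. The pin's output enable (bit 3 of register 0x53) is configured
+ * separately through bmi160_set_output_enable(), with the value semantics
+ * listed in its table.
+ *
+ *	bmi160_set_intr_edge_ctrl(BMI160_INTR1_EDGE_CTRL, BMI160_EDGE);
+ *	bmi160_set_intr_level(BMI160_INTR1_LEVEL, BMI160_LEVEL_HIGH);
+ *	bmi160_set_intr_output_type(BMI160_INTR1_OUTPUT_TYPE, BMI160_PUSH_PULL);
+ */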
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT LATCH INTERRUPT  */
+/***************************************************************/
+/*!
+*	@brief This API is used to get the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    BMI160_LATCH_DUR_NONE              |      0x00
+*    BMI160_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    BMI160_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    BMI160_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    BMI160_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    BMI160_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    BMI160_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    BMI160_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    BMI160_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    BMI160_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    BMI160_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    BMI160_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    BMI160_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    BMI160_LATCH_DUR_1_28_SEC          |      0x0D
+*    BMI160_LATCH_DUR_2_56_SEC          |      0x0E
+*    BMI160_LATCHED                     |      0x0F
+*
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_latch_intr(
+u8 *v_latch_intr_u8);
+/*!
+*	@brief This API is used to set the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    BMI160_LATCH_DUR_NONE              |      0x00
+*    BMI160_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    BMI160_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    BMI160_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    BMI160_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    BMI160_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    BMI160_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    BMI160_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    BMI160_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    BMI160_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    BMI160_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    BMI160_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    BMI160_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    BMI160_LATCH_DUR_1_28_SEC          |      0x0D
+*    BMI160_LATCH_DUR_2_56_SEC          |      0x0E
+*    BMI160_LATCHED                     |      0x0F
+*
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_latch_intr(
+u8 v_latch_intr_u8);
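+/*
+ * Illustrative usage sketch: holding interrupt status bits for 10 ms using
+ * a latch duration macro from the table above (not applicable to the data
+ * ready, orientation and flat interrupts, as noted).
+ *
+ *	bmi160_set_latch_intr(BMI160_LATCH_DUR_10_MILLI_SEC);
+ */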
+/*!
+ *	@brief API used to get input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_INPUT_ENABLE
+ *       1         | BMI160_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_input_enable(
+u8 v_channel_u8, u8 *v_input_en_u8);
+/*!
+ *	@brief API used to set input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_INPUT_ENABLE
+ *       1         | BMI160_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  BMI160_INPUT
+ *  0x00     |  BMI160_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_input_enable(
+u8 v_channel_u8, u8 v_input_en_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT1 AND INTERRUPT2 MAPPING */
+/***************************************************************/
+ /*!
+ *	@brief reads the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_LOW_G
+ *       1         | BMI160_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g(
+u8 v_channel_u8, u8 *v_intr_low_g_u8);
+ /*!
+ *	@brief set the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_LOW_G
+ *       1         | BMI160_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g(
+u8 v_channel_u8, u8 v_intr_low_g_u8);
+/*!
+ *	@brief Reads the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_HIGH_G
+ *       1         | BMI160_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g(
+u8 v_channel_u8, u8 *v_intr_high_g_u8);
+/*!
+ *	@brief Write the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_HIGH_G
+ *       1         | BMI160_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g(
+u8 v_channel_u8, u8 v_intr_high_g_u8);
+/*!
+ *	@brief Reads the Any motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ANY_MOTION
+ *       1         | BMI160_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_any_motion(
+u8 v_channel_u8, u8 *v_intr_any_motion_u8);
+/*!
+ *	@brief Write the Any motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ANY_MOTION
+ *       1         | BMI160_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_any_motion(
+u8 v_channel_u8, u8 v_intr_any_motion_u8);
+/*!
+ *	@brief Reads the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_NOMO
+ *       1         | BMI160_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_nomotion(
+u8 v_channel_u8, u8 *v_intr_nomotion_u8);
+/*!
+ *	@brief Write the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_NOMO
+ *       1         | BMI160_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_nomotion(
+u8 v_channel_u8, u8 v_intr_nomotion_u8);
+/*!
+ *	@brief Reads the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DOUBLE_TAP
+ *       1         | BMI160_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_double_tap(
+u8 v_channel_u8, u8 *v_intr_double_tap_u8);
+/*!
+ *	@brief Write the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DOUBLE_TAP
+ *       1         | BMI160_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_double_tap(
+u8 v_channel_u8, u8 v_intr_double_tap_u8);
+/*!
+ *	@brief Reads the Single Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_SINGLE_TAP
+ *       1         | BMI160_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap  enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_single_tap(
+u8 v_channel_u8, u8 *v_intr_single_tap_u8);
+/*!
+ *	@brief Write the Single Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_SINGLE_TAP
+ *       1         | BMI160_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap  enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_single_tap(
+u8 v_channel_u8, u8 v_intr_single_tap_u8);
+/*!
+ *	@brief Reads the Orient interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ORIENT
+ *       1         | BMI160_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_u8 : The value of orient enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient(
+u8 v_channel_u8, u8 *v_intr_orient_u8);
+/*!
+ *	@brief Write the Orient interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_ORIENT
+ *       1         | BMI160_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_u8 : The value of orient enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient(
+u8 v_channel_u8, u8 v_intr_orient_u8);
+ /*!
+ *	@brief Reads the Flat interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FLAT
+ *       1         | BMI160_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat(
+u8 v_channel_u8, u8 *v_intr_flat_u8);
+ /*!
+ *	@brief Write the Flat interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FLAT
+ *       1         | BMI160_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat(
+u8 v_channel_u8, u8 v_intr_flat_u8);
+/*!
+ *	@brief Reads PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_PMUTRIG
+ *       1         | BMI160_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_pmu_trig(
+u8 v_channel_u8, u8 *v_intr_pmu_trig_u8);
+/*!
+ *	@brief Write PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_PMUTRIG
+ *       1         | BMI160_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | trigger enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_pmu_trig(
+u8 v_channel_u8, u8 v_intr_pmu_trig_u8);
+/*!
+ *	@brief Reads FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_FULL
+ *       1         | BMI160_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_fifo_full(
+u8 v_channel_u8, u8 *v_intr_fifo_full_u8);
+/*!
+ *	@brief Write FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_FULL
+ *       1         | BMI160_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_fifo_full(
+u8 v_channel_u8, u8 v_intr_fifo_full_u8);
+/*!
+ *	@brief Reads FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_WM
+ *       1         | BMI160_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_fifo_wm(
+u8 v_channel_u8, u8 *v_intr_fifo_wm_u8);
+/*!
+ *	@brief Write FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_FIFO_WM
+ *       1         | BMI160_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_fifo_wm(
+u8 v_channel_u8, u8 v_intr_fifo_wm_u8);
+/*!
+ *	@brief Reads Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DATA_RDY
+ *       1         | BMI160_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_data_rdy(
+u8 v_channel_u8, u8 *v_intr_data_rdy_u8);
+/*!
+ *	@brief Write Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | BMI160_INTR1_MAP_DATA_RDY
+ *       1         | BMI160_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  BMI160_ENABLE
+ *  0x00     |  BMI160_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_data_rdy(
+u8 v_channel_u8, u8 v_intr_data_rdy_u8);
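+/*
+ * Usage sketch (illustrative only): map the data ready interrupt to the
+ * INT1 pin (channel 0) and the any motion interrupt to the INT2 pin
+ * (channel 1). BMI160_ENABLE is assumed to expand to 0x01 as tabulated
+ * in the parameter descriptions above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_data_rdy(0, BMI160_ENABLE);
+ *	com_rslt += bmi160_set_intr_any_motion(1, BMI160_ENABLE);
+ */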
+/***************************************************************/
+/**\name	FUNCTION FOR TAP SOURCE CONFIGURATION          */
+/***************************************************************/
+ /*!
+ *	@brief This API reads data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_source(
+u8 *v_tap_source_u8);
+ /*!
+ *	@brief This API write data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_source(
+u8 v_tap_source_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G AND HIGH_G SOURCE CONFIGURATION */
+/***************************************************************/
+ /*!
+ *	@brief This API Reads Data source for the
+ *	interrupt engine for the low and high g interrupts
+ *	from the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_high_source(
+u8 *v_low_high_source_u8);
+ /*!
+ *	@brief This API write Data source for the
+ *	interrupt engine for the low and high g interrupts
+ *	from the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_high_source(
+u8 v_low_high_source_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR MOTION SOURCE CONFIGURATION          */
+/***************************************************************/
+ /*!
+ *	@brief This API reads Data source for the
+ *	interrupt engine for the nomotion and anymotion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_motion_source(
+u8 *v_motion_source_u8);
+ /*!
+ *	@brief This API write Data source for the
+ *	interrupt engine for the nomotion and anymotion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_motion_source(
+u8 v_motion_source_u8);
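+/*
+ * Usage sketch (illustrative only): feed the tap engine with unfiltered
+ * data (0x01 = UNFILTER_DATA) and the any/no motion engine with filtered
+ * data (0x00 = FILTER_DATA), per the tables above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_tap_source(0x01);
+ *	com_rslt += bmi160_set_intr_motion_source(0x00);
+ */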
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G DURATION CONFIGURATION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note Low_g duration sets the trigger delay according to
+ *	"(v_low_g_durn_u8 * 2.5)ms" in a range from 2.5ms to 640ms.
+ *	The default value corresponds to a delay of 20ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_durn(
+u8 *v_low_durn_u8);
+ /*!
+ *	@brief This API is used to write the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note Low_g duration sets the trigger delay according to
+ *	"(v_low_g_durn_u8 * 2.5)ms" in a range from 2.5ms to 640ms.
+ *	The default value corresponds to a delay of 20ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_durn(
+u8 v_low_durn_u8);
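+/*
+ * Worked example (illustrative only): with a resolution of 2.5 ms per LSB,
+ * a 100 ms low_g trigger delay is programmed as 100 / 2.5 = 40.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_low_g_durn(40);
+ */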
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G THRESH CONFIGURATION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note Low_g interrupt trigger threshold according to
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	3.91 mg for v_low_g_thres_u8 = 0
+ *	The threshold range is from 3.91mg to 2.000g
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_thres(
+u8 *v_low_g_thres_u8);
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note Low_g interrupt trigger threshold according to
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	3.91 mg for v_low_g_thres_u8 = 0
+ *	The threshold range is from 3.91mg to 2.000g
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_thres(
+u8 v_low_g_thres_u8);
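+/*
+ * Worked example (illustrative only): with 7.81 mg per LSB, a low_g
+ * threshold of roughly 0.5 g is programmed as 500 / 7.81 ~= 64
+ * (64 * 7.81 mg = 499.8 mg).
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_low_g_thres(64);
+ */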
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G HYSTERESIS CONFIGURATION     */
+/***************************************************************/
+ /*!
+ *	@brief This API Reads Low-g interrupt hysteresis
+ *	from the register 0x5C bit 0 to 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_hyst(
+u8 *v_low_hyst_u8);
+ /*!
+ *	@brief This API write Low-g interrupt hysteresis
+ *	from the register 0x5C bit 0 to 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_hyst(
+u8 v_low_hyst_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G MODE CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API reads Low-g interrupt mode
+ *	from the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_low_g_mode(
+u8 *v_low_g_mode_u8);
+/*!
+ *	@brief This API write Low-g interrupt mode
+ *	from the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_low_g_mode(
+u8 v_low_g_mode_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR HIGH_G HYST CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API reads High-g interrupt hysteresis
+ *	from the register 0x5C bit 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g_hyst(
+u8 *v_high_g_hyst_u8);
+/*!
+ *	@brief This API write High-g interrupt hysteresis
+ *	from the register 0x5C bit 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g_hyst(
+u8 v_high_g_hyst_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR HIGH_G DURATION CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	v_high_g_durn_u8 * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g_durn(
+u8 *v_high_g_durn_u8);
+/*!
+ *	@brief This API is used to write Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	v_high_g_durn_u8 * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g_durn(
+u8 v_high_g_durn_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR HIGH_G THRESHOLD CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the high-g interrupt from the register 0x5E 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : Pointer holding the value of Threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_high_g_thres(
+u8 *v_high_g_thres_u8);
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the high-g interrupt from the register 0x5E 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : Pointer holding the value of Threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_high_g_thres(
+u8 v_high_g_thres_u8);
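+/*
+ * Worked example (illustrative only), assuming the accel range has been
+ * set to 4g elsewhere: an 80 ms high_g delay is 80 / 2.5 = 32, and a
+ * threshold of roughly 1 g is 1000 / 15.63 ~= 64 (64 * 15.63 mg = 1000 mg).
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_high_g_durn(32);
+ *	com_rslt += bmi160_set_intr_high_g_thres(64);
+ */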
+/***************************************************************/
+/**\name	FUNCTION FOR ANY MOTION DURATION CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API reads any motion duration
+ *	from the register 0x5F bit 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration can be calculated by "v_any_motion_durn_u8 + 1"
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_any_motion_durn(
+u8 *v_any_motion_durn_u8);
+/*!
+ *	@brief This API write any motion duration
+ *	from the register 0x5F bit 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration can be calculated by "v_any_motion_durn_u8 + 1"
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_any_motion_durn(
+u8 nomotion);
+/***************************************************************/
+/**\name	FUNCTION FOR SLOW NO MOTION DURATION CONFIGURATION  */
+/***************************************************************/
+ /*!
+ *	@brief This API read Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	@note v_slow_no_motion_u8(5:4)=0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4)=0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5)='1' ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_slow_no_motion_durn(
+u8 *v_slow_no_motion_u8);
+ /*!
+ *	@brief This API write Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	@note v_slow_no_motion_u8(5:4)=0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4)=0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5)='1' ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_slow_no_motion_durn(
+u8 v_slow_no_motion_u8);
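+/*
+ * Worked example (illustrative only): for a no motion delay of 10.24 s,
+ * use the bit(5:4)=0b00 encoding above, i.e. (n + 1) * 1.28 s with n = 7
+ * in bits 3:0, giving a register field value of 0x07.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_slow_no_motion_durn(0x07);
+ */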
+/***************************************************************/
+/**\name	FUNCTION FOR ANY MOTION THRESHOLD CONFIGURATION  */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_any_motion_thres(
+u8 *v_any_motion_thres_u8);
+/*!
+ *	@brief This API is used to write threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_any_motion_thres(
+u8 v_any_motion_thres_u8);
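+/*
+ * Worked example (illustrative only), assuming the 2g accel range
+ * (3.91 mg per LSB): a slope threshold of roughly 100 mg is
+ * 100 / 3.91 ~= 26 (26 * 3.91 mg = 101.7 mg).
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_any_motion_thres(26);
+ */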
+/***************************************************************/
+/**\name	FUNCTION FOR SLO/NO MOTION THRESHOLD CONFIGURATION  */
+/***************************************************************/
+ /*!
+ *	@brief This API is used to read threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_slow_no_motion_thres(
+u8 *v_slow_no_motion_thres_u8);
+ /*!
+ *	@brief This API is used to write threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_slow_no_motion_thres(
+u8 v_slow_no_motion_thres_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR SLO/NO MOTION SELECT CONFIGURATION  */
+/***************************************************************/
+ /*!
+ *	@brief This API is used to read
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_slow_no_motion_select(
+u8 *v_intr_slow_no_motion_select_u8);
+ /*!
+ *	@brief This API is used to write
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_slow_no_motion_select(
+u8 v_intr_slow_no_motion_select_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR SIGNIFICANT MOTION SELECT CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API is used to select
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_significant_motion_select(
+u8 *int_sig_mot_sel);
+ /*!
+ *	@brief This API is used to write, select
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_significant_motion_select(
+u8 int_sig_mot_sel);
+ /*!
+ *	@brief This API is used to read
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_significant_motion_skip(
+u8 *v_int_sig_mot_skip_u8);
+ /*!
+ *	@brief This API is used to write
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_significant_motion_skip(
+u8 v_int_sig_mot_skip_u8);
+ /*!
+ *	@brief This API is used to read
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 second
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_significant_motion_proof(
+u8 *int_sig_mot_proof);
+ /*!
+ *	@brief This API is used to write
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 second
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_significant_motion_proof(
+u8 int_sig_mot_proof);
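+/*
+ * Usage sketch (illustrative only): enable significant motion detection
+ * with a 3 s skip time and a 1 s proof time, using the raw table values
+ * above (select 0x01, skip 0x01, proof 0x02).
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_significant_motion_select(0x01);
+ *	com_rslt += bmi160_set_intr_significant_motion_skip(0x01);
+ *	com_rslt += bmi160_set_intr_significant_motion_proof(0x02);
+ */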
+/***************************************************************/
+/**\name	FUNCTION FOR TAP DURATION CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API is used to get the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_DURN_50MS
+ *  0x01     | BMI160_TAP_DURN_100MS
+ *  0x03     | BMI160_TAP_DURN_150MS
+ *  0x04     | BMI160_TAP_DURN_200MS
+ *  0x05     | BMI160_TAP_DURN_250MS
+ *  0x06     | BMI160_TAP_DURN_375MS
+ *  0x07     | BMI160_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_durn(
+u8 *v_tap_durn_u8);
+/*!
+ *	@brief This API is used to write the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_DURN_50MS
+ *  0x01     | BMI160_TAP_DURN_100MS
+ *  0x03     | BMI160_TAP_DURN_150MS
+ *  0x04     | BMI160_TAP_DURN_200MS
+ *  0x05     | BMI160_TAP_DURN_250MS
+ *  0x06     | BMI160_TAP_DURN_375MS
+ *  0x07     | BMI160_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_durn(
+u8 v_tap_durn_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR TAP SHOCK CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_SHOCK_50MS
+ *  0x01     | BMI160_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_shock(
+u8 *v_tap_shock_u8);
+ /*!
+ *	@brief This API write the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_SHOCK_50MS
+ *  0x01     | BMI160_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_shock(
+u8 v_tap_shock_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR TAP QUIET CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_QUIET_30MS
+ *  0x01     | BMI160_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_quiet(
+u8 *v_tap_quiet_u8);
+/*!
+ *	@brief This API write
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | BMI160_TAP_QUIET_30MS
+ *  0x01     | BMI160_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_quiet(
+u8 v_tap_quiet_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR TAP THRESHOLD CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_tap_thres(
+u8 *v_tap_thres_u8);
+ /*!
+ *	@brief This API write Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_tap_thres(
+u8 v_tap_thres_u8);
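+/*
+ * Usage sketch (illustrative only), assuming the 4g accel range and that
+ * the BMI160_TAP_* macros expand to the values tabulated above: a double
+ * tap window of 250 ms, a 50 ms shock, a 30 ms quiet period and a
+ * threshold of (7 + 1) * 125 mg = 1000 mg.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = bmi160_set_intr_tap_durn(BMI160_TAP_DURN_250MS);
+ *	com_rslt += bmi160_set_intr_tap_shock(BMI160_TAP_SHOCK_50MS);
+ *	com_rslt += bmi160_set_intr_tap_quiet(BMI160_TAP_QUIET_30MS);
+ *	com_rslt += bmi160_set_intr_tap_thres(7);
+ */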
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT MODE CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_mode(
+u8 *v_orient_mode_u8);
+ /*!
+ *	@brief This API write the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_mode(
+u8 v_orient_mode_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT BLOCKING CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read the orient blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_blocking_u8 : The value of orient blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_blocking(
+u8 *v_orient_blocking_u8);
+/*!
+ *	@brief This API write the orient blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_blocking_u8 : The value of orient blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_blocking(
+u8 v_orient_blocking_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT HYSTERESIS CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read Orient interrupt
+ *	hysteresis, from the register 0x65 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_hyst_u8 : The value of orient hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_hyst(
+u8 *v_orient_hyst_u8);
+/*!
+ *	@brief This API write Orient interrupt
+ *	hysteresis, from the register 0x64 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_hyst_u8 : The value of orient hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_hyst(
+u8 v_orient_hyst_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT THETA CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read Orient
+ *	blocking angle (0 to 44.8) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_theta(
+u8 *v_orient_theta_u8);
+ /*!
+ *	@brief This API write Orient
+ *	blocking angle (0 to 44.8) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_theta(
+u8 v_orient_theta_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT OUTPUT ENABLE CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read orient change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_ud_u8 : The value of orient change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_ud_enable(
+u8 *v_orient_ud_u8);
+/*!
+ *	@brief This API write orient change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_ud_u8 : The value of orient change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_ud_enable(
+u8 v_orient_ud_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT AXIS ENABLE CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_axes_u8 : The value of orient axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_orient_axes_enable(
+u8 *v_orient_axes_u8);
+ /*!
+ *	@brief This API write orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_axes_u8 : The value of orient axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_orient_axes_enable(
+u8 v_orient_axes_u8);
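+/*
+ * Example (illustrative sketch only): one possible sequence for
+ * configuring the orientation interrupt with the setters declared
+ * above.  The values are taken from the tables in the comments and are
+ * examples, not recommendations; return codes should be checked
+ * against 0 in real code.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_intr_orient_mode(0x00);      // symmetrical
+ *	com_rslt |= bmi160_set_intr_orient_blocking(0x01);  // theta / >1.5g blocking
+ *	com_rslt |= bmi160_set_intr_orient_hyst(0x01);      // 1 LSB = 62.5 mg
+ *	com_rslt |= bmi160_set_intr_orient_theta(0x08);     // example blocking angle
+ *	com_rslt |= bmi160_set_intr_orient_ud_enable(0x01); // report up/down changes
+ */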
+/***************************************************************/
+/**\name	FUNCTION FOR FLAT THETA CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read Flat angle (0 to 44.8) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat_theta(
+u8 *v_flat_theta_u8);
+ /*!
+ *	@brief This API write Flat angle (0 to 44.8) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat_theta(
+u8 v_flat_theta_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR FLAT HOLD CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read Flat interrupt hold time;
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat_hold(
+u8 *v_flat_hold_u8);
+/*!
+ *	@brief This API write Flat interrupt hold time;
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat_hold(
+u8 v_flat_hold_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR FLAT HYSTERESIS CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_intr_flat_hyst(
+u8 *v_flat_hyst_u8);
+/*!
+ *	@brief This API write flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_intr_flat_hyst(
+u8 v_flat_hyst_u8);
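+/*
+ * Example (illustrative sketch only): configuring the flat interrupt
+ * with the three setters above.  The angle and hysteresis codes are
+ * arbitrary example values; error handling is omitted for brevity.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_intr_flat_theta(0x08);  // example flat angle code
+ *	com_rslt |= bmi160_set_intr_flat_hold(0x01);   // 512 ms hold time
+ *	com_rslt |= bmi160_set_intr_flat_hyst(0x04);   // example hysteresis
+ */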
+/***************************************************************/
+/**\name	FUNCTION FAST OFFSET COMPENSATION FOR ACCEL */
+/***************************************************************/
+ /*!
+ *	@brief This API read accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_accel_z(
+u8 *v_foc_accel_z_u8);
+ /*!
+ *	@brief This API write accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_accel_z(
+u8 v_foc_accel_z_u8);
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_accel_y(
+u8 *v_foc_accel_y_u8);
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_accel_y(
+u8 v_foc_accel_y_u8);
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for x-axis is
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_accel_x(
+u8 *v_foc_accel_x_u8);
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for x-axis is
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_accel_x(
+u8 v_foc_accel_x_u8);
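+/*
+ * Example (illustrative sketch only): programming the per-axis fast
+ * offset compensation targets for a device lying flat, so x and y
+ * expect 0g and z expects +1g.  The target codes follow the corrected
+ * tables above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_foc_accel_x(0x03);  // 0g target
+ *	com_rslt |= bmi160_set_foc_accel_y(0x03);  // 0g target
+ *	com_rslt |= bmi160_set_foc_accel_z(0x01);  // +1g target
+ */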
+/***************************************************************/
+/**\name	FUNCTION FAST OFFSET COMPENSATION FOR GYRO */
+/***************************************************************/
+/*!
+ *	@brief This API write gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *	@param v_gyro_off_x_s16 : The value of gyro fast offset x axis data
+ *	@param v_gyro_off_y_s16 : The value of gyro fast offset y axis data
+ *	@param v_gyro_off_z_s16 : The value of gyro fast offset z axis data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_foc_gyro_enable(
+u8 v_foc_gyro_u8, s16 *v_gyro_off_x_s16,
+s16 *v_gyro_off_y_s16, s16 *v_gyro_off_z_s16);
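+/*
+ * Example (illustrative sketch only): enabling gyro fast offset
+ * compensation and collecting the resulting per-axis offsets through
+ * the three output pointers described above.
+ *
+ *	s16 gyro_off_x, gyro_off_y, gyro_off_z;
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt = bmi160_set_foc_gyro_enable(0x01,
+ *		&gyro_off_x, &gyro_off_y, &gyro_off_z);
+ */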
+/***************************************************/
+/**\name	FUNCTION FOR NVM*/
+/***************************************************/
+ /*!
+ *	@brief This API read NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_nvm_prog_enable(
+u8 *v_nvm_prog_u8);
+ /*!
+ *	@brief This API write NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_nvm_prog_enable(
+u8 v_nvm_prog_u8);
+/***************************************************/
+/**\name	FUNCTION FOR SPI MODE*/
+/***************************************************/
+/*!
+ * @brief This API read to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_spi3(
+u8 *v_spi3_u8);
+/*!
+ * @brief This API write to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_spi3(
+u8 v_spi3_u8);
+/***************************************************/
+/**\name	FUNCTION FOR FOC GYRO */
+/***************************************************/
+/*!
+ *	@brief This API read gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_foc_gyro_enable(
+u8 *v_foc_gyro_u8);
+/***************************************************/
+/**\name	FUNCTION FOR I2C WATCHDOG TIMER */
+/***************************************************/
+/*!
+ *	@brief This API read I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog v_timeout_u8 after 1 ms
+ *   1     |  I2C watchdog v_timeout_u8 after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_wdt_select(
+u8 *v_i2c_wdt_u8);
+/*!
+ *	@brief This API write I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog v_timeout_u8 after 1 ms
+ *   1     |  I2C watchdog v_timeout_u8 after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE
+bmi160_set_i2c_wdt_select(u8 v_i2c_wdt_u8);
+/*!
+ *	@brief This API read I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_i2c_wdt_enable(
+u8 *v_i2c_wdt_u8);
+/*!
+ *	@brief This API write I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_i2c_wdt_enable(
+u8 v_i2c_wdt_u8);
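+/*
+ * Example (illustrative sketch only): selecting the 50 ms I2C watchdog
+ * timeout and enabling the watchdog, using the values from the tables
+ * above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_i2c_wdt_select(0x01);  // timeout after 50 ms
+ *	com_rslt |= bmi160_set_i2c_wdt_enable(0x01);  // enable the watchdog
+ */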
+/***************************************************/
+/**\name	FUNCTION FOR IF MODE*/
+/***************************************************/
+/*!
+ * @brief This API read I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_if_mode(
+u8 *v_if_mode_u8);
+/*!
+ * @brief This API write I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_if_mode(
+u8 v_if_mode_u8);
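+/*
+ * Example (illustrative sketch only): checking the interface
+ * configuration and selecting the secondary magnetometer interface
+ * (mode 0x02) when it is not already active, as the magnetometer
+ * helpers further below require.
+ *
+ *	u8 if_mode = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt = bmi160_get_if_mode(&if_mode);
+ *	if ((com_rslt == 0) && (if_mode != 0x02))
+ *		com_rslt = bmi160_set_if_mode(0x02);
+ */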
+/***************************************************/
+/**\name	FUNCTION FOR GYRO SLEEP TRIGGER INTERRUPT CONFIGURATION*/
+/***************************************************/
+/*!
+ *	@brief This API read gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | nomotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | nomotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | nomotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_sleep_trigger(
+u8 *v_gyro_sleep_trigger_u8);
+/*!
+ *	@brief This API write gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | nomotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | nomotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | nomotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_sleep_trigger(
+u8 v_gyro_sleep_trigger_u8);
+/*!
+ *	@brief This API read gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_wakeup_trigger(
+u8 *v_gyro_wakeup_trigger_u8);
+/*!
+ *	@brief This API write gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_wakeup_trigger(
+u8 v_gyro_wakeup_trigger_u8);
+/*!
+ *	@brief This API read Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_sleep_state(
+u8 *v_gyro_sleep_state_u8);
+/*!
+ *	@brief This API write Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_sleep_state(
+u8 v_gyro_sleep_state_u8);
+/*!
+ *	@brief This API read gyro wakeup interrupt
+ *	from the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_wakeup_intr(
+u8 *v_gyro_wakeup_intr_u8);
+/*!
+ *	@brief This API write gyro wakeup interrupt
+ *	from the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_wakeup_intr(
+u8 v_gyro_wakeup_intr_u8);
+/***************************************************/
+/**\name	FUNCTION FOR ACCEL SELF TEST */
+/***************************************************/
+/*!
+ * @brief This API read the accel self-test axis selection
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_selftest_axis(
+u8 *acc_selftest_axis);
+/*!
+ * @brief This API write the accel self-test axis selection
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_selftest_axis(
+u8 acc_selftest_axis);
+/*!
+ *	@brief This API read accel self test axis sign
+ *	from the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_selftest_sign(
+u8 *acc_selftest_sign);
+/*!
+ *	@brief This API write accel self test axis sign
+ *	from the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_selftest_sign(
+u8 acc_selftest_sign);
+/*!
+ *	@brief This API read accel self test amplitude
+ *	from the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_selftest_amp(
+u8 *acc_selftest_amp);
+/*!
+ *	@brief This API write accel self test amplitude
+ *	from the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_selftest_amp(
+u8 acc_selftest_amp);
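+/*
+ * Example (illustrative sketch only): arming the accel self-test for a
+ * positive, high-amplitude deflection on the x axis, using the codes
+ * from the tables above.  Reading back and judging the resulting data
+ * is left to the caller.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_accel_selftest_axis(0x01);  // x-axis
+ *	com_rslt |= bmi160_set_accel_selftest_sign(0x01);  // positive
+ *	com_rslt |= bmi160_set_accel_selftest_amp(0x01);   // high amplitude
+ */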
+/***************************************************/
+/**\name	FUNCTION FOR GYRO SELF TEST */
+/***************************************************/
+/*!
+ *	@brief This API read gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_selftest_start(
+u8 *v_gyro_selftest_start_u8);
+/*!
+ *	@brief This API write gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_selftest_start(
+u8 v_gyro_selftest_start_u8);
+/***************************************************/
+/**\name	FUNCTION FOR SPI/I2C ENABLE */
+/***************************************************/
+ /*!
+ * @brief This API read primary interface selection I2C or SPI
+ *	from the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_spi_enable(
+u8 *v_spi_enable_u8);
+ /*!
+ * @brief This API write primary interface selection I2C or SPI
+ *	from the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_spi_enable(
+u8 v_spi_enable_u8);
+ /*!
+ *	@brief This API read the spare zero
+ *	from register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_spare0_trim
+(u8 *v_spare0_trim_u8);
+ /*!
+ *	@brief This API write the spare zero
+ *	from register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_spare0_trim
+(u8 v_spare0_trim_u8);
+/***************************************************/
+/**\name	FUNCTION FOR NVM COUNTER */
+/***************************************************/
+ /*!
+ *	@brief This API read the NVM counter
+ *	from register 0x70 bit 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_nvm_counter(
+u8 *v_nvm_counter_u8);
+ /*!
+ *	@brief This API write the NVM counter
+ *	from register 0x70 bit 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_nvm_counter(
+u8 v_nvm_counter_u8);
+/***************************************************/
+/**\name	FUNCTION FOR ACCEL MANUAL OFFSET COMPENSATION */
+/***************************************************/
+/*!
+ *	@brief This API read accel manual offset compensation of x axis
+ *	from the register 0x71 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_compensation_xaxis(
+s8 *v_accel_off_x_s8);
+/*!
+ *	@brief This API write accel manual offset compensation of x axis
+ *	from the register 0x71 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_compensation_xaxis(
+s8 v_accel_off_x_s8);
+/*!
+ *	@brief This API read accel manual offset compensation of y axis
+ *	from the register 0x72 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_compensation_yaxis(
+s8 *v_accel_off_y_s8);
+/*!
+ *	@brief This API write accel manual offset compensation of y axis
+ *	from the register 0x72 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_compensation_yaxis(
+s8 v_accel_off_y_s8);
+/*!
+ *	@brief This API read accel manual offset compensation of z axis
+ *	from the register 0x73 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_compensation_zaxis(
+s8 *v_accel_off_z_s8);
+/*!
+ *	@brief This API write accel manual offset compensation of z axis
+ *	from the register 0x73 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_compensation_zaxis(
+s8 v_accel_off_z_s8);
+/***************************************************/
+/**\name	FUNCTION FOR GYRO MANUAL OFFSET COMPENSATION */
+/***************************************************/
+/*!
+ *	@brief This API read gyro manual offset compensation of x axis
+ *	from the register 0x74 bit 0 to 7 and 0x77 bit 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_compensation_xaxis(
+s16 *v_gyro_off_x_s16);
+/*!
+ *	@brief This API write gyro manual offset compensation of x axis
+ *	from the register 0x74 bit 0 to 7 and 0x77 bit 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_compensation_xaxis(
+s16 v_gyro_off_x_s16);
+/*!
+ *	@brief This API read gyro manual offset compensation of y axis
+ *	from the register 0x75 bit 0 to 7 and 0x77 bit 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_compensation_yaxis(
+s16 *v_gyro_off_y_s16);
+/*!
+ *	@brief This API write gyro manual offset compensation of y axis
+ *	from the register 0x75 bit 0 to 7 and 0x77 bit 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_compensation_yaxis(
+s16 v_gyro_off_y_s16);
+/*!
+ *	@brief This API read gyro manual offset compensation of z axis
+ *	from the register 0x76 bit 0 to 7 and 0x77 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_compensation_zaxis(
+s16 *v_gyro_off_z_s16);
+/*!
+ *	@brief This API write gyro manual offset compensation of z axis
+ *	from the register 0x76 bit 0 to 7 and 0x77 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_compensation_zaxis(
+s16 v_gyro_off_z_s16);
+/*!
+ *	@brief This API writes the accel fast offset compensation
+ *	target value to the register 0x69 bit 0 to 5,
+ *	one axis at a time:
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_u8: The value of accel offset compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_axis_u8: The value of accel offset axis selection
+ *	value    | axis
+ * ----------|-------------------
+ *  0        | FOC_X_AXIS
+ *  1        | FOC_Y_AXIS
+ *  2        | FOC_Z_AXIS
+ *
+ *	@param v_accel_offset_s8: The accel offset value
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_foc_trigger(u8 axis,
+u8 foc_acc, s8 *accel_offset);
+/*!
+ *	@brief This API writes the fast accel offset compensation
+ *	for all axes together to the register 0x69 bit 0 to 5
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_x_u8: The value of accel offset x compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_y_u8: The value of accel offset y compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_z_u8: The value of accel offset z compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_accel_off_x_s8: The value of accel offset x axis
+ *  @param  v_accel_off_y_s8: The value of accel offset y axis
+ *  @param  v_accel_off_z_s8: The value of accel offset z axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_accel_foc_trigger_xyz(u8 v_foc_accel_x_u8,
+u8 v_foc_accel_y_u8, u8 v_foc_accel_z_u8,
+s8 *acc_off_x, s8 *acc_off_y, s8 *acc_off_z);
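+/*
+ * Example (illustrative sketch only): triggering fast offset
+ * compensation for all three accel axes at once for a device lying
+ * flat (0g target on x and y, +1g on z, per the corrected tables
+ * above) and collecting the computed offsets.
+ *
+ *	s8 off_x, off_y, off_z;
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt = bmi160_accel_foc_trigger_xyz(0x03, 0x03, 0x01,
+ *		&off_x, &off_y, &off_z);
+ */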
+/***************************************************/
+/**\name	FUNCTION FOR ACCEL AND GYRO OFFSET ENABLE */
+/***************************************************/
+/*!
+ *	@brief This API read the accel offset enable bit
+ *	from the register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_accel_offset_enable(
+u8 *acc_off_en);
+/*!
+ *	@brief This API write the accel offset enable bit
+ *	from the register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_accel_offset_enable(
+u8 acc_off_en);
+/*!
+ *	@brief This API read the gyro offset enable bit
+ *	from the register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_gyro_offset_enable(
+u8 *v_gyro_off_enable_u8);
+/*!
+ *	@brief This API write the gyro offset enable bit
+ *	from the register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_gyro_offset_enable(
+u8 v_gyro_off_enable_u8);
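+/*
+ * Example (illustrative sketch only): applying a manual accel offset
+ * on the x axis (an arbitrary example value) and then enabling both
+ * offset compensations with the setters above.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_accel_offset_compensation_xaxis(-5);
+ *	com_rslt |= bmi160_set_accel_offset_enable(0x01);
+ *	com_rslt |= bmi160_set_gyro_offset_enable(0x01);
+ */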
+/***************************************************/
+/**\name	FUNCTION FOR STEP COUNTER INTERRUPT */
+/***************************************************/
+/*!
+ *	@brief This API reads step counter value
+ *	from the register 0x78 and 0x79
+ *
+ *
+ *
+ *
+ *  @param v_step_cnt_s16 : The value of step counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_step_count(u16 *v_step_cnt_s16);
+ /*!
+ *	@brief This API Reads
+ *	step counter configuration
+ *	from the register 0x7A bit 0 to 7
+ *	and from the register 0x7B bit 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16 : The value of step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_step_config(
+u16 *v_step_config_u16);
+ /*!
+ *	@brief This API write
+ *	step counter configuration
+ *	from the register 0x7A bit 0 to 7
+ *	and from the register 0x7B bit 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16   :
+ *	The value of step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_config(
+u16 v_step_config_u16);
+ /*!
+ *	@brief This API read enable step counter
+ *	from the register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_step_counter_enable(
+u8 *v_step_counter_u8);
+ /*!
+ *	@brief This API write enable step counter
+ *	from the register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_counter_enable(
+u8 v_step_counter_u8);
+ /*!
+ *	@brief This API set Step counter modes
+ *
+ *
+ *  @param  v_step_mode_u8 : The value of step counter mode
+ *  value    |   mode
+ * ----------|-----------
+ *   0       | BMI160_STEP_NORMAL_MODE
+ *   1       | BMI160_STEP_SENSITIVE_MODE
+ *   2       | BMI160_STEP_ROBUST_MODE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_step_mode(u8 v_step_mode_u8);
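+/*
+ * Example (illustrative sketch only): enabling the step counter in
+ * normal mode and reading the accumulated count with the declarations
+ * above.
+ *
+ *	u16 step_count = 0;
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_step_mode(BMI160_STEP_NORMAL_MODE);
+ *	com_rslt |= bmi160_set_step_counter_enable(0x01);
+ *	// ... walk for a while ...
+ *	com_rslt |= bmi160_read_step_count(&step_count);
+ */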
+/*!
+ *	@brief This API used to map the significant motion
+ *	interrupt to INT1 or INT2
+ *
+ *
+ *  @param  v_significant_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  BMI160_MAP_INTR1
+ *   1       |  BMI160_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_map_significant_motion_intr(
+u8 v_significant_u8);
+/*!
+ *	@brief This API used to map the step detector
+ *	interrupt to INT1 or INT2
+ *
+ *
+ *  @param  v_step_detector_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  BMI160_MAP_INTR1
+ *   1       |  BMI160_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_map_step_detector_intr(
+u8 v_step_detector_u8);
+ /*!
+ *	@brief This API used to clear the step counter
+ *
+ *
+ *  @param  : None
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_clear_step_counter(void);
+/***************************************************/
+/**\name	FUNCTION FOR STEP COMMAND REGISTER WRITE */
+/***************************************************/
+ /*!
+ *	@brief This API writes value to the register 0x7E bit 0 to 7
+ *
+ *
+ *  @param  v_command_reg_u8 : The value to write command register
+ *  value   |  Description
+ * ---------|--------------------------------------------------------
+ *	0x00	|	Reserved
+ *  0x03	|	Starts fast offset calibration for the accel and gyro
+ *	0x10	|	Sets the PMU mode for the Accelerometer to suspend
+ *	0x11	|	Sets the PMU mode for the Accelerometer to normal
+ *	0x12	|	Sets the PMU mode for the Accelerometer to low power
+ *  0x14	|	Sets the PMU mode for the Gyroscope to suspend
+ *	0x15	|	Sets the PMU mode for the Gyroscope to normal
+ *	0x16	|	Reserved
+ *	0x17	|	Sets the PMU mode for the Gyroscope to fast start-up
+ *  0x18	|	Sets the PMU mode for the Magnetometer to suspend
+ *	0x19	|	Sets the PMU mode for the Magnetometer to normal
+ *	0x1A	|	Sets the PMU mode for the Magnetometer to Lowpower
+ *	0xB0	|	Clears all data in the FIFO
+ *  0xB1	|	Resets the interrupt engine
+ *	0xB2	|	step_cnt_clr Clears the step counter
+ *	0xB6	|	Triggers a reset
+ *	0x37	|	See extmode_en_last
+ *	0x9A	|	See extmode_en_last
+ *	0xC0	|	Enable the extended mode
+ *  0xC4	|	Erase NVM cell
+ *	0xC8	|	Load NVM cell
+ *	0xF0	|	Reset acceleration data path
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_command_register(
+u8 v_command_reg_u8);
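+/*
+ * Example (illustrative sketch only): using the command register to
+ * bring the accelerometer and gyroscope PMUs to normal mode with the
+ * values from the table above.  The data sheet requires a settling
+ * delay after each PMU command, which is omitted here.
+ *
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_set_command_register(0x11);  // accel -> normal
+ *	com_rslt |= bmi160_set_command_register(0x15);  // gyro -> normal
+ */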
+/***************************************************/
+/**\name	FUNCTION FOR PAGE ENABLE */
+/***************************************************/
+ /*!
+ *	@brief This API read target page from the register 0x7F bit 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_target_page(
+u8 *v_target_page_u8);
+ /*!
+ *	@brief This API write target page from the register 0x7F bit 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_target_page(
+u8 v_target_page_u8);
+ /*!
+ *	@brief This API read page enable from the register 0x7F bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_paging_enable(
+u8 *v_page_enable_u8);
+ /*!
+ *	@brief This API write page enable from the register 0x7F bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_paging_enable(
+u8 v_page_enable_u8);
+ /*!
+ *	@brief This API read
+ *	pull up configuration from the register 0x85 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_get_pullup_configuration(
+u8 *v_control_pullup_u8);
+ /*!
+ *	@brief This API write
+ *	pull up configuration from the register 0x85 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_pullup_configuration(
+u8 v_control_pullup_u8);
+/***************************************************/
+/**\name	FUNCTION FOR BMM150 */
+/***************************************************/
+ /*!
+ *	@brief This function used for initialize the bmm150 sensor
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_interface_init(void);
+ /*!
+ *	@brief This function used for set the mag power control
+ *	bit enable
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_wakeup(void);
+ /*!
+ *	@brief This function used for read the trim values of magnetometer
+ *
+ *	@note
+ *	Before reading the mag trimming values
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_bmm150_mag_trim(void);
+ /*!
+ *	@brief This function used for read the compensated value of mag
+ *	Before reading the mag compensated data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_compensate_xyz(
+struct bmi160_mag_xyz_s32_t *mag_comp_xyz);
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_compensate_xyz_raw(
+struct bmi160_mag_xyz_s32_t *mag_comp_xyz, struct bmi160_mag_xyzr_t mag_xyzr);
+
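+/*
+ * Example (illustrative sketch only): one possible bring-up and read
+ * sequence for the attached BMM150 using the helpers declared in this
+ * section.  Depending on the driver version,
+ * bmi160_bmm150_mag_interface_init() may already perform some of these
+ * steps internally.
+ *
+ *	struct bmi160_mag_xyz_s32_t mag_data;
+ *	BMI160_RETURN_FUNCTION_TYPE com_rslt;
+ *	com_rslt  = bmi160_bmm150_mag_interface_init();
+ *	com_rslt |= bmi160_bmm150_mag_wakeup();
+ *	com_rslt |= bmi160_read_bmm150_mag_trim();
+ *	com_rslt |= bmi160_bmm150_mag_compensate_xyz(&mag_data);
+ */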
+/*!
+ *	@brief This API used to get the compensated BMM150-X data
+ *	the output of X as s32
+ *	Before start reading the mag compensated X data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *
+ *  @param  v_mag_data_x_s16 : The value of mag raw X data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 bmi160_bmm150_mag_compensate_X(s16 v_mag_data_x_s16, u16 v_data_r_u16);
+/*!
+ *	@brief This API used to get the compensated BMM150-Y data
+ *	the output of Y as s32
+ *	Before start reading the mag compensated Y data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *
+ *  @param  v_mag_data_y_s16 : The value of mag raw Y data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Y data value output as s32
+ */
+s32 bmi160_bmm150_mag_compensate_Y(s16 v_mag_data_y_s16, u16 v_data_r_u16);
+/*!
+ *	@brief This API used to get the compensated BMM150-Z data
+ *	the output of Z as s32
+ *	Before start reading the mag compensated Z data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *
+ *  @param  v_mag_data_z_s16 : The value of mag raw Z data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Z data value output as s32
+ */
+s32 bmi160_bmm150_mag_compensate_Z(s16 v_mag_data_z_s16, u16 v_data_r_u16);
+/*!
+ *	@brief This API used to set the pre-set modes of bmm150
+ *	The pre-set mode setting depends on the data rate and the xy and z repetitions
+ *
+ *	@note
+ *	Before set the mag preset mode
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *  @param  v_mode_u8: The value of pre-set mode selection value
+ *  value    |  pre_set mode
+ * ----------|------------
+ *   1       | BMI160_MAG_PRESETMODE_LOWPOWER
+ *   2       | BMI160_MAG_PRESETMODE_REGULAR
+ *   3       | BMI160_MAG_PRESETMODE_HIGHACCURACY
+ *   4       | BMI160_MAG_PRESETMODE_ENHANCED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_bmm150_mag_presetmode(u8 mode);
+/*!
+ *	@brief This function used for set the magnetometer
+ *	power mode.
+ *	@note
+ *	Before set the mag power mode
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set it to NORMAL mode (0x19) by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *	@param v_mag_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | FORCE_MODE
+ *   1       | SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bmm150_mag_set_power_mode(u8 mag_pow_mode);
+ /*!
+ *	@brief This function used for set the magnetometer and the
+ *	secondary interface power mode.
+ *	@note
+ *	Before set the mag power mode
+ *	make sure the following point is addressed
+ *		Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling bmi160_set_if_mode(0x02)
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | BMI160_MAG_FORCE_MODE
+ *   1       | BMI160_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_bmm150_mag_and_secondary_if_power_mode(
+u8 v_mag_sec_if_pow_mode_u8);
+/***************************************************/
+/**\name	FUNCTIONS FOR AKM09911 AND AKM09912*/
+/***************************************************/
+ /*!
+ *	@brief This function is used to initialize
+ *	the AKM09911 and AKM09912 sensor
+ *
+ *
+ *	@param v_akm_i2c_address_u8: The value of device address
+ *	AKM sensor   |  Slave address
+ * --------------|---------------------
+ *  AKM09911     |  AKM09911_I2C_ADDR_1
+ *     -         |  and AKM09911_I2C_ADDR_2
+ *  AKM09912     |  AKM09912_I2C_ADDR_1
+ *     -         |  AKM09912_I2C_ADDR_2
+ *     -         |  AKM09912_I2C_ADDR_3
+ *     -         |  AKM09912_I2C_ADDR_4
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm_mag_interface_init(
+u8 v_akm_i2c_address_u8);
+ /*!
+ *	@brief This function is used to read the sensitivity data of
+ *	the AKM09911 and AKM09912
+ *
+ *	@note Before reading the mag sensitivity values
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_bst_akm_sensitivity_data(void);
+/*!
+ *	@brief This API is used to get the compensated X data
+ *	of the AKM09911; the output of X is returned as s32.
+ *	@note	Before starting to read the mag compensated X data,
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bst_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09911_compensate_X(s16 v_bst_akm_x_s16);
+/*!
+ *	@brief This API is used to get the compensated Y data
+ *	of the AKM09911; the output of Y is returned as s32.
+ *	@note	Before starting to read the mag compensated Y data,
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bst_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09911_compensate_Y(s16 v_bst_akm_y_s16);
+/*!
+ *	@brief This API is used to get the compensated Z data
+ *	of the AKM09911; the output of Z is returned as s32.
+ *	@note	Before starting to read the mag compensated Z data,
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bst_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09911_compensate_Z(s16 v_bst_akm_z_s16);
+/*!
+ *	@brief This API is used to get the compensated X data
+ *	of the AKM09912; the output of X is returned as s32.
+ *	@note	Before starting to read the mag compensated X data,
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bst_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09912_compensate_X(s16 v_bst_akm_x_s16);
+/*!
+ *	@brief This API is used to get the compensated Y data
+ *	of the AKM09912; the output of Y is returned as s32.
+ *	@note	Before starting to read the mag compensated Y data,
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bst_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09912_compensate_Y(s16 v_bst_akm_y_s16);
+/*!
+ *	@brief This API is used to get the compensated Z data
+ *	of the AKM09912; the output of Z is returned as s32.
+ *	@note	Before starting to read the mag compensated Z data,
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bst_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 bmi160_bst_akm09912_compensate_Z(s16 v_bst_akm_z_s16);
+ /*!
+ *	@brief This function is used to read the compensated value of
+ *	the AKM09911 (an illustrative call sequence follows the prototype).
+ *	@note Before starting to read the mag compensated data,
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm09911_compensate_xyz(
+struct bmi160_mag_xyz_s32_t *bst_akm_xyz);
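+/*
+ * Illustrative usage (a minimal sketch, not a normative sequence):
+ * one possible AKM09911 compensated-data read path, assuming the
+ * interface/power-mode preconditions above are met and that
+ * AKM09911_I2C_ADDR_1 is the populated slave address.
+ *
+ *	struct bmi160_mag_xyz_s32_t mag_xyz;
+ *	bmi160_bst_akm_mag_interface_init(AKM09911_I2C_ADDR_1);
+ *	bmi160_read_bst_akm_sensitivity_data();	// fetch sensitivity once
+ *	bmi160_bst_akm09911_compensate_xyz(&mag_xyz);
+ */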
+ /*!
+ *	@brief This function is used to read the compensated value of
+ *	the AKM09912.
+ *	@note Before starting to read the mag compensated data,
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm09912_compensate_xyz(
+struct bmi160_mag_xyz_s32_t *bst_akm_xyz);
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm09912_compensate_xyz_raw(
+struct bmi160_mag_xyz_s32_t *bst_akm_xyz);
+/*!
+ *	@brief This function is used to set the AKM09911 and AKM09912
+ *	power mode.
+ *	@note Before setting the AKM power mode,
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the bmi160_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function bmi160_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function bmi160_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		bmi160_set_command_register(0x19) function.
+ *
+ *	@param v_akm_pow_mode_u8 : The value of akm power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  AKM_POWER_DOWN_MODE
+ *    1     |  AKM_SINGLE_MEAS_MODE
+ *    2     |  FUSE_ROM_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_akm_set_powermode(u8 v_akm_pow_mode_u8);
+ /*!
+ *	@brief This function is used to set the magnetometer
+ *	power mode of the AKM09911 and AKM09912.
+ *	@note Before setting the mag power mode,
+ *	make sure the following point is addressed:
+ *		Check whether the mag interface is enabled
+ *		by using the bmi160_get_if_mode() function.
+ *		If the mag interface is not enabled, set the value 0x02
+ *		(see bmi160_get_if_mode(0x02)).
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of secondary if power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  BMI160_MAG_FORCE_MODE
+ *    1     |  BMI160_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_set_bst_akm_and_secondary_if_powermode(
+u8 v_mag_sec_if_pow_mode_u8);
+/***************************************************/
+/**\name	FUNCTIONS FOR YAMAHA YAS532 */
+/***************************************************/
+/*!
+ *	@brief This function is used to initialize the YAMAHA YAS532 mag interface
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas532_mag_interface_init(
+void);
+/*!
+ *	@brief This function is used to set the YAS532 initial values
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_set_initial_values(void);
+/*!
+ *	@brief This function is used for YAS532 offset correction
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_magnetic_measure_set_offset(
+void);
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS532 calibration data
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas532_calib_values(void);
+/*!
+ *	@brief This function is used to convert the YAS532
+ *	xy1y2 readings into linear data
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_xy1y2_to_linear(
+u16 *v_xy1y2_u16, s32 *xy1y2_linear);
+/*!
+ *	@brief This function is used to read the YAS532 sensor data
+ *	@param	v_acquisition_command_u8: used to set the data acquisition command
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *	@param	v_busy_u8 : used to get the busy flag for the sensor data read
+ *	@param	v_temp_u16 : used to get the temperature data
+ *	@param	v_xy1y2_u16 : used to get the sensor xy1y2 data
+ *	@param	v_overflow_u8 : used to get the overflow data
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_normal_measurement_data(
+u8 v_acquisition_command_u8, u8 *v_busy_u8,
+u16 *v_temp_u16, u16 *v_xy1y2_u16, u8 *v_overflow_u8);
+/*!
+ *	@brief This function is used to measure the YAS532 xyz sensor data
+ *	(an illustrative call sequence follows the prototype below).
+ *	@param	v_acquisition_command_u8	:	the value of CMDR
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ * @param xyz_data : the vector xyz output
+ * @param v_overflow_s8 : the value of overflow
+ * @param v_temp_correction_u8 : the value of the temperature correction enable
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_measurement_xyz_data(
+struct yas532_vector *xyz_data, u8 *v_overflow_s8, u8 v_temp_correction_u8,
+u8 v_acquisition_command_u8);
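+/*
+ * Illustrative usage (a minimal sketch, not a normative sequence):
+ * a YAS532 measurement using command 0x01 from the table above
+ * (coil off, plus direction, normal acquisition mode) with the
+ * temperature correction enabled.
+ *
+ *	struct yas532_vector xyz;
+ *	u8 overflow = 0;
+ *	bmi160_bst_yamaha_yas532_mag_interface_init();
+ *	bmi160_bst_yas532_measurement_xyz_data(&xyz, &overflow, 1, 0x01);
+ */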
+/*!
+ *	@brief This function is used to write the YAS532 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of the data acquisition command
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_acquisition_command_register(
+u8 v_command_reg_data_u8);
+/*!
+ *	@brief This function is used to write the offset of the YAS532
+ *
+ *	@param	p_offset_s8	: The value of offset to write
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas532_set_offset(
+const s8 *p_offset_s8);
+/*!
+ *	@brief This function is used to initialize the YAMAHA YAS537 mag interface
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_mag_interface_init(
+void);
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS537 calibration data
+ *
+ *
+ *	@param v_rcoil_u8 : The value of r coil
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_calib_values(
+u8 v_rcoil_u8);
+/*!
+ *	@brief This function is used to write the YAS537 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of the data acquisition command
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yas537_acquisition_command_register(
+u8 v_command_reg_data_u8);
+
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS537 xy1y2 data
+ *
+ *	@param v_coil_stat_u8: The value of R coil status
+ *	@param v_busy_u8: The value of busy status
+ *	@param v_temperature_u16: The value of temperature
+ *	@param xy1y2: The value of raw xy1y2 data
+ *	@param v_ouflow_u8: The value of overflow
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_read_xy1y2_data(
+u8 *v_coil_stat_u8, u8 *v_busy_u8,
+u16 *v_temperature_u16, u16 *xy1y2, u8 *v_ouflow_u8);
+/*!
+ *	@brief This function is used to measure the
+ *	YAMAHA YAS537 xyz data
+ *
+ *	@param v_ouflow_u8: The value of overflow
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_bst_yamaha_yas537_measure_xyz_data(
+u8 *v_ouflow_u8, struct yas_vector *vector_xyz);
+
+/***************************************************/
+/**\name	FUNCTIONS FOR FIFO DATA READ */
+/***************************************************/
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in headerless mode (see the sketch after the prototype).
+ *
+ *
+ *
+ *	@note Configure the below functions for FIFO header less mode
+ *	@note 1. bmi160_set_fifo_down_gyro
+ *	@note 2. bmi160_set_gyro_fifo_filter_data
+ *	@note 3. bmi160_set_fifo_down_accel
+ *	@note 4. bmi160_set_accel_fifo_filter_dat
+ *	@note 5. bmi160_set_fifo_mag_enable
+ *	@note 6. bmi160_set_fifo_accel_enable
+ *	@note 7. bmi160_set_fifo_gyro_enable
+ *	@note For interrupt configuration
+ *	@note 1. bmi160_set_intr_fifo_full
+ *	@note 2. bmi160_set_intr_fifo_wm
+ *	@note 3. bmi160_set_fifo_tag_intr2_enable
+ *	@note 4. bmi160_set_fifo_tag_intr1_enable
+ *
+ *	@note The FIFO read fetches the whole 1024 bytes
+ *	and processes the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_fifo_headerless_mode(
+void);
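+/*
+ * Illustrative usage (a minimal sketch, not a normative sequence):
+ * a minimal headerless-mode setup, assuming the accel/gyro are already
+ * in NORMAL power mode and that the fifo enable setters take 1/0 as in
+ * the driver code.
+ *
+ *	bmi160_set_fifo_accel_enable(1);
+ *	bmi160_set_fifo_gyro_enable(1);
+ *	bmi160_set_fifo_mag_enable(0);
+ *	bmi160_set_command_register(CMD_CLR_FIFO_DATA);	// flush stale frames
+ *	bmi160_read_fifo_headerless_mode();
+ */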
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in headerless mode using a user-defined length
+ *
+ *
+ *	@param v_fifo_user_length_u16: The value of length of fifo read data
+ *
+ *	@note Configure the below functions for FIFO header less mode
+ *	@note 1. bmi160_set_fifo_down_gyro
+ *	@note 2. bmi160_set_gyro_fifo_filter_data
+ *	@note 3. bmi160_set_fifo_down_accel
+ *	@note 4. bmi160_set_accel_fifo_filter_dat
+ *	@note 5. bmi160_set_fifo_mag_enable
+ *	@note 6. bmi160_set_fifo_accel_enable
+ *	@note 7. bmi160_set_fifo_gyro_enable
+ *	@note For interrupt configuration
+ *	@note 1. bmi160_set_intr_fifo_full
+ *	@note 2. bmi160_set_intr_fifo_wm
+ *	@note 3. bmi160_set_fifo_tag_intr2_enable
+ *	@note 4. bmi160_set_fifo_tag_intr1_enable
+ *
+ *	@note The fifo reads the whole 1024 bytes
+ *	and processing the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE
+bmi160_read_fifo_headerless_mode_user_defined_length(
+u16 v_fifo_user_length_u16);
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in header mode
+ *
+ *
+ *	@note Configure the below functions for FIFO header mode
+ *	@note 1. bmi160_set_fifo_down_gyro()
+ *	@note 2. bmi160_set_gyro_fifo_filter_data()
+ *	@note 3. bmi160_set_fifo_down_accel()
+ *	@note 4. bmi160_set_accel_fifo_filter_dat()
+ *	@note 5. bmi160_set_fifo_mag_enable()
+ *	@note 6. bmi160_set_fifo_accel_enable()
+ *	@note 7. bmi160_set_fifo_gyro_enable()
+ *	@note 8. bmi160_set_fifo_header_enable()
+ *	@note For interrupt configuration
+ *	@note 1. bmi160_set_intr_fifo_full()
+ *	@note 2. bmi160_set_intr_fifo_wm()
+ *	@note 3. bmi160_set_fifo_tag_intr2_enable()
+ *	@note 4. bmi160_set_fifo_tag_intr1_enable()
+ *
+ *	@note The fifo reads the whole 1024 bytes
+ *	and processing the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_fifo_header_data(
+void);
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in header mode using a user-defined length
+ *	(see the sketch after the prototype).
+ *
+ *
+ *	@note Configure the below functions for FIFO header mode
+ *	@note 1. bmi160_set_fifo_down_gyro()
+ *	@note 2. bmi160_set_gyro_fifo_filter_data()
+ *	@note 3. bmi160_set_fifo_down_accel()
+ *	@note 4. bmi160_set_accel_fifo_filter_dat()
+ *	@note 5. bmi160_set_fifo_mag_enable()
+ *	@note 6. bmi160_set_fifo_accel_enable()
+ *	@note 7. bmi160_set_fifo_gyro_enable()
+ *	@note 8. bmi160_set_fifo_header_enable()
+ *	@note For interrupt configuration
+ *	@note 1. bmi160_set_intr_fifo_full()
+ *	@note 2. bmi160_set_intr_fifo_wm()
+ *	@note 3. bmi160_set_fifo_tag_intr2_enable()
+ *	@note 4. bmi160_set_fifo_tag_intr1_enable()
+ *
+ *	@note The fifo reads the whole 1024 bytes
+ *	and processing the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+BMI160_RETURN_FUNCTION_TYPE bmi160_read_fifo_header_data_user_defined_length(
+u16 v_fifo_user_length_u16);
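+/*
+ * Illustrative usage (a minimal sketch, not a normative sequence):
+ * a header-mode read of a caller-chosen length, assuming the same
+ * sensor-enable setup as the headerless sketch above and an arbitrary
+ * example length of 512 bytes.
+ *
+ *	bmi160_set_fifo_header_enable(1);	// frames carry a header byte
+ *	bmi160_read_fifo_header_data_user_defined_length(512);
+ */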
+/*!
+ *	@brief This function is used to get a pointer to the
+ *	driver's bmi160_t structure
+ *
+ *  @return the pointer to the bmi160_t structure and its values
+ *
+ *
+*/
+struct bmi160_t *bmi160_get_ptr(void);
+
+#endif
+
diff --git a/drivers/input/sensors/bmi160/bmi160_driver.c b/drivers/input/sensors/bmi160/bmi160_driver.c
new file mode 100644
index 0000000..66d2248
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bmi160_driver.c
@@ -0,0 +1,4021 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bmi160_driver.c
+ * @date     2016/08/01 14:40
+ * @id       "b5ff23a"
+ * @version  1.3
+ *
+ * @brief
+ * The core code of BMI160 device driver
+ *
+ * @detail
+ * This file implements the core code of BMI160 device driver,
+ * which includes hardware related functions, input device register,
+ * device attribute files, etc.
+*/
+
+#include "bmi160.h"
+#include "bmi160_driver.h"
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+
+
+#define DRIVER_VERSION "0.0.53.0"
+#define I2C_BURST_READ_MAX_LEN      (256)
+#define BMI160_STORE_COUNT  (6000)
+#define LMADA     (1)
+uint64_t g_current_apts_us;
+
+
+enum BMI_SENSOR_INT_T {
+	/* Interrupt enable0*/
+	BMI_ANYMO_X_INT = 0,
+	BMI_ANYMO_Y_INT,
+	BMI_ANYMO_Z_INT,
+	BMI_D_TAP_INT,
+	BMI_S_TAP_INT,
+	BMI_ORIENT_INT,
+	BMI_FLAT_INT,
+	/* Interrupt enable1*/
+	BMI_HIGH_X_INT,
+	BMI_HIGH_Y_INT,
+	BMI_HIGH_Z_INT,
+	BMI_LOW_INT,
+	BMI_DRDY_INT,
+	BMI_FFULL_INT,
+	BMI_FWM_INT,
+	/* Interrupt enable2 */
+	BMI_NOMOTION_X_INT,
+	BMI_NOMOTION_Y_INT,
+	BMI_NOMOTION_Z_INT,
+	BMI_STEP_DETECTOR_INT,
+	INT_TYPE_MAX
+};
+
+/*bmi fifo sensor type combination*/
+enum BMI_SENSOR_FIFO_COMBINATION {
+	BMI_FIFO_A = 0,
+	BMI_FIFO_G,
+	BMI_FIFO_M,
+	BMI_FIFO_G_A,
+	BMI_FIFO_M_A,
+	BMI_FIFO_M_G,
+	BMI_FIFO_M_G_A,
+	BMI_FIFO_COM_MAX
+};
+
+/*bmi fifo analyse return err status*/
+enum BMI_FIFO_ANALYSE_RETURN_T {
+	FIFO_OVER_READ_RETURN = -10,
+	FIFO_SENSORTIME_RETURN = -9,
+	FIFO_SKIP_OVER_LEN = -8,
+	FIFO_M_G_A_OVER_LEN = -7,
+	FIFO_M_G_OVER_LEN = -6,
+	FIFO_M_A_OVER_LEN = -5,
+	FIFO_G_A_OVER_LEN = -4,
+	FIFO_M_OVER_LEN = -3,
+	FIFO_G_OVER_LEN = -2,
+	FIFO_A_OVER_LEN = -1
+};
+
+/*!bmi sensor generic power mode enum */
+enum BMI_DEV_OP_MODE {
+	SENSOR_PM_NORMAL = 0,
+	SENSOR_PM_LP1,
+	SENSOR_PM_SUSPEND,
+	SENSOR_PM_LP2
+};
+
+/*! bmi acc sensor power mode enum */
+enum BMI_ACC_PM_TYPE {
+	BMI_ACC_PM_NORMAL = 0,
+	BMI_ACC_PM_LP1,
+	BMI_ACC_PM_SUSPEND,
+	BMI_ACC_PM_LP2,
+	BMI_ACC_PM_MAX
+};
+
+/*! bmi gyro sensor power mode enum */
+enum BMI_GYRO_PM_TYPE {
+	BMI_GYRO_PM_NORMAL = 0,
+	BMI_GYRO_PM_FAST_START,
+	BMI_GYRO_PM_SUSPEND,
+	BMI_GYRO_PM_MAX
+};
+
+/*! bmi mag sensor power mode enum */
+enum BMI_MAG_PM_TYPE {
+	BMI_MAG_PM_NORMAL = 0,
+	BMI_MAG_PM_LP1,
+	BMI_MAG_PM_SUSPEND,
+	BMI_MAG_PM_LP2,
+	BMI_MAG_PM_MAX
+};
+
+
+/*! bmi sensor support type*/
+enum BMI_SENSOR_TYPE {
+	BMI_ACC_SENSOR,
+	BMI_GYRO_SENSOR,
+	BMI_MAG_SENSOR,
+	BMI_SENSOR_TYPE_MAX
+};
+
+/*!bmi sensor generic power mode enum */
+enum BMI_AXIS_TYPE {
+	X_AXIS = 0,
+	Y_AXIS,
+	Z_AXIS,
+	AXIS_MAX
+};
+
+/*!bmi sensor generic interrupt enum */
+enum BMI_INT_TYPE {
+	BMI160_INT0 = 0,
+	BMI160_INT1,
+	BMI160_INT_MAX
+};
+
+/*! bmi sensor time resolution definition*/
+enum BMI_SENSOR_TIME_RS_TYPE {
+	TS_0_78_HZ = 1,/*0.78HZ*/
+	TS_1_56_HZ,/*1.56HZ*/
+	TS_3_125_HZ,/*3.125HZ*/
+	TS_6_25_HZ,/*6.25HZ*/
+	TS_12_5_HZ,/*12.5HZ*/
+	TS_25_HZ,/*25HZ, odr=6*/
+	TS_50_HZ,/*50HZ*/
+	TS_100_HZ,/*100HZ*/
+	TS_200_HZ,/*200HZ*/
+	TS_400_HZ,/*400HZ*/
+	TS_800_HZ,/*800HZ*/
+	TS_1600_HZ,/*1600HZ*/
+	TS_MAX_HZ
+};
+
+/*! bmi sensor interface mode */
+enum BMI_SENSOR_IF_MODE_TYPE {
+	/*primary interface:autoconfig/secondary interface off*/
+	P_AUTO_S_OFF = 0,
+	/*primary interface:I2C/secondary interface:OIS*/
+	P_I2C_S_OIS,
+	/*primary interface:autoconfig/secondary interface:Magnetometer*/
+	P_AUTO_S_MAG,
+	/*interface mode reserved*/
+	IF_MODE_RESEVED
+
+};
+
+/*! bmi160 acc/gyro calibration status in H/W layer */
+enum BMI_CALIBRATION_STATUS_TYPE {
+	/*BMI FAST Calibration ready x/y/z status*/
+	BMI_ACC_X_FAST_CALI_RDY = 0,
+	BMI_ACC_Y_FAST_CALI_RDY,
+	BMI_ACC_Z_FAST_CALI_RDY
+};
+
+unsigned int reg_op_addr;
+
+static const int bmi_pmu_cmd_acc_arr[BMI_ACC_PM_MAX] = {
+	/*!bmi pmu for acc normal, low power1,
+	 * suspend, low power2 mode command */
+	CMD_PMU_ACC_NORMAL,
+	CMD_PMU_ACC_LP1,
+	CMD_PMU_ACC_SUSPEND,
+	CMD_PMU_ACC_LP2
+};
+
+static const int bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_MAX] = {
+	/*!bmi pmu for gyro normal, fast startup,
+	 * suspend mode command */
+	CMD_PMU_GYRO_NORMAL,
+	CMD_PMU_GYRO_FASTSTART,
+	CMD_PMU_GYRO_SUSPEND
+};
+
+static const int bmi_pmu_cmd_mag_arr[BMI_MAG_PM_MAX] = {
+	/*!bmi pmu for mag normal, low power1,
+	 * suspend, low power2 mode command */
+	CMD_PMU_MAG_NORMAL,
+	CMD_PMU_MAG_LP1,
+	CMD_PMU_MAG_SUSPEND,
+	CMD_PMU_MAG_LP2
+};
+
+static const char *bmi_axis_name[AXIS_MAX] = {"x", "y", "z"};
+
+static const int bmi_interrupt_type[] = {
+	/*!bmi interrupt type */
+	/* Interrupt enable0 , index=0~6*/
+	BMI160_ANY_MOTION_X_ENABLE,
+	BMI160_ANY_MOTION_Y_ENABLE,
+	BMI160_ANY_MOTION_Z_ENABLE,
+	BMI160_DOUBLE_TAP_ENABLE,
+	BMI160_SINGLE_TAP_ENABLE,
+	BMI160_ORIENT_ENABLE,
+	BMI160_FLAT_ENABLE,
+	/* Interrupt enable1, index=7~13*/
+	BMI160_HIGH_G_X_ENABLE,
+	BMI160_HIGH_G_Y_ENABLE,
+	BMI160_HIGH_G_Z_ENABLE,
+	BMI160_LOW_G_ENABLE,
+	BMI160_DATA_RDY_ENABLE,
+	BMI160_FIFO_FULL_ENABLE,
+	BMI160_FIFO_WM_ENABLE,
+	/* Interrupt enable2, index = 14~17*/
+	BMI160_NOMOTION_X_ENABLE,
+	BMI160_NOMOTION_Y_ENABLE,
+	BMI160_NOMOTION_Z_ENABLE,
+	BMI160_STEP_DETECTOR_EN
+};
+
+/*! bmi sensor time depend on ODR*/
+struct bmi_sensor_time_odr_tbl {
+	u32 ts_duration_lsb;
+	u32 ts_duration_us;
+	u32 ts_delat;/*delta to subtract from the current fifo_time*/
+};
+
+struct bmi160_axis_data_t {
+	s16 x;
+	s16 y;
+	s16 z;
+};
+
+struct bmi160_type_mapping_type {
+
+	/*! bmi16x sensor chip id */
+	uint16_t chip_id;
+
+	/*! bmi16x chip revision code */
+	uint16_t revision_id;
+
+	/*! bmi16x sensor name */
+	const char *sensor_name;
+};
+
+struct bmi160_store_info_t {
+	uint8_t current_frm_cnt;
+	uint64_t current_apts_us[2];
+	uint8_t fifo_ts_total_frmcnt;
+	uint64_t fifo_time;
+};
+
+uint64_t get_current_timestamp(void)
+{
+	uint64_t ts_ap;
+	struct timespec tmp_time;
+	get_monotonic_boottime(&tmp_time);
+	ts_ap = (uint64_t)tmp_time.tv_sec * 1000000000 + tmp_time.tv_nsec;
+	return ts_ap;
+
+}
+
+/*! sensor support type map */
+static const struct bmi160_type_mapping_type sensor_type_map[] = {
+
+	{SENSOR_CHIP_ID_BMI, SENSOR_CHIP_REV_ID_BMI, "BMI160/162AB"},
+	{SENSOR_CHIP_ID_BMI_C2, SENSOR_CHIP_REV_ID_BMI, "BMI160C2"},
+	{SENSOR_CHIP_ID_BMI_C3, SENSOR_CHIP_REV_ID_BMI, "BMI160C3"},
+
+};
+
+/*!bmi160 sensor time depends on ODR */
+static const struct bmi_sensor_time_odr_tbl
+		sensortime_duration_tbl[TS_MAX_HZ] = {
+	{0x010000, 2560000, 0x00ffff},/*2560ms, 0.39hz, odr=reserved*/
+	{0x008000, 1280000, 0x007fff},/*1280ms, 0.78hz, odr_acc=1*/
+	{0x004000, 640000, 0x003fff},/*640ms, 1.56hz, odr_acc=2*/
+	{0x002000, 320000, 0x001fff},/*320ms, 3.125hz, odr_acc=3*/
+	{0x001000, 160000, 0x000fff},/*160ms, 6.25hz, odr_acc=4*/
+	{0x000800, 80000,  0x0007ff},/*80ms, 12.5hz*/
+	{0x000400, 40000, 0x0003ff},/*40ms, 25hz, odr_acc = odr_gyro =6*/
+	{0x000200, 20000, 0x0001ff},/*20ms, 50hz, odr = 7*/
+	{0x000100, 10000, 0x0000ff},/*10ms, 100hz, odr=8*/
+	{0x000080, 5000, 0x00007f},/*5ms, 200hz, odr=9*/
+	{0x000040, 2500, 0x00003f},/*2.5ms, 400hz, odr=10*/
+	{0x000020, 1250, 0x00001f},/*1.25ms, 800hz, odr=11*/
+	{0x000010, 625, 0x00000f},/*0.625ms, 1600hz, odr=12*/
+
+};
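+/*
+ * Worked example derived from the table above: at odr=8 (100hz) one
+ * output period spans 0x000100 = 256 sensortime LSBs over 10000us,
+ * i.e. 10000 / 256 = 39.0625us per LSB; the same ratio holds for the
+ * other rows (e.g. 0x000400 * 39.0625us = 40000us at 25hz).
+ */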
+
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+#define POLL_INTERVAL_MIN_MS	10
+#define POLL_INTERVAL_MAX_MS	4000
+#define POLL_DEFAULT_INTERVAL_MS 200
+#define BMI160_ACCEL_MIN_VALUE	-32768
+#define BMI160_ACCEL_MAX_VALUE	32767
+#define BMI160_GYRO_MIN_VALUE	-32768
+#define BMI160_GYRO_MAX_VALUE	32767
+#define BMI160_ACCEL_DEFAULT_POLL_INTERVAL_MS	200
+#define BMI160_GYRO_DEFAULT_POLL_INTERVAL_MS	200
+#define BMI160_ACCEL_MIN_POLL_INTERVAL_MS	10
+#define BMI160_ACCEL_MAX_POLL_INTERVAL_MS	5000
+#define BMI160_GYRO_MIN_POLL_INTERVAL_MS	10
+#define BMI160_GYRO_MAX_POLL_INTERVAL_MS	5000
+static struct sensors_classdev bmi160_accel_cdev = {
+		.name = "bmi160-accel",
+		.vendor = "bosch",
+		.version = 1,
+		.handle = SENSORS_ACCELERATION_HANDLE,
+		.type = SENSOR_TYPE_ACCELEROMETER,
+		.max_range = "156.8",	/* 16g */
+		.resolution = "0.153125",	/* 15.6mg */
+		.sensor_power = "0.13",	/* typical value */
+		.min_delay = POLL_INTERVAL_MIN_MS * 1000, /* in microseconds */
+		.max_delay = POLL_INTERVAL_MAX_MS,
+		.delay_msec = POLL_DEFAULT_INTERVAL_MS, /* in millisecond */
+		.fifo_reserved_event_count = 0,
+		.fifo_max_event_count = 0,
+		.enabled = 0,
+		.max_latency = 0,
+		.flags = 0,
+		.sensors_enable = NULL,
+		.sensors_poll_delay = NULL,
+		.sensors_set_latency = NULL,
+		.sensors_flush = NULL,
+		.sensors_self_test = NULL,
+};
+static struct sensors_classdev bmi160_gyro_cdev = {
+	.name = "bmi160-gyro",
+	.vendor = "bosch",
+	.version = 1,
+	.handle = SENSORS_GYROSCOPE_HANDLE,
+	.type = SENSOR_TYPE_GYROSCOPE,
+	.max_range = "34.906586",	/* rad/s */
+	.resolution = "0.0010681152",	/* rad/s */
+	.sensor_power = "3.6",	/* 3.6 mA */
+	.min_delay = BMI160_GYRO_MIN_POLL_INTERVAL_MS * 1000,
+	.max_delay = BMI160_GYRO_MAX_POLL_INTERVAL_MS,
+	.delay_msec = BMI160_GYRO_DEFAULT_POLL_INTERVAL_MS,
+	.fifo_reserved_event_count = 0,
+	.fifo_max_event_count = 0,
+	.enabled = 0,
+	.max_latency = 0,
+	.flags = 0, /* SENSOR_FLAG_CONTINUOUS_MODE */
+	.sensors_enable = NULL,
+	.sensors_poll_delay = NULL,
+	.sensors_enable_wakeup = NULL,
+	.sensors_set_latency = NULL,
+	.sensors_flush = NULL,
+};
+#endif
+static void bmi_delay(u32 msec)
+{
+	if (msec <= 20)
+		usleep_range(msec * 1000, msec * 1000);
+	else
+		msleep(msec);
+}
+
+static void bmi_dump_reg(struct bmi_client_data *client_data)
+{
+	#define REG_MAX0 0x24
+	#define REG_MAX1 0x56
+	int i;
+	u8 dbg_buf0[REG_MAX0];
+	u8 dbg_buf1[REG_MAX1];
+	u8 dbg_buf_str0[REG_MAX0 * 3 + 1] = "";
+	u8 dbg_buf_str1[REG_MAX1 * 3 + 1] = "";
+
+	dev_notice(client_data->dev, "\nFrom 0x00:\n");
+
+	client_data->device.bus_read(client_data->device.dev_addr,
+			BMI_REG_NAME(USER_CHIP_ID), dbg_buf0, REG_MAX0);
+	for (i = 0; i < REG_MAX0; i++) {
+		snprintf(dbg_buf_str0 + i * 3, 16, "%02x%c", dbg_buf0[i],
+				(((i + 1) % BYTES_PER_LINE == 0) ? '\n' : ' '));
+	}
+	dev_notice(client_data->dev, "%s\n", dbg_buf_str0);
+
+	client_data->device.bus_read(client_data->device.dev_addr,
+			BMI160_USER_ACCEL_CONFIG_ADDR, dbg_buf1, REG_MAX1);
+	dev_notice(client_data->dev, "\nFrom 0x40:\n");
+	for (i = 0; i < REG_MAX1; i++) {
+		snprintf(dbg_buf_str1 + i * 3, 16, "%02x%c", dbg_buf1[i],
+				(((i + 1) % BYTES_PER_LINE == 0) ? '\n' : ' '));
+	}
+	dev_notice(client_data->dev, "\n%s\n", dbg_buf_str1);
+	}
+
+
+void bmi_fifo_frame_bytes_extend_calc(
+	struct bmi_client_data *client_data,
+	unsigned int *fifo_frmbytes_extend)
+{
+
+	switch (client_data->fifo_data_sel) {
+	case BMI_FIFO_A_SEL:
+	case BMI_FIFO_G_SEL:
+		*fifo_frmbytes_extend = 7;
+		break;
+	case BMI_FIFO_G_A_SEL:
+		*fifo_frmbytes_extend = 13;
+		break;
+	case BMI_FIFO_M_SEL:
+		*fifo_frmbytes_extend = 9;
+		break;
+	case BMI_FIFO_M_A_SEL:
+	case BMI_FIFO_M_G_SEL:
+		/*8(mag) + 6(gyro or acc) +1(head) = 15*/
+		*fifo_frmbytes_extend = 15;
+		break;
+	case BMI_FIFO_M_G_A_SEL:
+		/*8(mag) + 6(gyro or acc) + 6 + 1 = 21*/
+		*fifo_frmbytes_extend = 21;
+		break;
+	default:
+		*fifo_frmbytes_extend = 0;
+		break;
+
+	};
+
+}
+
+
+static int bmi_input_init(struct bmi_client_data *client_data)
+{
+	struct input_dev *dev;
+	int err = 0;
+
+	dev = input_allocate_device();
+	if (NULL == dev)
+		return -ENOMEM;
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+	dev->name = "bmi160-accel";
+#else
+	dev->name = SENSOR_NAME;
+#endif
+	dev->id.bustype = BUS_I2C;
+
+	input_set_capability(dev, EV_MSC, MSC_GESTURE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_SGM);
+
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_GYRO_CALIB_DONE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_STEP_DETECTOR);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_ACC_CALIB_DONE);
+
+
+	input_set_capability(dev, EV_REL, REL_X);
+	input_set_capability(dev, EV_REL, REL_Y);
+	input_set_capability(dev, EV_REL, REL_Z);
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	input_set_capability(dev, EV_ABS, ABS_MISC);
+	input_set_abs_params(dev, ABS_X,
+	BMI160_ACCEL_MIN_VALUE, BMI160_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_Y,
+	BMI160_ACCEL_MIN_VALUE, BMI160_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_Z,
+	BMI160_ACCEL_MIN_VALUE, BMI160_ACCEL_MAX_VALUE,
+	0, 0);
+	#endif
+	input_set_drvdata(dev, client_data);
+
+	err = input_register_device(dev);
+	if (err < 0) {
+		input_free_device(dev);
+		dev_notice(client_data->dev, "bmi160 input free!\n");
+		return err;
+	}
+	client_data->input = dev;
+	dev_notice(client_data->dev,
+		"bmi160 input register successfully, %s!\n",
+		client_data->input->name);
+	return err;
+}
+
+//#if defined(CONFIG_USE_QUALCOMM_HAL)
+static int bmi_gyro_input_init(struct bmi_client_data *client_data)
+{
+	struct input_dev *dev;
+	int err = 0;
+
+	dev = input_allocate_device();
+	if (NULL == dev)
+		return -ENOMEM;
+	dev->name = "bmi160-gyro";
+	dev->id.bustype = BUS_I2C;
+	input_set_capability(dev, EV_ABS, ABS_MISC);
+	input_set_capability(dev, EV_MSC, MSC_GESTURE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_SGM);
+	
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_GYRO_CALIB_DONE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_STEP_DETECTOR);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_ACC_CALIB_DONE);
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	input_set_abs_params(dev, ABS_RX,
+	BMI160_ACCEL_MIN_VALUE, BMI160_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_RY,
+	BMI160_ACCEL_MIN_VALUE, BMI160_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_RZ,
+	BMI160_ACCEL_MIN_VALUE, BMI160_ACCEL_MAX_VALUE,
+	0, 0);
+	#endif
+	input_set_drvdata(dev, client_data);
+	err = input_register_device(dev);
+	if (err < 0) {
+		input_free_device(dev);
+		dev_notice(client_data->dev, "bmi160 input free!\n");
+		return err;
+	}
+	client_data->gyro_input = dev;
+	dev_notice(client_data->dev,
+		"bmi160 input register successfully, %s!\n",
+		client_data->gyro_input->name);
+	return err;
+}
+//#endif
+static void bmi_input_destroy(struct bmi_client_data *client_data)
+{
+	struct input_dev *dev = client_data->input;
+
+	input_unregister_device(dev);
+	input_free_device(dev);
+}
+
+static int bmi_check_chip_id(struct bmi_client_data *client_data)
+{
+	int8_t err = 0;
+	int8_t i = 0;
+	uint8_t chip_id = 0;
+	uint8_t read_count = 0;
+	u8 bmi_sensor_cnt = sizeof(sensor_type_map)
+				/ sizeof(struct bmi160_type_mapping_type);
+	/* read and check chip id */
+	while (read_count++ < CHECK_CHIP_ID_TIME_MAX) {
+		if (client_data->device.bus_read(client_data->device.dev_addr,
+				BMI_REG_NAME(USER_CHIP_ID), &chip_id, 1) < 0) {
+
+			dev_err(client_data->dev,
+					"Bosch Sensortec Device not found, "
+						"read chip_id:%d\n", chip_id);
+			continue;
+		} else {
+			for (i = 0; i < bmi_sensor_cnt; i++) {
+				if (sensor_type_map[i].chip_id == chip_id) {
+					client_data->chip_id = chip_id;
+					dev_notice(client_data->dev,
+					"Bosch Sensortec Device detected, "
+			"HW IC name: %s\n", sensor_type_map[i].sensor_name);
+					break;
+				}
+			}
+			if (i < bmi_sensor_cnt)
+				break;
+			else {
+				if (read_count == CHECK_CHIP_ID_TIME_MAX) {
+					dev_err(client_data->dev,
+			"Failed! Bosch Sensortec Device not found,"
+					" mismatch chip_id:%d\n", chip_id);
+					err = -ENODEV;
+					return err;
+				}
+			}
+			bmi_delay(1);
+		}
+	}
+	return err;
+
+}
+
+static int bmi_pmu_set_suspend(struct bmi_client_data *client_data)
+{
+	int err = 0;
+	if (client_data == NULL)
+		return -EINVAL;
+	else {
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_acc_arr[SENSOR_PM_SUSPEND]);
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[SENSOR_PM_SUSPEND]);
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_mag_arr[SENSOR_PM_SUSPEND]);
+		client_data->pw.acc_pm = BMI_ACC_PM_SUSPEND;
+		client_data->pw.gyro_pm = BMI_GYRO_PM_SUSPEND;
+		client_data->pw.mag_pm = BMI_MAG_PM_SUSPEND;
+	}
+
+	return err;
+}
+
+static int bmi_get_err_status(struct bmi_client_data *client_data)
+{
+	int err = 0;
+
+	err = BMI_CALL_API(get_error_status)(&client_data->err_st.fatal_err,
+		&client_data->err_st.err_code, &client_data->err_st.i2c_fail,
+	&client_data->err_st.drop_cmd, &client_data->err_st.mag_drdy_err);
+	return err;
+}
+
+static void bmi_work_func(struct work_struct *work)
+{
+	struct bmi_client_data *client_data =
+		container_of((struct delayed_work *)work,
+			struct bmi_client_data, work);
+	unsigned long delay =
+		msecs_to_jiffies(atomic_read(&client_data->delay));
+	struct bmi160_accel_t data;
+	int err;
+
+	err = BMI_CALL_API(read_accel_xyz)(&data);
+	if (err < 0)
+		return;
+
+	/*report current frame via input event*/
+	input_event(client_data->input, EV_REL, REL_X, data.x);
+	input_event(client_data->input, EV_REL, REL_Y, data.y);
+	input_event(client_data->input, EV_REL, REL_Z, data.z);
+	input_sync(client_data->input);
+
+	schedule_delayed_work(&client_data->work, delay);
+}
+
+static ssize_t bmi160_chip_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "0x%x\n", client_data->chip_id);
+}
+
+static ssize_t bmi160_err_st_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	err = bmi_get_err_status(client_data);
+	if (err)
+		return err;
+	else {
+		return snprintf(buf, 128, "fatal_err:0x%x, err_code:%d,\n\n"
+			"i2c_fail_err:%d, drop_cmd_err:%d, mag_drdy_err:%d\n",
+			client_data->err_st.fatal_err,
+			client_data->err_st.err_code,
+			client_data->err_st.i2c_fail,
+			client_data->err_st.drop_cmd,
+			client_data->err_st.mag_drdy_err);
+
+	}
+}
+
+static ssize_t bmi160_sensor_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	u32 sensor_time;
+	err = BMI_CALL_API(get_sensor_time)(&sensor_time);
+	if (err)
+		return err;
+	else
+		return snprintf(buf, 16, "0x%x\n", (unsigned int)sensor_time);
+}
+
+static ssize_t bmi160_fifo_flush_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long enable;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &enable);
+	if (err)
+		return err;
+	if (enable)
+		err = BMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+
+	if (err)
+		dev_err(client_data->dev, "fifo flush failed!\n");
+
+	return count;
+
+}
+
+
+static ssize_t bmi160_fifo_bytecount_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned int fifo_bytecount = 0;
+
+	BMI_CALL_API(fifo_length)(&fifo_bytecount);
+	err = snprintf(buf, 16, "%u\n", fifo_bytecount);
+	return err;
+}
+
+static ssize_t bmi160_fifo_bytecount_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	client_data->fifo_bytecount = (unsigned int) data;
+
+	return count;
+}
+
+int bmi160_fifo_data_sel_get(struct bmi_client_data *client_data)
+{
+	int err = 0;
+	unsigned char fifo_acc_en, fifo_gyro_en, fifo_mag_en;
+	unsigned char fifo_datasel;
+
+	err += BMI_CALL_API(get_fifo_accel_enable)(&fifo_acc_en);
+	err += BMI_CALL_API(get_fifo_gyro_enable)(&fifo_gyro_en);
+	err += BMI_CALL_API(get_fifo_mag_enable)(&fifo_mag_en);
+
+	if (err)
+		return err;
+
+	fifo_datasel = (fifo_acc_en << BMI_ACC_SENSOR) |
+			(fifo_gyro_en << BMI_GYRO_SENSOR) |
+				(fifo_mag_en << BMI_MAG_SENSOR);
+
+	client_data->fifo_data_sel = fifo_datasel;
+
+	return err;
+
+
+}
+
+static ssize_t bmi160_fifo_data_sel_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	err = bmi160_fifo_data_sel_get(client_data);
+	if (err) {
+		dev_err(client_data->dev, "get fifo_sel failed!\n");
+		return -EINVAL;
+	}
+	return snprintf(buf, 16, "%d\n", client_data->fifo_data_sel);
+}
+
+/* select which sensors (mag/gyro/accel) feed the fifo; writing also clears the fifo data. */
+static ssize_t bmi160_fifo_data_sel_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	unsigned char fifo_datasel;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* data format: 0b00000mga (bit2: mag, bit1: gyro, bit0: accel); 1: enable, 0: disable */
+	if (data > 7)
+		return -EINVAL;
+
+
+	fifo_datasel = (unsigned char)data;
+
+
+	err += BMI_CALL_API(set_fifo_accel_enable)
+			((fifo_datasel & (1 << BMI_ACC_SENSOR)) ? 1 :  0);
+	err += BMI_CALL_API(set_fifo_gyro_enable)
+			(fifo_datasel & (1 << BMI_GYRO_SENSOR) ? 1 : 0);
+	err += BMI_CALL_API(set_fifo_mag_enable)
+			((fifo_datasel & (1 << BMI_MAG_SENSOR)) ? 1 : 0);
+
+	err += BMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+	if (err)
+		return -EIO;
+	else {
+		dev_notice(client_data->dev, "FIFO A_en:%d, G_en:%d, M_en:%d\n",
+			(fifo_datasel & (1 << BMI_ACC_SENSOR)) ? 1 :  0,
+			(fifo_datasel & (1 << BMI_GYRO_SENSOR) ? 1 : 0),
+			((fifo_datasel & (1 << BMI_MAG_SENSOR)) ? 1 : 0));
+		client_data->fifo_data_sel = fifo_datasel;
+	}
+	return count;
+}
+
+static ssize_t bmi160_fifo_data_out_frame_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	int err = 0;
+	uint32_t fifo_bytecount = 0;
+
+	err = BMI_CALL_API(fifo_length)(&fifo_bytecount);
+	if (err < 0) {
+		dev_err(client_data->dev, "read fifo_length err");
+		return -EINVAL;
+	}
+	if (fifo_bytecount == 0)
+		return 0;
+	err = bmi_burst_read_wrapper(client_data->device.dev_addr,
+		BMI160_USER_FIFO_DATA__REG, buf,
+		fifo_bytecount);
+	if (err) {
+		dev_err(client_data->dev, "read fifo err");
+		BMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+		return -EINVAL;
+	}
+	return fifo_bytecount;
+
+}
+
+static ssize_t bmi160_fifo_watermark_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data = 0xff;
+
+	err = BMI_CALL_API(get_fifo_wm)(&data);
+
+	if (err)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_fifo_watermark_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long data;
+	unsigned char fifo_watermark;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	fifo_watermark = (unsigned char)data;
+	err = BMI_CALL_API(set_fifo_wm)(fifo_watermark);
+	if (err)
+		return -EIO;
+
+	return count;
+}
+
+
+static ssize_t bmi160_fifo_header_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data = 0xff;
+
+	err = BMI_CALL_API(get_fifo_header_enable)(&data);
+
+	if (err)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_fifo_header_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	unsigned char fifo_header_en;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	if (data > 1)
+		return -ENOENT;
+
+	fifo_header_en = (unsigned char)data;
+	err = BMI_CALL_API(set_fifo_header_enable)(fifo_header_en);
+	if (err)
+		return -EIO;
+
+	client_data->fifo_head_en = fifo_header_en;
+
+	return count;
+}
+
+static ssize_t bmi160_fifo_time_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data = 0;
+
+	err = BMI_CALL_API(get_fifo_time_enable)(&data);
+
+	if (!err)
+		err = snprintf(buf, 16, "%d\n", data);
+
+	return err;
+}
+
+static ssize_t bmi160_fifo_time_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long data;
+	unsigned char fifo_ts_en;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	fifo_ts_en = (unsigned char)data;
+
+	err = BMI_CALL_API(set_fifo_time_enable)(fifo_ts_en);
+	if (err)
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t bmi160_fifo_int_tag_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	unsigned char fifo_tag_int1 = 0;
+	unsigned char fifo_tag_int2 = 0;
+	unsigned char fifo_tag_int;
+
+	err += BMI_CALL_API(get_fifo_tag_intr1_enable)(&fifo_tag_int1);
+	err += BMI_CALL_API(get_fifo_tag_intr2_enable)(&fifo_tag_int2);
+
+	fifo_tag_int = (fifo_tag_int1 << BMI160_INT0) |
+			(fifo_tag_int2 << BMI160_INT1);
+
+	if (!err)
+		err = snprintf(buf, 16, "%d\n", fifo_tag_int);
+
+	return err;
+}
+
+static ssize_t bmi160_fifo_int_tag_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	unsigned char fifo_tag_int_en;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	if (data > 3)
+		return -EINVAL;
+
+	fifo_tag_int_en = (unsigned char)data;
+
+	err += BMI_CALL_API(set_fifo_tag_intr1_enable)
+			((fifo_tag_int_en & (1 << BMI160_INT0)) ? 1 :  0);
+	err += BMI_CALL_API(set_fifo_tag_intr2_enable)
+			((fifo_tag_int_en & (1 << BMI160_INT1)) ? 1 :  0);
+
+	if (err) {
+		dev_err(client_data->dev, "fifo int tag en err:%d\n", err);
+		return -EIO;
+	}
+	client_data->fifo_int_tag_en = fifo_tag_int_en;
+
+	return count;
+}
+
+static int bmi160_set_acc_op_mode(struct bmi_client_data *client_data,
+							unsigned long op_mode)
+{
+	int err = 0;
+	unsigned char stc_enable;
+	unsigned char std_enable;
+	mutex_lock(&client_data->mutex_op_mode);
+
+	if (op_mode < BMI_ACC_PM_MAX) {
+		switch (op_mode) {
+		case BMI_ACC_PM_NORMAL:
+			err = BMI_CALL_API(set_command_register)
+			(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_NORMAL]);
+			client_data->pw.acc_pm = BMI_ACC_PM_NORMAL;
+			bmi_delay(10);
+			break;
+		case BMI_ACC_PM_LP1:
+			err = BMI_CALL_API(set_command_register)
+			(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_LP1]);
+			client_data->pw.acc_pm = BMI_ACC_PM_LP1;
+			bmi_delay(3);
+			break;
+		case BMI_ACC_PM_SUSPEND:
+			BMI_CALL_API(get_step_counter_enable)(&stc_enable);
+			BMI_CALL_API(get_step_detector_enable)(&std_enable);
+			if ((stc_enable == 0) && (std_enable == 0) &&
+				(client_data->sig_flag == 0)) {
+				err = BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_SUSPEND]);
+				client_data->pw.acc_pm = BMI_ACC_PM_SUSPEND;
+				bmi_delay(10);
+			}
+			break;
+		case BMI_ACC_PM_LP2:
+			err = BMI_CALL_API(set_command_register)
+			(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_LP2]);
+			client_data->pw.acc_pm = BMI_ACC_PM_LP2;
+			bmi_delay(3);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	return err;
+
+
+}
+
+static ssize_t bmi160_temperature_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	s16 temp = 0xff;
+
+	err = BMI_CALL_API(get_temp)(&temp);
+
+	if (!err)
+		err = snprintf(buf, 16, "0x%x\n", temp);
+
+	return err;
+}
+
+static ssize_t bmi160_place_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int place = BOSCH_SENSOR_PLACE_UNKNOWN;
+
+	if (NULL != client_data->bst_pd)
+		place = client_data->bst_pd->place;
+
+	return snprintf(buf, 16, "%d\n", place);
+}
+
+static ssize_t bmi160_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&client_data->delay));
+
+}
+
+static ssize_t bmi160_delay_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	if (data == 0) {
+		err = -EINVAL;
+		return err;
+	}
+
+	if (data < BMI_DELAY_MIN)
+		data = BMI_DELAY_MIN;
+
+	atomic_set(&client_data->delay, (unsigned int)data);
+
+	return count;
+}
+
+static ssize_t bmi160_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&client_data->wkqueue_en));
+
+}
+
+static ssize_t bmi160_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long enable;
+	int pre_enable = atomic_read(&client_data->wkqueue_en);
+
+	err = kstrtoul(buf, 10, &enable);
+	if (err)
+		return err;
+
+	enable = enable ? 1 : 0;
+	mutex_lock(&client_data->mutex_enable);
+	if (enable) {
+		if (pre_enable == 0) {
+			bmi160_set_acc_op_mode(client_data,
+							BMI_ACC_PM_NORMAL);
+			schedule_delayed_work(&client_data->work,
+			msecs_to_jiffies(atomic_read(&client_data->delay)));
+			atomic_set(&client_data->wkqueue_en, 1);
+		}
+
+	} else {
+		if (pre_enable == 1) {
+			bmi160_set_acc_op_mode(client_data,
+							BMI_ACC_PM_SUSPEND);
+
+			cancel_delayed_work_sync(&client_data->work);
+			atomic_set(&client_data->wkqueue_en, 0);
+		}
+	}
+
+	mutex_unlock(&client_data->mutex_enable);
+
+	return count;
+}
+
+#if defined(BMI160_ENABLE_INT1) || defined(BMI160_ENABLE_INT2)
+/* accel sensor part */
+static ssize_t bmi160_anymot_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data;
+
+	err = BMI_CALL_API(get_intr_any_motion_durn)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_anymot_duration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_intr_any_motion_durn)((unsigned char)data);
+	if (err < 0)
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t bmi160_anymot_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_intr_any_motion_thres)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_anymot_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_intr_any_motion_thres)((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_step_detector_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 data = 0;
+	u8 step_det;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	err = BMI_CALL_API(get_step_detector_enable)(&step_det);
+	/*bmi160_get_status0_step_int*/
+	if (err < 0)
+		return err;
+	/* client_data->std is updated in bmi_stepdetector_interrupt_handle */
+	if ((step_det == 1) && (client_data->std == 1)) {
+		data = 1;
+		client_data->std = 0;
+	} else {
+		data = 0;
+	}
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_step_detector_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_step_detector_enable)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_step_detector_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_step_detector_enable)((unsigned char)data);
+	if (err < 0)
+		return -EIO;
+	if (data == 0)
+		client_data->pedo_data.wkar_step_detector_status = 0;
+	return count;
+}
+
+static ssize_t bmi160_signification_motion_enable_store(
+	struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/*0x62 (bit 1) INT_MOTION_3 int_sig_mot_sel*/
+	err = BMI_CALL_API(set_intr_significant_motion_select)(
+		(unsigned char)data);
+	if (err < 0)
+		return -EIO;
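+	/* enabling significant motion also turns on any-motion on all three
+	 * axes and arms the IRQ as a wakeup source; disabling reverses this */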
+	if (data == 1) {
+		err = BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_X_ENABLE, 1);
+		err += BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_Y_ENABLE, 1);
+		err += BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_Z_ENABLE, 1);
+		if (err < 0)
+			return -EIO;
+		enable_irq_wake(client_data->IRQ);
+		client_data->sig_flag = 1;
+	} else {
+		err = BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_X_ENABLE, 0);
+		err += BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_Y_ENABLE, 0);
+		err += BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_Z_ENABLE, 0);
+		if (err < 0)
+			return -EIO;
+		disable_irq_wake(client_data->IRQ);
+		client_data->sig_flag = 0;
+	}
+	return count;
+}
+
+static ssize_t bmi160_signification_motion_enable_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+	/*0x62 (bit 1) INT_MOTION_3 int_sig_mot_sel*/
+	err = BMI_CALL_API(get_intr_significant_motion_select)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static int sigmotion_init_interrupts(u8 sig_map_int_pin)
+{
+	int ret = 0;
+/*0x60  */
+	ret += bmi160_set_intr_any_motion_thres(0x1e);
+/* 0x62(bit 3~2)	0=1.5s */
+	ret += bmi160_set_intr_significant_motion_skip(0);
+/*0x62(bit 5~4)	1=0.5s*/
+	ret += bmi160_set_intr_significant_motion_proof(1);
+/*0x50 (bit 0, 1, 2)  INT_EN_0 anymo x y z*/
+	ret += bmi160_map_significant_motion_intr(sig_map_int_pin);
+/*0x62 (bit 1) INT_MOTION_3	int_sig_mot_sel
+close the signification_motion*/
+	ret += bmi160_set_intr_significant_motion_select(0);
+/*close the anymotion interrupt*/
+	ret += BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_X_ENABLE, 0);
+	ret += BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_Y_ENABLE, 0);
+	ret += BMI_CALL_API(set_intr_enable_0)
+					(BMI160_ANY_MOTION_Z_ENABLE, 0);
+	if (ret)
+		printk(KERN_ERR "bmi160 sig motion interrupt setup failed, err=%d\n", ret);
+	return ret;
+
+}
+#endif
+
+static ssize_t bmi160_acc_range_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char range;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = BMI_CALL_API(get_accel_range)(&range);
+	if (err)
+		return err;
+
+	client_data->range.acc_range = range;
+	return snprintf(buf, 16, "%d\n", range);
+}
+
+static ssize_t bmi160_acc_range_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long range;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+
+	err = kstrtoul(buf, 10, &range);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_accel_range)(range);
+	if (err)
+		return -EIO;
+
+	client_data->range.acc_range = range;
+	return count;
+}
+
+static ssize_t bmi160_acc_odr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char acc_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = BMI_CALL_API(get_accel_output_data_rate)(&acc_odr);
+	if (err)
+		return err;
+
+	client_data->odr.acc_odr = acc_odr;
+	return snprintf(buf, 16, "%d\n", acc_odr);
+}
+
+static ssize_t bmi160_acc_odr_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long acc_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &acc_odr);
+	if (err)
+		return err;
+
+	if (acc_odr < 1 || acc_odr > 12)
+		return -EIO;
+
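+	/* ODR codes 1..4 select the lowest accel data rates and need the
+	 * undersampling parameter enabled; higher codes run without it */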
+	if (acc_odr < 5)
+		err = BMI_CALL_API(set_accel_under_sampling_parameter)(1);
+	else
+		err = BMI_CALL_API(set_accel_under_sampling_parameter)(0);
+
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_accel_output_data_rate)(acc_odr);
+	if (err)
+		return -EIO;
+	client_data->odr.acc_odr = acc_odr;
+	return count;
+}
+
+static ssize_t bmi160_acc_op_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	u8 accel_pmu_status = 0;
+	err = BMI_CALL_API(get_accel_power_mode_stat)(
+		&accel_pmu_status);
+
+	if (err)
+		return err;
+
+	return snprintf(buf, 32, "reg:%d, val:%d\n", accel_pmu_status,
+			client_data->pw.acc_pm);
+}
+
+static ssize_t bmi160_acc_op_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long op_mode;
+	err = kstrtoul(buf, 10, &op_mode);
+	if (err)
+		return err;
+
+	err = bmi160_set_acc_op_mode(client_data, op_mode);
+	if (err)
+		return err;
+	else
+		return count;
+
+}
+
+static ssize_t bmi160_acc_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct bmi160_accel_t data;
+
+	int err;
+
+	err = BMI_CALL_API(read_accel_xyz)(&data);
+	if (err < 0)
+		return err;
+
+	return snprintf(buf, 48, "%hd %hd %hd\n",
+			data.x, data.y, data.z);
+}
+
+static ssize_t bmi160_acc_fast_calibration_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_foc_accel_x)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_acc_fast_calibration_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	s8 accel_offset_x = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* 0: disable, 1: +1g, 2: -1g, 3: 0g */
+	if (data > 3)
+		return -EINVAL;
+
+	err = BMI_CALL_API(set_accel_foc_trigger)(X_AXIS,
+					data, &accel_offset_x);
+	if (err)
+		return -EIO;
+	else
+		client_data->calib_status |=
+			BMI_FAST_CALI_TRUE << BMI_ACC_X_FAST_CALI_RDY;
+	return count;
+}
+
+static ssize_t bmi160_acc_fast_calibration_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_foc_accel_y)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_acc_fast_calibration_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	s8 accel_offset_y = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* 0: disable, 1: +1g, 2: -1g, 3: 0g */
+	if (data > 3)
+		return -EINVAL;
+
+	err = BMI_CALL_API(set_accel_foc_trigger)(Y_AXIS,
+				data, &accel_offset_y);
+	if (err)
+		return -EIO;
+	else
+		client_data->calib_status |=
+			BMI_FAST_CALI_TRUE << BMI_ACC_Y_FAST_CALI_RDY;
+	return count;
+}
+
+static ssize_t bmi160_acc_fast_calibration_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_foc_accel_z)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_acc_fast_calibration_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	s8 accel_offset_z = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	unsigned char data1[3] = {0};
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* 0: disable, 1: +1g, 2: -1g, 3: 0g */
+	if (data > 3)
+		return -EINVAL;
+
+	err = BMI_CALL_API(set_accel_foc_trigger)(Z_AXIS,
+			data, &accel_offset_z);
+	if (err)
+		return -EIO;
+	else
+		client_data->calib_status |=
+			BMI_FAST_CALI_TRUE << BMI_ACC_Z_FAST_CALI_RDY;
+
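+	/* once the X, Y and Z fast calibrations are all done, read back the
+	 * offset compensation values and report them in one input event */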
+	if (client_data->calib_status == BMI_FAST_CALI_ALL_RDY) {
+		err = BMI_CALL_API(get_accel_offset_compensation_xaxis)(
+			&data1[0]);
+		err += BMI_CALL_API(get_accel_offset_compensation_yaxis)(
+			&data1[1]);
+		err += BMI_CALL_API(get_accel_offset_compensation_zaxis)(
+			&data1[2]);
+		dev_info(client_data->dev, "accx %d, accy %d, accz %d\n",
+			data1[0], data1[1], data1[2]);
+		if (err)
+			return -EIO;
+		input_event(client_data->input, EV_MSC,
+		INPUT_EVENT_FAST_ACC_CALIB_DONE,
+		(data1[0] | (data1[1] << 8) | (data1[2] << 16)));
+		input_sync(client_data->input);
+		client_data->calib_status = 0;
+	}
+
+	return count;
+}
+
+static ssize_t bmi160_acc_offset_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_accel_offset_compensation_xaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+
+static ssize_t bmi160_acc_offset_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_accel_offset_compensation_xaxis)
+						((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_acc_offset_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_accel_offset_compensation_yaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_acc_offset_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_accel_offset_compensation_yaxis)
+						((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_acc_offset_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_accel_offset_compensation_zaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_acc_offset_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_accel_offset_compensation_zaxis)
+						((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_test_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	u8 raw_data[15] = {0};
+	unsigned int sensor_time = 0;
+
+	int err;
+	memset(raw_data, 0, sizeof(raw_data));
+
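+	/* 15-byte burst starting at the gyro X LSB register: gyro x/y/z,
+	 * accel x/y/z, followed by the 3-byte sensor time */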
+	err = client_data->device.bus_read(client_data->device.dev_addr,
+			BMI160_USER_DATA_8_GYRO_X_LSB__REG, raw_data, 15);
+	if (err)
+		return err;
+
+	udelay(10);
+	sensor_time = (u32)(raw_data[14] << 16 | raw_data[13] << 8
+						| raw_data[12]);
+
+	return snprintf(buf, 128, "%d %d %d %d %d %d %u",
+					(s16)(raw_data[1] << 8 | raw_data[0]),
+				(s16)(raw_data[3] << 8 | raw_data[2]),
+				(s16)(raw_data[5] << 8 | raw_data[4]),
+				(s16)(raw_data[7] << 8 | raw_data[6]),
+				(s16)(raw_data[9] << 8 | raw_data[8]),
+				(s16)(raw_data[11] << 8 | raw_data[10]),
+				sensor_time);
+
+}
+
+static ssize_t bmi160_step_counter_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = BMI_CALL_API(get_step_counter_enable)(&data);
+
+	client_data->stc_enable = data;
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_step_counter_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_step_counter_enable)((unsigned char)data);
+
+	client_data->stc_enable = data;
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+
+static ssize_t bmi160_step_counter_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_step_mode)((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_step_counter_clc_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = bmi160_clear_step_counter();
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_step_counter_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u16 data;
+	int err;
+	static u16 last_stc_value;
+
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = BMI_CALL_API(read_step_count)(&data);
+
+	if (err < 0)
+		return err;
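+	/* accumulate into a driver-side counter so a hardware counter reset
+	 * (data < last_stc_value) never makes the reported value jump back */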
+	if (data >= last_stc_value) {
+		client_data->pedo_data.last_step_counter_value += (
+			data - last_stc_value);
+		last_stc_value = data;
+	} else
+		last_stc_value = data;
+	return snprintf(buf, 16, "%d\n",
+		client_data->pedo_data.last_step_counter_value);
+}
+
+static ssize_t bmi160_bmi_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	u8 raw_data[12] = {0};
+
+	int err;
+	memset(raw_data, 0, sizeof(raw_data));
+
+	err = client_data->device.bus_read(client_data->device.dev_addr,
+			BMI160_USER_DATA_8_GYRO_X_LSB__REG, raw_data, 12);
+	if (err)
+		return err;
+	/*output:gyro x y z acc x y z*/
+	return snprintf(buf, 96, "%hd %hd %hd %hd %hd %hd\n",
+					(s16)(raw_data[1] << 8 | raw_data[0]),
+				(s16)(raw_data[3] << 8 | raw_data[2]),
+				(s16)(raw_data[5] << 8 | raw_data[4]),
+				(s16)(raw_data[7] << 8 | raw_data[6]),
+				(s16)(raw_data[9] << 8 | raw_data[8]),
+				(s16)(raw_data[11] << 8 | raw_data[10]));
+
+}
+
+
+static ssize_t bmi160_selftest_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "0x%x\n",
+				atomic_read(&client_data->selftest_result));
+}
+
+static int bmi_restore_hw_cfg(struct bmi_client_data *client);
+
+/*!
+ * @brief store the selftest result, which is made up of acc and gyro bits
+ * format: 0b 0000 xxxx  (x: 1 = failed, 0 = success)
+ * bit3:    gyro_self
+ * bit2..0: acc_self z y x
+ */
+static ssize_t bmi160_selftest_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	int i = 0;
+
+	u8 acc_selftest = 0;
+	u8 gyro_selftest = 0;
+	u8 bmi_selftest = 0;
+	s16 axis_p_value, axis_n_value;
+	u16 diff_axis[3] = {0xff, 0xff, 0xff};
+	u8 acc_odr, range, acc_selftest_amp, acc_selftest_sign;
+
+	dev_notice(client_data->dev, "Selftest for BMI16x starting.\n");
+
+	client_data->selftest = 1;
+
+	/*soft reset*/
+	err = BMI_CALL_API(set_command_register)(CMD_RESET_USER_REG);
+	msleep(70);
+	err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_NORMAL]);
+	err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_NORMAL]);
+	err += BMI_CALL_API(set_accel_under_sampling_parameter)(0);
+	err += BMI_CALL_API(set_accel_output_data_rate)(
+	BMI160_ACCEL_OUTPUT_DATA_RATE_1600HZ);
+
+	/* set to 8G range*/
+	err += BMI_CALL_API(set_accel_range)(BMI160_ACCEL_RANGE_8G);
+	/* set to self amp high */
+	err += BMI_CALL_API(set_accel_selftest_amp)(BMI_SELFTEST_AMP_HIGH);
+
+
+	err += BMI_CALL_API(get_accel_output_data_rate)(&acc_odr);
+	err += BMI_CALL_API(get_accel_range)(&range);
+	err += BMI_CALL_API(get_accel_selftest_amp)(&acc_selftest_amp);
+	err += BMI_CALL_API(read_accel_x)(&axis_n_value);
+
+	dev_info(client_data->dev,
+			"acc_odr:%d, acc_range:%d, acc_selftest_amp:%d, acc_x:%d\n",
+				acc_odr, range, acc_selftest_amp, axis_n_value);
+
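+	/* for each axis: apply the negative and then the positive selftest
+	 * deflection and record the absolute difference of the two readings */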
+	for (i = X_AXIS; i < AXIS_MAX; i++) {
+		axis_n_value = 0;
+		axis_p_value = 0;
+		/* set every selftest axis */
+		/* set_acc_selftest_axis(param): param x:1, y:2, z:3,
+		 * but X_AXIS:0, Y_AXIS:1, Z_AXIS:2,
+		 * so we need to add 1 */
+		err += BMI_CALL_API(set_accel_selftest_axis)(i + 1);
+		msleep(50);
+		switch (i) {
+		case X_AXIS:
+			/* set negative sign */
+			err += BMI_CALL_API(set_accel_selftest_sign)(0);
+			err += BMI_CALL_API(get_accel_selftest_sign)(
+				&acc_selftest_sign);
+
+			msleep(60);
+			err += BMI_CALL_API(read_accel_x)(&axis_n_value);
+			dev_info(client_data->dev,
+			"acc_x_selftest_sign:%d, axis_n_value:%d\n",
+			acc_selftest_sign, axis_n_value);
+
+			/* set positive sign */
+			err += BMI_CALL_API(set_accel_selftest_sign)(1);
+			err += BMI_CALL_API(get_accel_selftest_sign)(
+				&acc_selftest_sign);
+
+			msleep(60);
+			err += BMI_CALL_API(read_accel_x)(&axis_p_value);
+			dev_info(client_data->dev,
+			"acc_x_selftest_sign:%d, axis_p_value:%d\n",
+			acc_selftest_sign, axis_p_value);
+			diff_axis[i] = abs(axis_p_value - axis_n_value);
+			break;
+
+		case Y_AXIS:
+			/* set negative sign */
+			err += BMI_CALL_API(set_accel_selftest_sign)(0);
+			msleep(60);
+			err += BMI_CALL_API(read_accel_y)(&axis_n_value);
+			/* set positive sign */
+			err += BMI_CALL_API(set_accel_selftest_sign)(1);
+			msleep(60);
+			err += BMI_CALL_API(read_accel_y)(&axis_p_value);
+			diff_axis[i] = abs(axis_p_value - axis_n_value);
+			break;
+
+		case Z_AXIS:
+			/* set negative sign */
+			err += BMI_CALL_API(set_accel_selftest_sign)(0);
+			msleep(60);
+			err += BMI_CALL_API(read_accel_z)(&axis_n_value);
+			/* set positive sign */
+			err += BMI_CALL_API(set_accel_selftest_sign)(1);
+			msleep(60);
+			err += BMI_CALL_API(read_accel_z)(&axis_p_value);
+			/* also start gyro self test */
+			err += BMI_CALL_API(set_gyro_selftest_start)(1);
+			msleep(60);
+			err += BMI_CALL_API(get_gyro_selftest)(&gyro_selftest);
+
+			diff_axis[i] = abs(axis_p_value - axis_n_value);
+			break;
+		default:
+			err += -EINVAL;
+			break;
+		}
+		if (err) {
+			dev_err(client_data->dev,
+				"Failed selftest axis:%s, p_val=%d, n_val=%d\n",
+				bmi_axis_name[i], axis_p_value, axis_n_value);
+			client_data->selftest = 0;
+			return -EINVAL;
+		}
+
+		/*400mg for acc z axis*/
+		if (Z_AXIS == i) {
+			if (diff_axis[i] < 1639) {
+				acc_selftest |= 1 << i;
+				dev_err(client_data->dev,
+					"Below selftest minimum for "
+					"axis:%s,diff=%d,p_val=%d, n_val=%d\n",
+					bmi_axis_name[i], diff_axis[i],
+						axis_p_value, axis_n_value);
+			}
+		} else {
+			/*800mg for x or y axis*/
+			if (diff_axis[i] < 3277) {
+				acc_selftest |= 1 << i;
+
+				if (bmi_get_err_status(client_data) < 0)
+					return err;
+				dev_err(client_data->dev,
+					"Below selftest minimum for "
+					"axis:%s,diff=%d, p_val=%d, n_val=%d\n",
+					bmi_axis_name[i], diff_axis[i],
+						axis_p_value, axis_n_value);
+				dev_err(client_data->dev, "err_st:0x%x\n",
+						client_data->err_st.err_st_all);
+
+			}
+		}
+
+	}
+	/* gyro_selftest == 1 means the gyro selftest passed, but in the
+	 * reported bmi_selftest value the gyro bit is 0 on pass, 1 on fail */
+	bmi_selftest = (acc_selftest & 0x0f) | ((!gyro_selftest) << AXIS_MAX);
+	atomic_set(&client_data->selftest_result, bmi_selftest);
+	/*soft reset*/
+	err = BMI_CALL_API(set_command_register)(CMD_RESET_USER_REG);
+	if (err) {
+		client_data->selftest = 0;
+		return err;
+	}
+	msleep(50);
+
+	bmi_restore_hw_cfg(client_data);
+
+	client_data->selftest = 0;
+	dev_notice(client_data->dev, "Selftest for BMI16x finished\n");
+
+	return count;
+}
+
+/* gyro sensor part */
+static ssize_t bmi160_gyro_op_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	u8 gyro_pmu_status = 0;
+
+	err = BMI_CALL_API(get_gyro_power_mode_stat)(
+		&gyro_pmu_status);
+
+	if (err)
+		return err;
+
+	return snprintf(buf, 32, "reg:%d, val:%d\n", gyro_pmu_status,
+				client_data->pw.gyro_pm);
+}
+
+static ssize_t bmi160_gyro_op_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	unsigned long op_mode;
+	int err;
+
+	err = kstrtoul(buf, 10, &op_mode);
+	if (err)
+		return err;
+
+	mutex_lock(&client_data->mutex_op_mode);
+
+	if (op_mode < BMI_GYRO_PM_MAX) {
+		switch (op_mode) {
+		case BMI_GYRO_PM_NORMAL:
+			err = BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_NORMAL]);
+			client_data->pw.gyro_pm = BMI_GYRO_PM_NORMAL;
+			bmi_delay(60);
+			break;
+		case BMI_GYRO_PM_FAST_START:
+			err = BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_FAST_START]);
+			client_data->pw.gyro_pm = BMI_GYRO_PM_FAST_START;
+			bmi_delay(60);
+			break;
+		case BMI_GYRO_PM_SUSPEND:
+			err = BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_SUSPEND]);
+			client_data->pw.gyro_pm = BMI_GYRO_PM_SUSPEND;
+			bmi_delay(60);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	if (err)
+		return err;
+	else
+		return count;
+
+}
+
+static ssize_t bmi160_gyro_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct bmi160_gyro_t data;
+	int err;
+
+	err = BMI_CALL_API(read_gyro_xyz)(&data);
+	if (err < 0)
+		return err;
+
+
+	return snprintf(buf, 48, "%hd %hd %hd\n", data.x,
+				data.y, data.z);
+}
+
+static ssize_t bmi160_gyro_range_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char range;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = BMI_CALL_API(get_gyro_range)(&range);
+	if (err)
+		return err;
+
+	client_data->range.gyro_range = range;
+	return snprintf(buf, 16, "%d\n", range);
+}
+
+static ssize_t bmi160_gyro_range_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long range;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &range);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_gyro_range)(range);
+	if (err)
+		return -EIO;
+
+	client_data->range.gyro_range = range;
+	return count;
+}
+
+static ssize_t bmi160_gyro_odr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char gyro_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = BMI_CALL_API(get_gyro_output_data_rate)(&gyro_odr);
+	if (err)
+		return err;
+
+	client_data->odr.gyro_odr = gyro_odr;
+	return snprintf(buf, 16, "%d\n", gyro_odr);
+}
+
+static ssize_t bmi160_gyro_odr_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long gyro_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &gyro_odr);
+	if (err)
+		return err;
+
+	if (gyro_odr < 6 || gyro_odr > 13)
+		return -EIO;
+
+	err = BMI_CALL_API(set_gyro_output_data_rate)(gyro_odr);
+	if (err)
+		return -EIO;
+
+	client_data->odr.gyro_odr = gyro_odr;
+	return count;
+}
+
+static ssize_t bmi160_gyro_fast_calibration_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = BMI_CALL_API(get_foc_gyro_enable)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_gyro_fast_calibration_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long enable;
+	s8 err;
+	s16 gyr_off_x;
+	s16 gyr_off_y;
+	s16 gyr_off_z;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &enable);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_foc_gyro_enable)((u8)enable,
+				&gyr_off_x, &gyr_off_y, &gyr_off_z);
+
+	if (err < 0)
+		return -EIO;
+	else {
+		input_event(client_data->input, EV_MSC,
+			INPUT_EVENT_FAST_GYRO_CALIB_DONE, 1);
+		input_sync(client_data->input);
+	}
+	return count;
+}
+
+static ssize_t bmi160_gyro_offset_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s16 data = 0;
+	s8 err = 0;
+
+	err = BMI_CALL_API(get_gyro_offset_compensation_xaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_gyro_offset_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	s8 err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_gyro_offset_compensation_xaxis)((s16)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_gyro_offset_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s16 data = 0;
+	s8 err = 0;
+
+	err = BMI_CALL_API(get_gyro_offset_compensation_yaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_gyro_offset_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	s8 err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_gyro_offset_compensation_yaxis)((s16)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_gyro_offset_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s16 data = 0;
+	int err = 0;
+
+	err = BMI_CALL_API(get_gyro_offset_compensation_zaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t bmi160_gyro_offset_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = BMI_CALL_API(set_gyro_offset_compensation_zaxis)((s16)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+
+/* mag sensor part */
+#ifdef BMI160_MAG_INTERFACE_SUPPORT
+static ssize_t bmi160_mag_op_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	u8 mag_op_mode;
+	s8 err;
+	err = bmi160_get_mag_power_mode_stat(&mag_op_mode);
+	if (err) {
+		dev_err(client_data->dev,
+			"Failed to get BMI160 mag power mode:%d\n", err);
+		return err;
+	} else
+		return snprintf(buf, 32, "%d, reg:%d\n",
+					client_data->pw.mag_pm, mag_op_mode);
+}
+
+static ssize_t bmi160_mag_op_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	unsigned long op_mode;
+	int err;
+
+	err = kstrtoul(buf, 10, &op_mode);
+	if (err)
+		return err;
+
+	if (op_mode == client_data->pw.mag_pm)
+		return count;
+
+	mutex_lock(&client_data->mutex_op_mode);
+
+
+	if (op_mode < BMI_MAG_PM_MAX) {
+		switch (op_mode) {
+		case BMI_MAG_PM_NORMAL:
+			/* needs to be adjusted for the connected mag sensor:
+			 * set the write address to 0x4c and trigger the
+			 * write operation;
+			 * 0x4c (op mode control reg)
+			 * enables normal mode in the magnetometer */
+#if defined(BMI160_AKM09912_SUPPORT)
+			err = bmi160_set_bst_akm_and_secondary_if_powermode(
+			BMI160_MAG_FORCE_MODE);
+#else
+			err = bmi160_set_bmm150_mag_and_secondary_if_power_mode(
+			BMI160_MAG_FORCE_MODE);
+#endif
+			client_data->pw.mag_pm = BMI_MAG_PM_NORMAL;
+			bmi_delay(5);
+			break;
+		case BMI_MAG_PM_LP1:
+			/* needs to be adjusted for the connected mag sensor:
+			 * set the write address to 0x4b and trigger the
+			 * write operation;
+			 * 0x4b (bmm150 power control reg, bit0)
+			 * enables power in the magnetometer */
+#if defined(BMI160_AKM09912_SUPPORT)
+			err = bmi160_set_bst_akm_and_secondary_if_powermode(
+			BMI160_MAG_FORCE_MODE);
+#else
+			err = bmi160_set_bmm150_mag_and_secondary_if_power_mode(
+			BMI160_MAG_FORCE_MODE);
+#endif
+			client_data->pw.mag_pm = BMI_MAG_PM_LP1;
+			bmi_delay(5);
+			break;
+		case BMI_MAG_PM_SUSPEND:
+		case BMI_MAG_PM_LP2:
+#if defined(BMI160_AKM09912_SUPPORT)
+		err = bmi160_set_bst_akm_and_secondary_if_powermode(
+		BMI160_MAG_SUSPEND_MODE);
+#else
+		err = bmi160_set_bmm150_mag_and_secondary_if_power_mode(
+		BMI160_MAG_SUSPEND_MODE);
+#endif
+			client_data->pw.mag_pm = op_mode;
+			bmi_delay(5);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	if (err) {
+		dev_err(client_data->dev,
+			"Failed to switch BMI160 mag power mode:%d\n",
+			client_data->pw.mag_pm);
+		return err;
+	} else
+		return count;
+
+}
+
+static ssize_t bmi160_mag_odr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	unsigned char mag_odr = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = BMI_CALL_API(get_mag_output_data_rate)(&mag_odr);
+	if (err)
+		return err;
+
+	client_data->odr.mag_odr = mag_odr;
+	return snprintf(buf, 16, "%d\n", mag_odr);
+}
+
+static ssize_t bmi160_mag_odr_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long mag_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &mag_odr);
+	if (err)
+		return err;
+	/*1~25/32hz,..6(25hz),7(50hz),... */
+	err = BMI_CALL_API(set_mag_output_data_rate)(mag_odr);
+	if (err)
+		return -EIO;
+
+	client_data->odr.mag_odr = mag_odr;
+	return count;
+}
+
+static ssize_t bmi160_mag_i2c_address_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 data;
+	s8 err;
+
+	err = BMI_CALL_API(set_mag_manual_enable)(1);
+	err += BMI_CALL_API(get_i2c_device_addr)(&data);
+	err += BMI_CALL_API(set_mag_manual_enable)(0);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "0x%x\n", data);
+}
+
+static ssize_t bmi160_mag_i2c_address_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err += BMI_CALL_API(set_mag_manual_enable)(1);
+	if (!err)
+		err += BMI_CALL_API(set_i2c_device_addr)((unsigned char)data);
+	err += BMI_CALL_API(set_mag_manual_enable)(0);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_mag_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	struct bmi160_mag_xyz_s32_t data;
+	int err;
+	/* raw data with compensation */
+#if defined(BMI160_AKM09912_SUPPORT)
+	err = bmi160_bst_akm09912_compensate_xyz(&data);
+#else
+	err = bmi160_bmm150_mag_compensate_xyz(&data);
+#endif
+
+	if (err < 0) {
+		memset(&data, 0, sizeof(data));
+		dev_err(client_data->dev, "mag not ready!\n");
+	}
+	return snprintf(buf, 48, "%hd %hd %hd\n", data.x,
+				data.y, data.z);
+}
+static ssize_t bmi160_mag_offset_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	unsigned char mag_offset;
+	err = BMI_CALL_API(get_mag_offset)(&mag_offset);
+	if (err)
+		return err;
+
+	return snprintf(buf, 16, "%d\n", mag_offset);
+
+}
+
+static ssize_t bmi160_mag_offset_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err += BMI_CALL_API(set_mag_manual_enable)(1);
+	if (err == 0)
+		err += BMI_CALL_API(set_mag_offset)((unsigned char)data);
+	err += BMI_CALL_API(set_mag_manual_enable)(0);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t bmi160_mag_chip_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s8 err = 0;
+	u8 mag_chipid;
+
+	err = bmi160_set_mag_manual_enable(0x01);
+	/* read mag chip_id value */
+#if defined(BMI160_AKM09912_SUPPORT)
+	err += bmi160_set_mag_read_addr(AKM09912_CHIP_ID_REG);
+		/* 0x04 is mag_x lsb register */
+	err += bmi160_read_reg(BMI160_USER_DATA_0_MAG_X_LSB__REG,
+							&mag_chipid, 1);
+
+	/* must re-set the data register addr of the mag sensor afterwards */
+	err += bmi160_set_mag_read_addr(AKM_DATA_REGISTER);
+#else
+	err += bmi160_set_mag_read_addr(BMI160_BMM150_CHIP_ID);
+	/* 0x04 is mag_x lsb register */
+	err += bmi160_read_reg(BMI160_USER_DATA_0_MAG_X_LSB__REG,
+							&mag_chipid, 1);
+
+	/* must re-set the data register addr of the mag sensor afterwards */
+	/* 0x42 is  bmm150 data register address */
+	err += bmi160_set_mag_read_addr(BMI160_BMM150_DATA_REG);
+#endif
+
+	err += bmi160_set_mag_manual_enable(0x00);
+
+	if (err)
+		return err;
+
+	return snprintf(buf, 16, "%x\n", mag_chipid);
+
+}
+
+static ssize_t bmi160_mag_chip_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 mag_chipid = 0;
+#if defined(BMI160_AKM09912_SUPPORT)
+	mag_chipid = 15;
+#else
+	mag_chipid = 150;
+#endif
+	return snprintf(buf, 16, "%d\n", mag_chipid);
+}
+
+struct bmi160_mag_xyz_s32_t mag_compensate;
+static ssize_t bmi160_mag_compensate_xyz_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	memcpy(buf, &mag_compensate, sizeof(mag_compensate));
+	return sizeof(mag_compensate);
+}
+static ssize_t bmi160_mag_compensate_xyz_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct bmi160_mag_xyzr_t mag_raw;
+	memset(&mag_compensate, 0, sizeof(mag_compensate));
+	memset(&mag_raw, 0, sizeof(mag_raw));
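+	/* userspace passes four little-endian 16-bit raw words (x, y, z, r);
+	 * the shifts below drop the low bits of each raw field before the
+	 * values are handed to the bmm150 compensation routine */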
+	mag_raw.x = (buf[1] << 8 | buf[0]);
+	mag_raw.y = (buf[3] << 8 | buf[2]);
+	mag_raw.z = (buf[5] << 8 | buf[4]);
+	mag_raw.r = (buf[7] << 8 | buf[6]);
+	mag_raw.x = mag_raw.x >> 3;
+	mag_raw.y = mag_raw.y >> 3;
+	mag_raw.z = mag_raw.z >> 1;
+	mag_raw.r = mag_raw.r >> 2;
+	bmi160_bmm150_mag_compensate_xyz_raw(
+	&mag_compensate, mag_raw);
+	return count;
+}
+
+#endif
+
+#if defined(BMI160_ENABLE_INT1) || defined(BMI160_ENABLE_INT2)
+static ssize_t bmi_enable_int_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int interrupt_type, value;
+
+	if (sscanf(buf, "%3d %3d", &interrupt_type, &value) != 2)
+		return -EINVAL;
+
+	if (interrupt_type < 0 || interrupt_type > 16)
+		return -EINVAL;
+
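+	/* dispatch to the INT_EN_0/1/2 register group that holds the
+	 * requested interrupt type */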
+	if (interrupt_type <= BMI_FLAT_INT) {
+		if (BMI_CALL_API(set_intr_enable_0)
+				(bmi_interrupt_type[interrupt_type], value) < 0)
+			return -EINVAL;
+	} else if (interrupt_type <= BMI_FWM_INT) {
+		if (BMI_CALL_API(set_intr_enable_1)
+			(bmi_interrupt_type[interrupt_type], value) < 0)
+			return -EINVAL;
+	} else {
+		if (BMI_CALL_API(set_intr_enable_2)
+			(bmi_interrupt_type[interrupt_type], value) < 0)
+			return -EINVAL;
+	}
+
+	return count;
+}
+
+#endif
+
+static ssize_t bmi160_show_reg_sel(struct device *dev
+		, struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, 64, "reg=0X%02X, len=%d\n",
+		client_data->reg_sel, client_data->reg_len);
+}
+
+static ssize_t bmi160_store_reg_sel(struct device *dev
+		, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	ssize_t ret;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+	ret = sscanf(buf, "%11X %11d",
+		&client_data->reg_sel, &client_data->reg_len);
+	if (ret != 2) {
+		dev_err(client_data->dev, "Invalid argument");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t bmi160_show_reg_val(struct device *dev
+		, struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+
+	ssize_t ret;
+	u8 reg_data[128], i;
+	int pos;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+
+	ret = bmi_burst_read_wrapper(client_data->device.dev_addr,
+		client_data->reg_sel,
+		reg_data, client_data->reg_len);
+	if (ret < 0) {
+		dev_err(client_data->dev, "Reg op failed");
+		return ret;
+	}
+
+	pos = 0;
+	for (i = 0; i < client_data->reg_len; ++i) {
+		pos += snprintf(buf + pos, 16, "%02X", reg_data[i]);
+		buf[pos++] = (i + 1) % 16 == 0 ? '\n' : ' ';
+	}
+	if (buf[pos - 1] == ' ')
+		buf[pos - 1] = '\n';
+
+	return pos;
+}
+
+static ssize_t bmi160_store_reg_val(struct device *dev
+		, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	ssize_t ret;
+	u8 reg_data[32];
+	int i, j, status, digit;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+	status = 0;
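+	/* parse the buffer as whitespace-separated hex byte values; status
+	 * tracks how many hex digits of the current byte have been consumed */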
+	for (i = j = 0; i < count && j < client_data->reg_len; ++i) {
+		if (buf[i] == ' ' || buf[i] == '\n' || buf[i] == '\t' ||
+			buf[i] == '\r') {
+			status = 0;
+			++j;
+			continue;
+		}
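+		/* ASCII '0'-'9' have bit 0x10 set while 'A'-'F'/'a'-'f' do
+		 * not, so this maps a hex character to its 0..15 value */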
+		digit = buf[i] & 0x10 ? (buf[i] & 0xF) : ((buf[i] & 0xF) + 9);
+		printk(KERN_INFO "digit is %d", digit);
+		switch (status) {
+		case 2:
+			++j; /* Fall thru */
+		case 0:
+			reg_data[j] = digit;
+			status = 1;
+			break;
+		case 1:
+			reg_data[j] = reg_data[j] * 16 + digit;
+			status = 2;
+			break;
+		}
+	}
+	if (status > 0)
+		++j;
+	if (j > client_data->reg_len)
+		j = client_data->reg_len;
+	else if (j < client_data->reg_len) {
+		dev_err(client_data->dev, "Invalid argument");
+		return -EINVAL;
+	}
+	printk(KERN_INFO "Reg data read as");
+	for (i = 0; i < j; ++i)
+		printk(KERN_INFO "%d", reg_data[i]);
+
+	ret = BMI_CALL_API(write_reg)(
+		client_data->reg_sel,
+		reg_data, client_data->reg_len);
+	if (ret < 0) {
+		dev_err(client_data->dev, "Reg op failed");
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t bmi160_driver_version_show(struct device *dev
+		, struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct bmi_client_data *client_data = input_get_drvdata(input);
+	int ret;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+
+	ret = snprintf(buf, 128, "Driver version: %s\n",
+			DRIVER_VERSION);
+
+	return ret;
+}
+static DEVICE_ATTR(chip_id, S_IRUGO,
+		bmi160_chip_id_show, NULL);
+static DEVICE_ATTR(err_st, S_IRUGO,
+		bmi160_err_st_show, NULL);
+static DEVICE_ATTR(sensor_time, S_IRUGO,
+		bmi160_sensor_time_show, NULL);
+
+static DEVICE_ATTR(selftest, S_IRUGO | S_IWUSR,
+		bmi160_selftest_show, bmi160_selftest_store);
+static DEVICE_ATTR(fifo_flush, S_IRUGO | S_IWUSR,
+		NULL, bmi160_fifo_flush_store);
+static DEVICE_ATTR(fifo_bytecount, S_IRUGO | S_IWUSR,
+		bmi160_fifo_bytecount_show, bmi160_fifo_bytecount_store);
+static DEVICE_ATTR(fifo_data_sel, S_IRUGO | S_IWUSR,
+		bmi160_fifo_data_sel_show, bmi160_fifo_data_sel_store);
+static DEVICE_ATTR(fifo_data_frame, S_IRUGO,
+		bmi160_fifo_data_out_frame_show, NULL);
+
+static DEVICE_ATTR(fifo_watermark, S_IRUGO | S_IWUSR,
+		bmi160_fifo_watermark_show, bmi160_fifo_watermark_store);
+
+static DEVICE_ATTR(fifo_header_en, S_IRUGO | S_IWUSR,
+		bmi160_fifo_header_en_show, bmi160_fifo_header_en_store);
+static DEVICE_ATTR(fifo_time_en, S_IRUGO | S_IWUSR,
+		bmi160_fifo_time_en_show, bmi160_fifo_time_en_store);
+static DEVICE_ATTR(fifo_int_tag_en, S_IRUGO | S_IWUSR,
+		bmi160_fifo_int_tag_en_show, bmi160_fifo_int_tag_en_store);
+
+static DEVICE_ATTR(temperature, S_IRUGO,
+		bmi160_temperature_show, NULL);
+static DEVICE_ATTR(place, S_IRUGO,
+		bmi160_place_show, NULL);
+static DEVICE_ATTR(delay, S_IRUGO | S_IWUSR,
+		bmi160_delay_show, bmi160_delay_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+		bmi160_enable_show, bmi160_enable_store);
+static DEVICE_ATTR(acc_range, S_IRUGO | S_IWUSR,
+		bmi160_acc_range_show, bmi160_acc_range_store);
+static DEVICE_ATTR(acc_odr, S_IRUGO | S_IWUSR,
+		bmi160_acc_odr_show, bmi160_acc_odr_store);
+static DEVICE_ATTR(acc_op_mode, S_IRUGO | S_IWUSR,
+		bmi160_acc_op_mode_show, bmi160_acc_op_mode_store);
+static DEVICE_ATTR(acc_value, S_IRUGO,
+		bmi160_acc_value_show, NULL);
+static DEVICE_ATTR(acc_fast_calibration_x, S_IRUGO | S_IWUSR,
+		bmi160_acc_fast_calibration_x_show,
+		bmi160_acc_fast_calibration_x_store);
+static DEVICE_ATTR(acc_fast_calibration_y, S_IRUGO | S_IWUSR,
+		bmi160_acc_fast_calibration_y_show,
+		bmi160_acc_fast_calibration_y_store);
+static DEVICE_ATTR(acc_fast_calibration_z, S_IRUGO | S_IWUSR,
+		bmi160_acc_fast_calibration_z_show,
+		bmi160_acc_fast_calibration_z_store);
+static DEVICE_ATTR(acc_offset_x, S_IRUGO | S_IWUSR,
+		bmi160_acc_offset_x_show,
+		bmi160_acc_offset_x_store);
+static DEVICE_ATTR(acc_offset_y, S_IRUGO | S_IWUSR,
+		bmi160_acc_offset_y_show,
+		bmi160_acc_offset_y_store);
+static DEVICE_ATTR(acc_offset_z, S_IRUGO | S_IWUSR,
+		bmi160_acc_offset_z_show,
+		bmi160_acc_offset_z_store);
+static DEVICE_ATTR(test, S_IRUGO,
+		bmi160_test_show, NULL);
+static DEVICE_ATTR(stc_enable, S_IRUGO | S_IWUSR,
+		bmi160_step_counter_enable_show,
+		bmi160_step_counter_enable_store);
+static DEVICE_ATTR(stc_mode, S_IRUGO | S_IWUSR,
+		NULL, bmi160_step_counter_mode_store);
+static DEVICE_ATTR(stc_clc, S_IRUGO | S_IWUSR,
+		NULL, bmi160_step_counter_clc_store);
+static DEVICE_ATTR(stc_value, S_IRUGO,
+		bmi160_step_counter_value_show, NULL);
+static DEVICE_ATTR(reg_sel, S_IRUGO | S_IWUSR,
+		bmi160_show_reg_sel, bmi160_store_reg_sel);
+static DEVICE_ATTR(reg_val, S_IRUGO | S_IWUSR,
+		bmi160_show_reg_val, bmi160_store_reg_val);
+static DEVICE_ATTR(driver_version, S_IRUGO,
+		bmi160_driver_version_show, NULL);
+/* gyro part */
+static DEVICE_ATTR(gyro_op_mode, S_IRUGO | S_IWUSR,
+		bmi160_gyro_op_mode_show, bmi160_gyro_op_mode_store);
+static DEVICE_ATTR(gyro_value, S_IRUGO,
+		bmi160_gyro_value_show, NULL);
+static DEVICE_ATTR(gyro_range, S_IRUGO | S_IWUSR,
+		bmi160_gyro_range_show, bmi160_gyro_range_store);
+static DEVICE_ATTR(gyro_odr, S_IRUGO | S_IWUSR,
+		bmi160_gyro_odr_show, bmi160_gyro_odr_store);
+static DEVICE_ATTR(gyro_fast_calibration_en, S_IRUGO | S_IWUSR,
+bmi160_gyro_fast_calibration_en_show, bmi160_gyro_fast_calibration_en_store);
+static DEVICE_ATTR(gyro_offset_x, S_IRUGO | S_IWUSR,
+bmi160_gyro_offset_x_show, bmi160_gyro_offset_x_store);
+static DEVICE_ATTR(gyro_offset_y, S_IRUGO | S_IWUSR,
+bmi160_gyro_offset_y_show, bmi160_gyro_offset_y_store);
+static DEVICE_ATTR(gyro_offset_z, S_IRUGO | S_IWUSR,
+bmi160_gyro_offset_z_show, bmi160_gyro_offset_z_store);
+
+#ifdef BMI160_MAG_INTERFACE_SUPPORT
+static DEVICE_ATTR(mag_op_mode, S_IRUGO | S_IWUSR,
+		bmi160_mag_op_mode_show, bmi160_mag_op_mode_store);
+static DEVICE_ATTR(mag_odr, S_IRUGO | S_IWUSR,
+		bmi160_mag_odr_show, bmi160_mag_odr_store);
+static DEVICE_ATTR(mag_i2c_addr, S_IRUGO | S_IWUSR,
+		bmi160_mag_i2c_address_show, bmi160_mag_i2c_address_store);
+static DEVICE_ATTR(mag_value, S_IRUGO,
+		bmi160_mag_value_show, NULL);
+static DEVICE_ATTR(mag_offset, S_IRUGO | S_IWUSR,
+		bmi160_mag_offset_show, bmi160_mag_offset_store);
+static DEVICE_ATTR(mag_chip_id, S_IRUGO,
+		bmi160_mag_chip_id_show, NULL);
+static DEVICE_ATTR(mag_chip_name, S_IRUGO,
+		bmi160_mag_chip_name_show, NULL);
+static DEVICE_ATTR(mag_compensate, S_IRUGO | S_IWUSR,
+		bmi160_mag_compensate_xyz_show,
+		bmi160_mag_compensate_xyz_store);
+#endif
+
+
+#if defined(BMI160_ENABLE_INT1) || defined(BMI160_ENABLE_INT2)
+static DEVICE_ATTR(enable_int, S_IRUGO | S_IWUSR,
+		NULL, bmi_enable_int_store);
+static DEVICE_ATTR(anymot_duration, S_IRUGO | S_IWUSR,
+		bmi160_anymot_duration_show, bmi160_anymot_duration_store);
+static DEVICE_ATTR(anymot_threshold, S_IRUGO | S_IWUSR,
+		bmi160_anymot_threshold_show, bmi160_anymot_threshold_store);
+static DEVICE_ATTR(std_stu, S_IRUGO,
+		bmi160_step_detector_status_show, NULL);
+static DEVICE_ATTR(std_en, S_IRUGO | S_IWUSR,
+		bmi160_step_detector_enable_show,
+		bmi160_step_detector_enable_store);
+static DEVICE_ATTR(sig_en, S_IRUGO | S_IWUSR,
+		bmi160_signification_motion_enable_show,
+		bmi160_signification_motion_enable_store);
+
+#endif
+
+
+
+static DEVICE_ATTR(bmi_value, S_IRUGO,
+		bmi160_bmi_value_show, NULL);
+
+
+static struct attribute *bmi160_attributes[] = {
+	&dev_attr_chip_id.attr,
+	&dev_attr_err_st.attr,
+	&dev_attr_sensor_time.attr,
+	&dev_attr_selftest.attr,
+	&dev_attr_driver_version.attr,
+	&dev_attr_test.attr,
+	&dev_attr_fifo_flush.attr,
+	&dev_attr_fifo_header_en.attr,
+	&dev_attr_fifo_time_en.attr,
+	&dev_attr_fifo_int_tag_en.attr,
+	&dev_attr_fifo_bytecount.attr,
+	&dev_attr_fifo_data_sel.attr,
+	&dev_attr_fifo_data_frame.attr,
+
+	&dev_attr_fifo_watermark.attr,
+
+	&dev_attr_enable.attr,
+	&dev_attr_delay.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_place.attr,
+
+	&dev_attr_acc_range.attr,
+	&dev_attr_acc_odr.attr,
+	&dev_attr_acc_op_mode.attr,
+	&dev_attr_acc_value.attr,
+
+	&dev_attr_acc_fast_calibration_x.attr,
+	&dev_attr_acc_fast_calibration_y.attr,
+	&dev_attr_acc_fast_calibration_z.attr,
+	&dev_attr_acc_offset_x.attr,
+	&dev_attr_acc_offset_y.attr,
+	&dev_attr_acc_offset_z.attr,
+
+	&dev_attr_stc_enable.attr,
+	&dev_attr_stc_mode.attr,
+	&dev_attr_stc_clc.attr,
+	&dev_attr_stc_value.attr,
+
+	&dev_attr_gyro_op_mode.attr,
+	&dev_attr_gyro_value.attr,
+	&dev_attr_gyro_range.attr,
+	&dev_attr_gyro_odr.attr,
+	&dev_attr_gyro_fast_calibration_en.attr,
+	&dev_attr_gyro_offset_x.attr,
+	&dev_attr_gyro_offset_y.attr,
+	&dev_attr_gyro_offset_z.attr,
+
+#ifdef BMI160_MAG_INTERFACE_SUPPORT
+	&dev_attr_mag_chip_id.attr,
+	&dev_attr_mag_op_mode.attr,
+	&dev_attr_mag_odr.attr,
+	&dev_attr_mag_i2c_addr.attr,
+	&dev_attr_mag_chip_name.attr,
+	&dev_attr_mag_value.attr,
+	&dev_attr_mag_offset.attr,
+	&dev_attr_mag_compensate.attr,
+#endif
+
+#if defined(BMI160_ENABLE_INT1) || defined(BMI160_ENABLE_INT2)
+	&dev_attr_enable_int.attr,
+
+	&dev_attr_anymot_duration.attr,
+	&dev_attr_anymot_threshold.attr,
+	&dev_attr_std_stu.attr,
+	&dev_attr_std_en.attr,
+	&dev_attr_sig_en.attr,
+
+#endif
+	&dev_attr_reg_sel.attr,
+	&dev_attr_reg_val.attr,
+	&dev_attr_bmi_value.attr,
+	NULL
+};
+
+static struct attribute_group bmi160_attribute_group = {
+	.attrs = bmi160_attributes
+};
+
+#if defined(BMI160_ENABLE_INT1) || defined(BMI160_ENABLE_INT2)
+static void bmi_slope_interrupt_handle(struct bmi_client_data *client_data)
+{
+	/* anym_first[0..2]: x, y, z */
+	u8 anym_first[3] = {0};
+	u8 status2;
+	u8 anym_sign;
+	u8 i = 0;
+
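+	/* INTR_STAT_2 reports which axis triggered any-motion first and the
+	 * sign of the triggering slope */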
+	client_data->device.bus_read(client_data->device.dev_addr,
+				BMI160_USER_INTR_STAT_2_ADDR, &status2, 1);
+	anym_first[0] = BMI160_GET_BITSLICE(status2,
+				BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_X);
+	anym_first[1] = BMI160_GET_BITSLICE(status2,
+				BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y);
+	anym_first[2] = BMI160_GET_BITSLICE(status2,
+				BMI160_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z);
+	anym_sign = BMI160_GET_BITSLICE(status2,
+				BMI160_USER_INTR_STAT_2_ANY_MOTION_SIGN);
+
+	for (i = 0; i < 3; i++) {
+		if (anym_first[i]) {
+			/*1: negative*/
+			if (anym_sign)
+				dev_notice(client_data->dev,
+				"Anymotion interrupt happened! "
+				"%s axis, negative sign\n", bmi_axis_name[i]);
+			else
+				dev_notice(client_data->dev,
+				"Anymotion interrupt happened! "
+				"%s axis, positive sign\n", bmi_axis_name[i]);
+		}
+	}
+
+
+}
+
+static void bmi_fifo_watermark_interrupt_handle
+				(struct bmi_client_data *client_data)
+{
+	int err = 0;
+	unsigned int fifo_len0 = 0;
+	unsigned int  fifo_frmbytes_ext = 0;
+	unsigned char *fifo_data = NULL;
+	fifo_data = kzalloc(FIFO_DATA_BUFSIZE, GFP_KERNEL);
+	if (NULL == fifo_data) {
+		dev_err(client_data->dev, "no memory available");
+		return;
+	}
+	bmi_fifo_frame_bytes_extend_calc(client_data, &fifo_frmbytes_ext);
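+	/* fifo_frmbytes_ext: extra bytes read on top of the reported FIFO
+	 * fill level, presumably to catch a partially written last frame */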
+
+	if (client_data->pw.acc_pm == 2 && client_data->pw.gyro_pm == 2
+					&& client_data->pw.mag_pm == 2)
+		printk(KERN_INFO "pw_acc: %d, pw_gyro: %d\n",
+			client_data->pw.acc_pm, client_data->pw.gyro_pm);
+	if (!client_data->fifo_data_sel)
+		printk(KERN_INFO "no sensor selected for fifo, fifo_data_sel:%d\n",
+						client_data->fifo_data_sel);
+
+	err = BMI_CALL_API(fifo_length)(&fifo_len0);
+	client_data->fifo_bytecount = fifo_len0;
+
+	if (client_data->fifo_bytecount == 0 || err) {
+		kfree(fifo_data);
+		return;
+	}
+
+	if (client_data->fifo_bytecount + fifo_frmbytes_ext > FIFO_DATA_BUFSIZE)
+		client_data->fifo_bytecount = FIFO_DATA_BUFSIZE;
+	/* pay attention to the time cost of the burst read */
+	if (!err) {
+		err = bmi_burst_read_wrapper(client_data->device.dev_addr,
+			BMI160_USER_FIFO_DATA__REG, fifo_data,
+			client_data->fifo_bytecount + fifo_frmbytes_ext);
+	} else
+		dev_err(client_data->dev, "read fifo length err");
+
+	if (err)
+		dev_err(client_data->dev, "burst read fifo err\n");
+	/*err = bmi_fifo_analysis_handle(client_data, fifo_data,
+			client_data->fifo_bytecount + 20, fifo_out_data);*/
+	if (fifo_data != NULL) {
+		kfree(fifo_data);
+		fifo_data = NULL;
+	}
+
+}
+static void bmi_data_ready_interrupt_handle(
+	struct bmi_client_data *client_data, uint8_t status)
+{
+	uint8_t data12[12] = {0};
+	struct bmi160_accel_t accel;
+	struct bmi160_gyro_t gyro;
+	struct timespec ts;
+	client_data->device.bus_read(client_data->device.dev_addr,
+	BMI160_USER_DATA_8_ADDR, data12, 12);
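+	/* data12[0..5] hold gyro x/y/z, data12[6..11] hold accel x/y/z,
+	 * each as LSB followed by MSB */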
+	if (status & 0x80) {
+		/*report acc data*/
+		/* Data X */
+		accel.x = (s16)((((s32)((s8)data12[7])) << BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[6]));
+		/* Data Y */
+		accel.y = (s16)((((s32)((s8)data12[9])) << BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[8]));
+		/* Data Z */
+		accel.z = (s16)((((s32)((s8)data12[11]))<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[10]));
+		ts = ns_to_timespec(client_data->timestamp);
+		//printk("acc puneet ts.tv_sec %ld ts.tv_nsec %ld\n",ts.tv_sec,ts.tv_nsec);
+		input_event(client_data->input, EV_MSC, 6, ts.tv_sec);
+		input_event(client_data->input, EV_MSC, 6, ts.tv_nsec);
+		input_event(client_data->input, EV_MSC, MSC_GESTURE, accel.x);
+		input_event(client_data->input, EV_MSC, MSC_RAW, accel.y);
+		input_event(client_data->input, EV_MSC, MSC_SCAN, accel.z);
+		input_sync(client_data->input);
+	}
+	if (status & 0x40) {
+		/*report gyro data*/
+		/* Data X */
+		gyro.x = (s16)((((s32)((s8)data12[1])) << BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[0]));
+		/* Data Y */
+		gyro.y = (s16)((((s32)((s8)data12[3])) << BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[2]));
+		/* Data Z */
+		gyro.z = (s16)((((s32)((s8)data12[5]))<< BMI160_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[4]));
+		ts = ns_to_timespec(client_data->timestamp);
+		//printk("gyro puneet ts.tv_sec %ld ts.tv_nsec %ld\n",ts.tv_sec,ts.tv_nsec);
+		input_event(client_data->gyro_input, EV_MSC, 6, ts.tv_sec);
+		input_event(client_data->gyro_input, EV_MSC, 6, ts.tv_nsec);
+		input_event(client_data->gyro_input, EV_MSC, MSC_GESTURE, gyro.x);
+		input_event(client_data->gyro_input, EV_MSC, MSC_RAW, gyro.y);
+		input_event(client_data->gyro_input, EV_MSC, MSC_SCAN, gyro.z);
+		input_sync(client_data->gyro_input);
+	}
+}
+
+static void bmi_signification_motion_interrupt_handle(
+		struct bmi_client_data *client_data)
+{
+	printk(KERN_INFO "bmi_signification_motion_interrupt_handle\n");
+	input_event(client_data->input, EV_MSC, INPUT_EVENT_SGM, 1);
+/*input_report_rel(client_data->input,INPUT_EVENT_SGM,1);*/
+	input_sync(client_data->input);
+	bmi160_set_command_register(CMD_RESET_INT_ENGINE);
+
+}
+static void bmi_stepdetector_interrupt_handle(
+	struct bmi_client_data *client_data)
+{
+	u8 current_step_dector_st = 0;
+	client_data->pedo_data.wkar_step_detector_status++;
+	current_step_dector_st =
+		client_data->pedo_data.wkar_step_detector_status;
+	client_data->std = ((current_step_dector_st == 1) ? 0 : 1);
+
+	input_event(client_data->input, EV_MSC, INPUT_EVENT_STEP_DETECTOR, 1);
+	input_sync(client_data->input);
+}
+
+static void bmi_irq_work_func(struct work_struct *work)
+{
+	struct bmi_client_data *client_data =
+		container_of((struct work_struct *)work,
+			struct bmi_client_data, irq_work);
+
+	unsigned char int_status[4] = {0, 0, 0, 0};
+	uint8_t status = 0;
+
+	//client_data->device.bus_read(client_data->device.dev_addr,
+	//			BMI160_USER_INTR_STAT_0_ADDR, int_status, 4);
+	client_data->device.bus_read(client_data->device.dev_addr,
+	BMI160_USER_STAT_ADDR, &status, 1);
+	//printk("status = 0x%x", status);
+	if (BMI160_GET_BITSLICE(int_status[0],
+					BMI160_USER_INTR_STAT_0_ANY_MOTION))
+		bmi_slope_interrupt_handle(client_data);
+
+	if (BMI160_GET_BITSLICE(int_status[0],
+			BMI160_USER_INTR_STAT_0_STEP_INTR))
+		bmi_stepdetector_interrupt_handle(client_data);
+	if (BMI160_GET_BITSLICE(int_status[1],
+			BMI160_USER_INTR_STAT_1_FIFO_WM_INTR))
+		bmi_fifo_watermark_interrupt_handle(client_data);
+	if ((status & 0x80) || (status & 0x40))
+		bmi_data_ready_interrupt_handle(client_data, status);
+	/* Clear ALL interrupt status after handling sig motion */
+	/* Keep this command as the last one */
+	if (BMI160_GET_BITSLICE(int_status[0],
+		BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR))
+		bmi_signification_motion_interrupt_handle(client_data);
+
+}
+
+static void bmi160_delay_sigmo_work_func(struct work_struct *work)
+{
+	struct bmi_client_data *client_data =
+	container_of(work, struct bmi_client_data,
+	delay_work_sig.work);
+	unsigned char int_status[4] = {0, 0, 0, 0};
+
+	client_data->device.bus_read(client_data->device.dev_addr,
+				BMI160_USER_INTR_STAT_0_ADDR, int_status, 4);
+	if (BMI160_GET_BITSLICE(int_status[0],
+		BMI160_USER_INTR_STAT_0_SIGNIFICANT_INTR))
+		bmi_signification_motion_interrupt_handle(client_data);
+}
+
+static irqreturn_t bmi_irq_handler(int irq, void *handle)
+{
+	struct bmi_client_data *client_data = handle;
+	int in_suspend_copy;
+
+	if (client_data == NULL)
+		return IRQ_HANDLED;
+	if (client_data->dev == NULL)
+		return IRQ_HANDLED;
+	in_suspend_copy = atomic_read(&client_data->in_suspend);
+	client_data->timestamp = get_current_timestamp();
+	/* this only deals with the SIG_motion CTS test */
+	if ((in_suspend_copy == 1) &&
+		(client_data->sig_flag == 1)) {
+		/*wake_lock_timeout(&client_data->wakelock, HZ);*/
+		schedule_delayed_work(&client_data->delay_work_sig,
+			msecs_to_jiffies(50));
+	}
+	schedule_work(&client_data->irq_work);
+
+	return IRQ_HANDLED;
+}
+#endif /* defined(BMI_ENABLE_INT1)||defined(BMI_ENABLE_INT2) */
+
+static int bmi_restore_hw_cfg(struct bmi_client_data *client)
+{
+	int err = 0;
+
+	if ((client->fifo_data_sel) & (1 << BMI_ACC_SENSOR)) {
+		err += BMI_CALL_API(set_accel_range)(client->range.acc_range);
+		err += BMI_CALL_API(set_accel_output_data_rate)
+				(client->odr.acc_odr);
+		err += BMI_CALL_API(set_fifo_accel_enable)(1);
+	}
+	if ((client->fifo_data_sel) & (1 << BMI_GYRO_SENSOR)) {
+		err += BMI_CALL_API(set_gyro_range)(client->range.gyro_range);
+		err += BMI_CALL_API(set_gyro_output_data_rate)
+				(client->odr.gyro_odr);
+		err += BMI_CALL_API(set_fifo_gyro_enable)(1);
+	}
+	if ((client->fifo_data_sel) & (1 << BMI_MAG_SENSOR)) {
+		err += BMI_CALL_API(set_mag_output_data_rate)
+				(client->odr.mag_odr);
+		err += BMI_CALL_API(set_fifo_mag_enable)(1);
+	}
+	err += BMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+
+	mutex_lock(&client->mutex_op_mode);
+	if (client->pw.acc_pm != BMI_ACC_PM_SUSPEND) {
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_NORMAL]);
+		bmi_delay(3);
+	}
+	mutex_unlock(&client->mutex_op_mode);
+
+	mutex_lock(&client->mutex_op_mode);
+	if (client->pw.gyro_pm != BMI_GYRO_PM_SUSPEND) {
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_NORMAL]);
+		bmi_delay(3);
+	}
+	mutex_unlock(&client->mutex_op_mode);
+
+	mutex_lock(&client->mutex_op_mode);
+
+	if (client->pw.mag_pm != BMI_MAG_PM_SUSPEND) {
+#ifdef BMI160_AKM09912_SUPPORT
+		err += bmi160_set_bst_akm_and_secondary_if_powermode
+					(BMI160_MAG_FORCE_MODE);
+#else
+		err += bmi160_set_bmm150_mag_and_secondary_if_power_mode
+					(BMI160_MAG_FORCE_MODE);
+#endif
+		bmi_delay(3);
+	}
+	mutex_unlock(&client->mutex_op_mode);
+
+	return err;
+}
+
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+static void bmi160_accel_work_fn(struct work_struct *work)
+{
+	struct bmi_client_data *sensor;
+	ktime_t timestamp;
+	struct bmi160_accel_t data;
+	int err;
+	sensor = container_of((struct delayed_work *)work,
+				struct bmi_client_data, accel_poll_work);
+	timestamp = ktime_get();
+	err = BMI_CALL_API(read_accel_xyz)(&data);
+	if (err)
+		dev_err(sensor->dev, "read data err");
+	input_report_abs(sensor->input, ABS_X,
+		(data.x));
+	input_report_abs(sensor->input, ABS_Y,
+		(data.y));
+	input_report_abs(sensor->input, ABS_Z,
+		(data.z));
+	input_event(sensor->input,
+			EV_SYN, SYN_TIME_SEC,
+			ktime_to_timespec(timestamp).tv_sec);
+	input_event(sensor->input, EV_SYN,
+		SYN_TIME_NSEC,
+		ktime_to_timespec(timestamp).tv_nsec);
+	input_sync(sensor->input);
+	if (atomic_read(&sensor->accel_en))
+		queue_delayed_work(sensor->data_wq,
+			&sensor->accel_poll_work,
+			msecs_to_jiffies(sensor->accel_poll_ms));
+}
+static void bmi160_gyro_work_fn(struct work_struct *work)
+{
+	struct bmi_client_data *sensor;
+	ktime_t timestamp;
+	struct bmi160_gyro_t data;
+	int err;
+	sensor = container_of((struct delayed_work *)work,
+				struct bmi_client_data, gyro_poll_work);
+	timestamp = ktime_get();
+	err = BMI_CALL_API(read_gyro_xyz)(&data);
+	if (err)
+		dev_err(sensor->dev, "read data err");
+	input_report_abs(sensor->gyro_input, ABS_RX,
+		(data.x));
+	input_report_abs(sensor->gyro_input, ABS_RY,
+		(data.y));
+	input_report_abs(sensor->gyro_input, ABS_RZ,
+		(data.z));
+	input_event(sensor->gyro_input,
+			EV_SYN, SYN_TIME_SEC,
+			ktime_to_timespec(timestamp).tv_sec);
+	input_event(sensor->gyro_input, EV_SYN,
+		SYN_TIME_NSEC,
+		ktime_to_timespec(timestamp).tv_nsec);
+	input_sync(sensor->gyro_input);
+	if (atomic_read(&sensor->gyro_en))
+		queue_delayed_work(sensor->data_wq,
+			&sensor->gyro_poll_work,
+			msecs_to_jiffies(sensor->gyro_poll_ms));
+}
+static int bmi160_set_gyro_op_mode(struct bmi_client_data *client_data,
+							unsigned long op_mode)
+{
+	int err = 0;
+	mutex_lock(&client_data->mutex_op_mode);
+	if (op_mode < BMI_GYRO_PM_MAX) {
+		switch (op_mode) {
+		case BMI_GYRO_PM_NORMAL:
+			err = BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_NORMAL]);
+			client_data->pw.gyro_pm = BMI_GYRO_PM_NORMAL;
+			bmi_delay(60);
+			break;
+		case BMI_GYRO_PM_FAST_START:
+			err = BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_FAST_START]);
+			client_data->pw.gyro_pm = BMI_GYRO_PM_FAST_START;
+			bmi_delay(60);
+			break;
+		case BMI_GYRO_PM_SUSPEND:
+			err = BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_SUSPEND]);
+			client_data->pw.gyro_pm = BMI_GYRO_PM_SUSPEND;
+			bmi_delay(60);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+	mutex_unlock(&client_data->mutex_op_mode);
+	return err;
+}
+static int bmi160_accel_set_enable(
+	struct bmi_client_data *client_data, bool enable)
+{
+	int ret = 0;
+	dev_notice(client_data->dev,
+		"bmi160_accel_set_enable enable=%d\n", enable);
+	if (enable) {
+		ret = bmi160_set_acc_op_mode(client_data, 0);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to enable accel engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+		queue_delayed_work(client_data->data_wq,
+				&client_data->accel_poll_work,
+				msecs_to_jiffies(client_data->accel_poll_ms));
+		atomic_set(&client_data->accel_en, 1);
+	} else {
+		atomic_set(&client_data->accel_en, 0);
+		cancel_delayed_work_sync(&client_data->accel_poll_work);
+		ret = bmi160_set_acc_op_mode(client_data, 2);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to disable accel engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+static int bmi160_accel_set_poll_delay(struct bmi_client_data *client_data,
+					unsigned long delay)
+{
+	dev_info(client_data->dev,
+		"bmi160_accel_set_poll_delay delay_ms=%ld\n", delay);
+	if (delay < BMI160_ACCEL_MIN_POLL_INTERVAL_MS)
+		delay = BMI160_ACCEL_MIN_POLL_INTERVAL_MS;
+	if (delay > BMI160_ACCEL_MAX_POLL_INTERVAL_MS)
+		delay = BMI160_ACCEL_MAX_POLL_INTERVAL_MS;
+	client_data->accel_poll_ms = delay;
+	if (!atomic_read(&client_data->accel_en))
+		goto exit;
+	cancel_delayed_work_sync(&client_data->accel_poll_work);
+	queue_delayed_work(client_data->data_wq,
+			&client_data->accel_poll_work,
+			msecs_to_jiffies(client_data->accel_poll_ms));
+exit:
+	return 0;
+}
+static int bmi160_gyro_set_enable(
+	struct bmi_client_data *client_data, bool enable)
+{
+	int ret = 0;
+	dev_notice(client_data->dev,
+		"bmi160_gyro_set_enable enable=%d\n", enable);
+	if (enable) {
+		ret = bmi160_set_gyro_op_mode(client_data, 0);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to enable gyro engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+		queue_delayed_work(client_data->data_wq,
+				&client_data->gyro_poll_work,
+				msecs_to_jiffies(client_data->gyro_poll_ms));
+		atomic_set(&client_data->gyro_en, 1);
+	} else {
+		atomic_set(&client_data->gyro_en, 0);
+		cancel_delayed_work_sync(&client_data->gyro_poll_work);
+		ret = bmi160_set_gyro_op_mode(client_data, 2);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to disable gyro engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+static int bmi160_gyro_set_poll_delay(struct bmi_client_data *client_data,
+					unsigned long delay)
+{
+	dev_info(client_data->dev,
+		"bmi160_gyro_set_poll_delay delay_ms=%ld\n", delay);
+	if (delay < BMI160_GYRO_MIN_POLL_INTERVAL_MS)
+		delay = BMI160_GYRO_MIN_POLL_INTERVAL_MS;
+	if (delay > BMI160_GYRO_MAX_POLL_INTERVAL_MS)
+		delay = BMI160_GYRO_MAX_POLL_INTERVAL_MS;
+	client_data->gyro_poll_ms = delay;
+	if (!atomic_read(&client_data->gyro_en))
+		goto exit;
+	cancel_delayed_work_sync(&client_data->gyro_poll_work);
+	queue_delayed_work(client_data->data_wq,
+			&client_data->gyro_poll_work,
+			msecs_to_jiffies(client_data->gyro_poll_ms));
+exit:
+	return 0;
+}
+static int bmi160_accel_cdev_enable(struct sensors_classdev *sensors_cdev,
+			unsigned int enable)
+{
+	struct bmi_client_data *sensor = container_of(sensors_cdev,
+			struct bmi_client_data, accel_cdev);
+	return bmi160_accel_set_enable(sensor, enable);
+}
+static int bmi160_accel_cdev_poll_delay(struct sensors_classdev *sensors_cdev,
+			unsigned int delay_ms)
+{
+	struct bmi_client_data *sensor = container_of(sensors_cdev,
+			struct bmi_client_data, accel_cdev);
+
+	return bmi160_accel_set_poll_delay(sensor, delay_ms);
+}
+
+static int bmi160_gyro_cdev_enable(struct sensors_classdev *sensors_cdev,
+			unsigned int enable)
+{
+	struct bmi_client_data *sensor = container_of(sensors_cdev,
+			struct bmi_client_data, gyro_cdev);
+
+	return bmi160_gyro_set_enable(sensor, enable);
+}
+
+static int bmi160_gyro_cdev_poll_delay(struct sensors_classdev *sensors_cdev,
+			unsigned int delay_ms)
+{
+	struct bmi_client_data *sensor = container_of(sensors_cdev,
+			struct bmi_client_data, gyro_cdev);
+
+	return	bmi160_gyro_set_poll_delay(sensor, delay_ms);
+}
+#endif
+
+int bmi_probe(struct bmi_client_data *client_data, struct device *dev)
+{
+	int err = 0;
+#ifdef BMI160_MAG_INTERFACE_SUPPORT
+	u8 mag_dev_addr;
+	u8 mag_urst_len;
+	u8 mag_op_mode;
+#endif
+	/* check chip id */
+	err = bmi_check_chip_id(client_data);
+	if (err)
+		goto exit_err_clean;
+
+	dev_set_drvdata(dev, client_data);
+	client_data->dev = dev;
+
+	mutex_init(&client_data->mutex_enable);
+	mutex_init(&client_data->mutex_op_mode);
+
+	/* input device init */
+	err = bmi_input_init(client_data);
+	if (err < 0)
+		goto exit_err_clean;
+
+	/* sysfs node creation */
+	err = sysfs_create_group(&client_data->input->dev.kobj,
+			&bmi160_attribute_group);
+
+	if (err < 0)
+		goto exit_err_sysfs;
+
+	if (NULL != dev->platform_data) {
+		client_data->bst_pd = kzalloc(sizeof(*client_data->bst_pd),
+				GFP_KERNEL);
+
+		if (NULL != client_data->bst_pd) {
+			memcpy(client_data->bst_pd, dev->platform_data,
+					sizeof(*client_data->bst_pd));
+			dev_notice(dev, "%s sensor driver set place: p%d\n",
+					client_data->bst_pd->name,
+					client_data->bst_pd->place);
+		}
+	}
+
+	/* workqueue init */
+	INIT_DELAYED_WORK(&client_data->work, bmi_work_func);
+	atomic_set(&client_data->delay, BMI_DELAY_DEFAULT);
+	atomic_set(&client_data->wkqueue_en, 0);
+
+	/* h/w init */
+	client_data->device.delay_msec = bmi_delay;
+	err = BMI_CALL_API(init)(&client_data->device);
+
+	bmi_dump_reg(client_data);
+
+	/* power on or soft reset (cmd 0xB6) detected */
+	/* fatal error check */
+	/* soft reset */
+	err += BMI_CALL_API(set_command_register)(CMD_RESET_USER_REG);
+	bmi_delay(3);
+	if (err)
+		dev_err(dev, "Failed soft reset, err=%d", err);
+	/* user data config page */
+	err += BMI_CALL_API(set_target_page)(USER_DAT_CFG_PAGE);
+	if (err)
+		dev_err(dev, "Failed to set config page, err=%d", err);
+	err += bmi_get_err_status(client_data);
+	if (err) {
+		dev_err(dev, "Failed to init bmi16x, err_st=0x%x\n",
+				client_data->err_st.err_st_all);
+		goto exit_err_sysfs;
+	}
+
+#ifdef BMI160_MAG_INTERFACE_SUPPORT
+	err += bmi160_set_command_register(MAG_MODE_NORMAL);
+	bmi_delay(2);
+	err += bmi160_get_mag_power_mode_stat(&mag_op_mode);
+	bmi_delay(2);
+	err += BMI_CALL_API(get_i2c_device_addr)(&mag_dev_addr);
+	bmi_delay(2);
+#if defined(BMI160_AKM09912_SUPPORT)
+	err += BMI_CALL_API(set_i2c_device_addr)(BMI160_AKM09912_I2C_ADDRESS);
+	bmi160_bst_akm_mag_interface_init(BMI160_AKM09912_I2C_ADDRESS);
+#else
+	err += BMI_CALL_API(set_i2c_device_addr)(
+		BMI160_AUX_BMM150_I2C_ADDRESS);
+	bmi160_bmm150_mag_interface_init();
+#endif
+
+	err += bmi160_set_mag_burst(3);
+	err += bmi160_get_mag_burst(&mag_urst_len);
+	if (err)
+		dev_err(client_data->dev, "Failed to configure mag, err=%d", err);
+	dev_info(client_data->dev,
+		"BMI160 mag_urst_len:%d, mag_add:0x%x, mag_op_mode:%d\n",
+		mag_urst_len, mag_dev_addr, mag_op_mode);
+#endif
+	if (err < 0)
+		goto exit_err_sysfs;
+
+
+#if defined(BMI160_ENABLE_INT1) || defined(BMI160_ENABLE_INT2)
+		/*wake_lock_init(&client_data->wakelock,
+			WAKE_LOCK_SUSPEND, "bmi160");*/
+		client_data->gpio_pin = of_get_named_gpio_flags(dev->of_node,
+					"bmi,gpio_irq", 0, NULL);
+		dev_info(client_data->dev, "BMI160 gpio number:%d\n",
+					client_data->gpio_pin);
+		err += gpio_request_one(client_data->gpio_pin,
+					GPIOF_IN, "bmi160_int");
+		err += gpio_direction_input(client_data->gpio_pin);
+		client_data->IRQ = gpio_to_irq(client_data->gpio_pin);
+		if (err) {
+			dev_err(client_data->dev,
+				"can not request gpio to irq number\n");
+			client_data->gpio_pin = 0;
+		}
+		INIT_DELAYED_WORK(&client_data->delay_work_sig,
+			bmi160_delay_sigmo_work_func);
+#ifdef BMI160_ENABLE_INT1
+		/* map interrupts to the INT1 pin */
+		BMI_CALL_API(set_intr_any_motion)(BMI_INT0, ENABLE);
+		BMI_CALL_API(set_intr_fifo_wm)(BMI_INT0, ENABLE);
+		BMI_CALL_API(set_intr_data_rdy)(BMI_INT0, ENABLE);
+
+		/* set interrupt trigger level mode */
+		BMI_CALL_API(set_intr_edge_ctrl)(BMI_INT0, BMI_INT_LEVEL);
+		bmi160_set_intr_level(BMI_INT0, 1);
+		/*set interrupt latch temporary, 5 ms*/
+		/*bmi160_set_latch_int(5);*/
+
+		BMI_CALL_API(set_output_enable)(
+		BMI160_INTR1_OUTPUT_ENABLE, ENABLE);
+		sigmotion_init_interrupts(BMI160_MAP_INTR1);
+		BMI_CALL_API(map_step_detector_intr)(BMI160_MAP_INTR1);
+		/* disable step_detector during init */
+		BMI_CALL_API(set_step_detector_enable)(0);
+#endif
+
+#ifdef BMI160_ENABLE_INT2
+		/* map interrupts to the INT2 pin */
+		BMI_CALL_API(set_intr_any_motion)(BMI_INT1, ENABLE);
+		BMI_CALL_API(set_intr_fifo_wm)(BMI_INT1, ENABLE);
+		BMI_CALL_API(set_intr_data_rdy)(BMI_INT1, ENABLE);
+
+		/* set interrupt trigger level mode */
+		BMI_CALL_API(set_intr_edge_ctrl)(BMI_INT1, BMI_INT_LEVEL);
+		bmi160_set_intr_level(BMI_INT1, 1);
+		/*set interrupt latch temporary, 5 ms*/
+		/*bmi160_set_latch_int(5);*/
+
+		BMI_CALL_API(set_output_enable)(
+		BMI160_INTR2_OUTPUT_ENABLE, ENABLE);
+		sigmotion_init_interrupts(BMI160_MAP_INTR2);
+		BMI_CALL_API(map_step_detector_intr)(BMI160_MAP_INTR2);
+		/* disable step_detector during init */
+		BMI_CALL_API(set_step_detector_enable)(0);
+#endif
+		err = request_irq(client_data->IRQ, bmi_irq_handler,
+				IRQF_TRIGGER_RISING, "bmi160", client_data);
+		if (err)
+			dev_err(client_data->dev, "could not request irq\n");
+
+		INIT_WORK(&client_data->irq_work, bmi_irq_work_func);
+#endif
+
+	client_data->selftest = 0;
+
+	client_data->fifo_data_sel = 0;
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	BMI_CALL_API(set_accel_output_data_rate)(9);/* default ODR 200 Hz */
+	BMI_CALL_API(set_gyro_output_data_rate)(9);/* default ODR 200 Hz */
+	#endif
+	BMI_CALL_API(get_accel_output_data_rate)(&client_data->odr.acc_odr);
+	BMI_CALL_API(get_gyro_output_data_rate)(&client_data->odr.gyro_odr);
+	BMI_CALL_API(get_mag_output_data_rate)(&client_data->odr.mag_odr);
+	BMI_CALL_API(set_fifo_time_enable)(1);
+	BMI_CALL_API(get_accel_range)(&client_data->range.acc_range);
+	BMI_CALL_API(get_gyro_range)(&client_data->range.gyro_range);
+	/* now it's power on which is considered as resuming from suspend */
+	
+	/* gyro input device init */
+	err = bmi_gyro_input_init(client_data);
+	if (err < 0)
+		goto exit_err_clean;
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	client_data->accel_poll_ms = BMI160_ACCEL_DEFAULT_POLL_INTERVAL_MS;
+	client_data->gyro_poll_ms = BMI160_GYRO_DEFAULT_POLL_INTERVAL_MS;
+	client_data->data_wq = create_freezable_workqueue("bmi160_data_work");
+	if (!client_data->data_wq) {
+		dev_err(dev, "Cannot create workqueue!\n");
+		goto exit_err_clean;
+	}
+	INIT_DELAYED_WORK(&client_data->accel_poll_work,
+		bmi160_accel_work_fn);
+	client_data->accel_cdev = bmi160_accel_cdev;
+	client_data->accel_cdev.delay_msec = client_data->accel_poll_ms;
+	client_data->accel_cdev.sensors_enable = bmi160_accel_cdev_enable;
+	client_data->accel_cdev.sensors_poll_delay =
+	bmi160_accel_cdev_poll_delay;
+	err = sensors_classdev_register(dev, &client_data->accel_cdev);
+	if (err) {
+		dev_err(dev,
+			"create accel class device file failed!\n");
+		goto exit_err_clean;
+	}
+	INIT_DELAYED_WORK(&client_data->gyro_poll_work, bmi160_gyro_work_fn);
+	client_data->gyro_cdev = bmi160_gyro_cdev;
+	client_data->gyro_cdev.delay_msec = client_data->gyro_poll_ms;
+	client_data->gyro_cdev.sensors_enable = bmi160_gyro_cdev_enable;
+	client_data->gyro_cdev.sensors_poll_delay = bmi160_gyro_cdev_poll_delay;
+	err = sensors_classdev_register(dev, &client_data->gyro_cdev);
+	if (err) {
+		dev_err(dev,
+			"create gyro class device file failed!\n");
+		goto exit_err_clean;
+	}
+	#endif
+	/* set sensor PMU into suspend power mode for all */
+	if (bmi_pmu_set_suspend(client_data) < 0) {
+		dev_err(dev, "Failed to set BMI160 to suspend power mode\n");
+		goto exit_err_sysfs;
+	}
+	/*enable the data ready interrupt*/
+	BMI_CALL_API(set_intr_enable_1)(BMI160_DATA_RDY_ENABLE, 1);
+	dev_notice(dev, "sensor_time:%d, %d, %d",
+		sensortime_duration_tbl[0].ts_delat,
+		sensortime_duration_tbl[0].ts_duration_lsb,
+		sensortime_duration_tbl[0].ts_duration_us);
+	dev_notice(dev, "sensor %s probed successfully", SENSOR_NAME);
+
+	return 0;
+
+exit_err_sysfs:
+	if (err)
+		bmi_input_destroy(client_data);
+
+exit_err_clean:
+	if (err) {
+		if (client_data != NULL) {
+			if (NULL != client_data->bst_pd) {
+				kfree(client_data->bst_pd);
+				client_data->bst_pd = NULL;
+			}
+		}
+	}
+	return err;
+}
+EXPORT_SYMBOL(bmi_probe);
+
+/*!
+ * @brief remove bmi client
+ *
+ * @param dev the pointer of device
+ *
+ * @return zero
+ * @retval zero
+*/
+int bmi_remove(struct device *dev)
+{
+	int err = 0;
+	struct bmi_client_data *client_data = dev_get_drvdata(dev);
+
+	if (NULL != client_data) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		unregister_early_suspend(&client_data->early_suspend_handler);
+#endif
+		mutex_lock(&client_data->mutex_enable);
+		if (BMI_ACC_PM_NORMAL == client_data->pw.acc_pm ||
+			BMI_GYRO_PM_NORMAL == client_data->pw.gyro_pm ||
+				BMI_MAG_PM_NORMAL == client_data->pw.mag_pm) {
+			cancel_delayed_work_sync(&client_data->work);
+		}
+		mutex_unlock(&client_data->mutex_enable);
+
+		err = bmi_pmu_set_suspend(client_data);
+
+		bmi_delay(5);
+
+		sysfs_remove_group(&client_data->input->dev.kobj,
+				&bmi160_attribute_group);
+		bmi_input_destroy(client_data);
+
+		if (NULL != client_data->bst_pd) {
+			kfree(client_data->bst_pd);
+			client_data->bst_pd = NULL;
+		}
+		kfree(client_data);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(bmi_remove);
+
+static int bmi_post_resume(struct bmi_client_data *client_data)
+{
+	int err = 0;
+
+	mutex_lock(&client_data->mutex_enable);
+
+	if (atomic_read(&client_data->wkqueue_en) == 1) {
+		bmi160_set_acc_op_mode(client_data, BMI_ACC_PM_NORMAL);
+		schedule_delayed_work(&client_data->work,
+				msecs_to_jiffies(
+					atomic_read(&client_data->delay)));
+	}
+	mutex_unlock(&client_data->mutex_enable);
+
+	return err;
+}
+
+
+int bmi_suspend(struct device *dev)
+{
+	int err = 0;
+	struct bmi_client_data *client_data = dev_get_drvdata(dev);
+	unsigned char stc_enable;
+	unsigned char std_enable;
+	dev_info(client_data->dev, "bmi suspend function entrance");
+
+	atomic_set(&client_data->in_suspend, 1);
+	if (atomic_read(&client_data->wkqueue_en) == 1) {
+		bmi160_set_acc_op_mode(client_data, BMI_ACC_PM_SUSPEND);
+		cancel_delayed_work_sync(&client_data->work);
+	}
+	BMI_CALL_API(get_step_counter_enable)(&stc_enable);
+	BMI_CALL_API(get_step_detector_enable)(&std_enable);
+	if (client_data->pw.acc_pm != BMI_ACC_PM_SUSPEND &&
+		(stc_enable != 1) && (std_enable != 1) &&
+		(client_data->sig_flag != 1)) {
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_SUSPEND]);
+		bmi_delay(3);
+	}
+	if (client_data->pw.gyro_pm != BMI_GYRO_PM_SUSPEND) {
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_SUSPEND]);
+		bmi_delay(3);
+	}
+
+	if (client_data->pw.mag_pm != BMI_MAG_PM_SUSPEND) {
+#if defined(BMI160_AKM09912_SUPPORT)
+		err += bmi160_set_bst_akm_and_secondary_if_powermode(
+		BMI160_MAG_SUSPEND_MODE);
+#else
+		err += bmi160_set_bmm150_mag_and_secondary_if_power_mode(
+		BMI160_MAG_SUSPEND_MODE);
+#endif
+		bmi_delay(3);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(bmi_suspend);
+
+int bmi_resume(struct device *dev)
+{
+	int err = 0;
+	struct bmi_client_data *client_data = dev_get_drvdata(dev);
+	atomic_set(&client_data->in_suspend, 0);
+	if (client_data->pw.acc_pm != BMI_ACC_PM_SUSPEND) {
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_acc_arr[BMI_ACC_PM_NORMAL]);
+		bmi_delay(3);
+	}
+	if (client_data->pw.gyro_pm != BMI_GYRO_PM_SUSPEND) {
+		err += BMI_CALL_API(set_command_register)
+				(bmi_pmu_cmd_gyro_arr[BMI_GYRO_PM_NORMAL]);
+		bmi_delay(3);
+	}
+
+	if (client_data->pw.mag_pm != BMI_MAG_PM_SUSPEND) {
+#if defined(BMI160_AKM09912_SUPPORT)
+		err += bmi160_set_bst_akm_and_secondary_if_powermode
+					(BMI160_MAG_FORCE_MODE);
+#else
+		err += bmi160_set_bmm150_mag_and_secondary_if_power_mode
+					(BMI160_MAG_FORCE_MODE);
+#endif
+		bmi_delay(3);
+	}
+	/* post resume operation */
+	err += bmi_post_resume(client_data);
+
+	return err;
+}
+EXPORT_SYMBOL(bmi_resume);
+
diff --git a/drivers/input/sensors/bmi160/bmi160_driver.h b/drivers/input/sensors/bmi160/bmi160_driver.h
new file mode 100644
index 0000000..993a885
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bmi160_driver.h
@@ -0,0 +1,409 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bmi160_driver.h
+ * @date     2015/08/17 14:40
+ * @id       "e90a329"
+ * @version  1.3
+ *
+ * @brief
+ * The header file of the BMI160 device driver core code
+*/
+#ifndef _BMI160_DRIVER_H
+#define _BMI160_DRIVER_H
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#else
+#include <unistd.h>
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include "bmi160.h"
+
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+#include <linux/sensors.h>
+#endif
+/* sensor specific */
+#define SENSOR_NAME "bmi160"
+#define BMI160_ENABLE_INT1 1
+//#define BMI160_ENABLE_INT2 1
+/*#define BMI160_MAG_INTERFACE_SUPPORT 1*/
+
+/*#define BMI160_AKM09912_SUPPORT 1*/
+#define BMI_USE_BASIC_I2C_FUNC 1
+#define SENSOR_CHIP_ID_BMI (0xD0)
+#define SENSOR_CHIP_ID_BMI_C2 (0xD1)
+#define SENSOR_CHIP_ID_BMI_C3 (0xD3)
+
+#define SENSOR_CHIP_REV_ID_BMI (0x00)
+
+#define CHECK_CHIP_ID_TIME_MAX  5
+
+#define BMI_REG_NAME(name) BMI160_##name##__REG
+#define BMI_VAL_NAME(name) BMI160_##name
+#define BMI_CALL_API(name) bmi160_##name
+
+#define BMI_I2C_WRITE_DELAY_TIME (1)
+
+/* generic */
+#define BMI_MAX_RETRY_I2C_XFER (10)
+#define BMI_MAX_RETRY_WAKEUP (5)
+#define BMI_MAX_RETRY_WAIT_DRDY (100)
+
+#define BMI_DELAY_MIN (1)
+#define BMI_DELAY_DEFAULT (200)
+
+#define BMI_VALUE_MAX (32767)
+#define BMI_VALUE_MIN (-32768)
+
+#define BYTES_PER_LINE (16)
+
+#define BUF_SIZE_PRINT (16)
+
+#define BMI_FAST_CALI_TRUE  (1)
+#define BMI_FAST_CALI_ALL_RDY (7)
+
+/*! FIFO 1024 byte, max fifo frame count not over 150 */
+#define FIFO_FRAME_CNT 170
+#define FIFO_DATA_BUFSIZE    1024
+
+
+#define FRAME_LEN_ACC    6
+#define FRAME_LEN_GYRO    6
+#define FRAME_LEN_MAG    8
+
+/*! BMI Self test */
+#define BMI_SELFTEST_AMP_HIGH       1
+
+/* CMD  */
+#define CMD_FOC_START                 0x03
+#define CMD_PMU_ACC_SUSPEND           0x10
+#define CMD_PMU_ACC_NORMAL            0x11
+#define CMD_PMU_ACC_LP1               0x12
+#define CMD_PMU_ACC_LP2               0x13
+#define CMD_PMU_GYRO_SUSPEND          0x14
+#define CMD_PMU_GYRO_NORMAL           0x15
+#define CMD_PMU_GYRO_FASTSTART        0x17
+#define CMD_PMU_MAG_SUSPEND           0x18
+#define CMD_PMU_MAG_NORMAL            0x19
+#define CMD_PMU_MAG_LP1               0x1A
+#define CMD_PMU_MAG_LP2               0x1B
+#define CMD_CLR_FIFO_DATA             0xB0
+#define CMD_RESET_INT_ENGINE          0xB1
+#define CMD_RESET_USER_REG            0xB6
+
+#define USER_DAT_CFG_PAGE              0x00
+
+/*! FIFO Head definition*/
+#define FIFO_HEAD_A        0x84
+#define FIFO_HEAD_G        0x88
+#define FIFO_HEAD_M        0x90
+
+#define FIFO_HEAD_G_A        (FIFO_HEAD_G | FIFO_HEAD_A)
+#define FIFO_HEAD_M_A        (FIFO_HEAD_M | FIFO_HEAD_A)
+#define FIFO_HEAD_M_G        (FIFO_HEAD_M | FIFO_HEAD_G)
+
+#define FIFO_HEAD_M_G_A         (FIFO_HEAD_M | FIFO_HEAD_G | FIFO_HEAD_A)
+
+#define FIFO_HEAD_SENSOR_TIME        0x44
+#define FIFO_HEAD_SKIP_FRAME        0x40
+#define FIFO_HEAD_OVER_READ_LSB       0x80
+#define FIFO_HEAD_OVER_READ_MSB       0x00
+
+/*! FIFO head mode Frame bytes number definition */
+#define A_BYTES_FRM      6
+#define G_BYTES_FRM      6
+#define M_BYTES_FRM      8
+#define GA_BYTES_FRM     12
+#define MG_BYTES_FRM     14
+#define MA_BYTES_FRM     14
+#define MGA_BYTES_FRM    20
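+
+/*
+ * Illustrative note (not part of the original driver sources): in
+ * header-mode FIFO parsing, each frame starts with one header byte that
+ * is the OR of the per-sensor heads above, followed by the per-sensor
+ * payloads. For example, a combined gyro+accel frame uses the header
+ * FIFO_HEAD_G_A (0x88 | 0x84 = 0x8C) and carries GA_BYTES_FRM
+ * (G_BYTES_FRM + A_BYTES_FRM = 12) data bytes after the header.
+ */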
+
+#define ACC_FIFO_HEAD       "acc"
+#define GYRO_FIFO_HEAD     "gyro"
+#define MAG_FIFO_HEAD         "mag"
+
+/*! Bosch sensor unknown place*/
+#define BOSCH_SENSOR_PLACE_UNKNOWN (-1)
+/*! Bosch sensor remapping table size P0~P7*/
+#define MAX_AXIS_REMAP_TAB_SZ 8
+
+#define ENABLE     1
+#define DISABLE    0
+
+/* bmi sensor HW interrupt pin number */
+#define BMI_INT0      0
+#define BMI_INT1       1
+
+#define BMI_INT_LEVEL      0
+#define BMI_INT_EDGE        1
+
+/*! BMI mag interface */
+
+
+/* compensated output value returned if sensor had overflow */
+#define BMM050_OVERFLOW_OUTPUT       -32768
+#define BMM050_OVERFLOW_OUTPUT_S32   ((s32)(-2147483647-1))
+
+/* Trim Extended Registers */
+#define BMM050_DIG_X1                      0x5D
+#define BMM050_DIG_Y1                      0x5E
+#define BMM050_DIG_Z4_LSB                  0x62
+#define BMM050_DIG_Z4_MSB                  0x63
+#define BMM050_DIG_X2                      0x64
+#define BMM050_DIG_Y2                      0x65
+#define BMM050_DIG_Z2_LSB                  0x68
+#define BMM050_DIG_Z2_MSB                  0x69
+#define BMM050_DIG_Z1_LSB                  0x6A
+#define BMM050_DIG_Z1_MSB                  0x6B
+#define BMM050_DIG_XYZ1_LSB                0x6C
+#define BMM050_DIG_XYZ1_MSB                0x6D
+#define BMM050_DIG_Z3_LSB                  0x6E
+#define BMM050_DIG_Z3_MSB                  0x6F
+#define BMM050_DIG_XY2                     0x70
+#define BMM050_DIG_XY1                     0x71
+
+struct bmi160mag_compensate_t {
+	signed char dig_x1;
+	signed char dig_y1;
+
+	signed char dig_x2;
+	signed char dig_y2;
+
+	u16 dig_z1;
+	s16 dig_z2;
+	s16 dig_z3;
+	s16 dig_z4;
+
+	unsigned char dig_xy1;
+	signed char dig_xy2;
+
+	u16 dig_xyz1;
+};
+
+/*bmi fifo sensor type combination*/
+enum BMI_FIFO_DATA_SELECT_T {
+	BMI_FIFO_A_SEL = 1,
+	BMI_FIFO_G_SEL,
+	BMI_FIFO_G_A_SEL,
+	BMI_FIFO_M_SEL,
+	BMI_FIFO_M_A_SEL,
+	BMI_FIFO_M_G_SEL,
+	BMI_FIFO_M_G_A_SEL,
+	BMI_FIFO_DATA_SEL_MAX
+};
+
+/*bmi interrupt about step_detector and sgm*/
+#define INPUT_EVENT_STEP_DETECTOR    5
+#define INPUT_EVENT_SGM              3/*7*/
+#define INPUT_EVENT_FAST_ACC_CALIB_DONE    6
+#define INPUT_EVENT_FAST_GYRO_CALIB_DONE    4
+
+
+/*!
+* Bst sensor common definition,
+* please give parameters in BSP file.
+*/
+struct bosch_sensor_specific {
+	char *name;
+	/* 0 to 7 */
+	unsigned int place:3;
+	int irq;
+	int (*irq_gpio_cfg)(void);
+};
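+
+/*
+ * Illustrative example (an assumption, not taken from the original
+ * sources): a board file could pass this structure via
+ * dev->platform_data, which bmi_probe() copies into
+ * client_data->bst_pd:
+ *
+ *	static struct bosch_sensor_specific bmi160_pdata = {
+ *		.name  = "bmi160",
+ *		.place = 2,	(mounting position, 0..7)
+ *	};
+ */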
+
+/*! bmi160 sensor spec of power mode */
+struct pw_mode {
+	u8 acc_pm;
+	u8 gyro_pm;
+	u8 mag_pm;
+};
+
+/*! bmi160 sensor spec of odr */
+struct odr_t {
+	u8 acc_odr;
+	u8 gyro_odr;
+	u8 mag_odr;
+};
+
+/*! bmi160 sensor spec of range */
+struct range_t {
+	u8 acc_range;
+	u8 gyro_range;
+};
+
+/*! bmi160 sensor error status */
+struct err_status {
+	u8 fatal_err;
+	u8 err_code;
+	u8 i2c_fail;
+	u8 drop_cmd;
+	u8 mag_drdy_err;
+	u8 err_st_all;
+};
+
+/*! bmi160 fifo frame for all sensors */
+struct fifo_frame_t {
+	struct bmi160_accel_t *acc_farr;
+	struct bmi160_gyro_t *gyro_farr;
+	struct bmi160_mag_xyz_s32_t *mag_farr;
+
+	unsigned char acc_frame_cnt;
+	unsigned char gyro_frame_cnt;
+	unsigned char mag_frame_cnt;
+
+	u32 acc_lastf_ts;
+	u32 gyro_lastf_ts;
+	u32 mag_lastf_ts;
+};
+
+/*! bmi160 fifo sensor time */
+struct fifo_sensor_time_t {
+	u32 acc_ts;
+	u32 gyro_ts;
+	u32 mag_ts;
+};
+
+struct pedometer_data_t {
+	/*! Fix step detector misinformation for the first time*/
+	u8 wkar_step_detector_status;
+	u_int32_t last_step_counter_value;
+};
+
+struct bmi_client_data {
+	struct bmi160_t device;
+	struct device *dev;
+	struct input_dev *input;/*acc_device*/
+	struct input_dev *gyro_input;
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	struct sensors_classdev accel_cdev;
+	struct sensors_classdev gyro_cdev;
+	struct delayed_work accel_poll_work;
+	struct delayed_work gyro_poll_work;
+	u32 accel_poll_ms;
+	u32 gyro_poll_ms;
+	u32 accel_latency_ms;
+	u32 gyro_latency_ms;
+	atomic_t accel_en;
+	atomic_t gyro_en;
+	struct workqueue_struct *data_wq;
+	#endif
+	struct delayed_work work;
+	struct work_struct irq_work;
+
+	u8 chip_id;
+
+	struct pw_mode pw;
+	struct odr_t odr;
+	struct range_t range; /*TO DO*/
+	struct err_status err_st;
+	struct pedometer_data_t pedo_data;
+	s8 place;
+	u8 selftest;
+	/*struct wake_lock wakelock;*/
+	struct delayed_work delay_work_sig;
+	atomic_t in_suspend;
+
+	atomic_t wkqueue_en; /*TO DO acc gyro mag*/
+	atomic_t delay;
+	atomic_t selftest_result;
+
+	u8  fifo_data_sel;
+	u16 fifo_bytecount;
+	u8 fifo_head_en;
+	unsigned char fifo_int_tag_en;
+	struct fifo_frame_t fifo_frame;
+
+	unsigned char *fifo_data;
+	u64 fifo_time;
+	u8 stc_enable;
+	uint16_t gpio_pin;
+	u8 std;
+	u8 sig_flag;
+	unsigned char calib_status;
+	struct mutex mutex_op_mode;
+	struct mutex mutex_enable;
+	struct bosch_sensor_specific *bst_pd;
+	int IRQ;
+	int reg_sel;
+	int reg_len;
+	uint64_t timestamp;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend_handler;
+#endif
+};
+
+
+/*!
+ * we use a typedef to hide the detail,
+ * because this type might be changed
+ */
+struct bosch_sensor_axis_remap {
+	/* src means which source will be mapped to target x, y, z axis */
+	/* if an target OS axis is remapped from (-)x,
+	 * src is 0, sign_* is (-)1 */
+	/* if an target OS axis is remapped from (-)y,
+	 * src is 1, sign_* is (-)1 */
+	/* if an target OS axis is remapped from (-)z,
+	 * src is 2, sign_* is (-)1 */
+	int src_x:3;
+	int src_y:3;
+	int src_z:3;
+
+	int sign_x:2;
+	int sign_y:2;
+	int sign_z:2;
+};
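+
+/*
+ * Illustrative example (not from the original sources): with the
+ * convention documented above, a remap entry that derives OS X from -Y,
+ * OS Y from +X and OS Z from +Z of the device would be:
+ *
+ *	{ .src_x = 1, .src_y = 0, .src_z = 2,
+ *	  .sign_x = -1, .sign_y = 1, .sign_z = 1 }
+ */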
+
+
+struct bosch_sensor_data {
+	union {
+		int16_t v[3];
+		struct {
+			int16_t x;
+			int16_t y;
+			int16_t z;
+		};
+	};
+};
+
+s8 bmi_burst_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u16 len);
+int bmi_probe(struct bmi_client_data *client_data, struct device *dev);
+int bmi_remove(struct device *dev);
+int bmi_suspend(struct device *dev);
+int bmi_resume(struct device *dev);
+
+
+
+
+#endif/*_BMI160_DRIVER_H*/
+/*@}*/
+
diff --git a/drivers/input/sensors/bmi160/bmi160_i2c.c b/drivers/input/sensors/bmi160/bmi160_i2c.c
new file mode 100644
index 0000000..57d42c62
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bmi160_i2c.c
@@ -0,0 +1,369 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bmi160_i2c.c
+ * @date     2014/11/25 14:40
+ * @id       "20f77db"
+ * @version  1.3
+ *
+ * @brief
+ * This file implements the module functions which add
+ * the driver to the I2C core.
+*/
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include "bmi160_driver.h"
+
+/*! @defgroup bmi160_i2c_src
+ *  @brief bmi160 i2c driver module
+ @{*/
+
+static struct i2c_client *bmi_client;
+/*!
+ * @brief define i2c read function
+ *
+ * @param client the pointer of i2c client
+ * @param reg_addr register address
+ * @param data the pointer of data buffer
+ * @param len block size need to read
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+/*	i2c read routine for API*/
+static s8 bmi_i2c_read(struct i2c_client *client, u8 reg_addr,
+			u8 *data, u8 len)
+	{
+#if !defined BMI_USE_BASIC_I2C_FUNC
+		s32 dummy;
+		if (NULL == client)
+			return -EINVAL;
+
+		while (0 != len--) {
+#ifdef BMI_SMBUS
+			dummy = i2c_smbus_read_byte_data(client, reg_addr);
+			if (dummy < 0) {
+				dev_err(&client->dev, "i2c smbus read error");
+				return -EIO;
+			}
+			*data = (u8)(dummy & 0xff);
+#else
+			dummy = i2c_master_send(client, (char *)&reg_addr, 1);
+			if (dummy < 0) {
+				dev_err(&client->dev, "i2c bus master write error");
+				return -EIO;
+			}
+
+			dummy = i2c_master_recv(client, (char *)data, 1);
+			if (dummy < 0) {
+				dev_err(&client->dev, "i2c bus master read error");
+				return -EIO;
+			}
+#endif
+			reg_addr++;
+			data++;
+		}
+		return 0;
+#else
+		int retry;
+
+		struct i2c_msg msg[] = {
+			{
+			 .addr = client->addr,
+			 .flags = 0,
+			 .len = 1,
+			 .buf = &reg_addr,
+			},
+
+			{
+			 .addr = client->addr,
+			 .flags = I2C_M_RD,
+			 .len = len,
+			 .buf = data,
+			 },
+		};
+
+		for (retry = 0; retry < BMI_MAX_RETRY_I2C_XFER; retry++) {
+			if (i2c_transfer(client->adapter, msg,
+						ARRAY_SIZE(msg)) > 0)
+				break;
+			else
+				usleep_range(BMI_I2C_WRITE_DELAY_TIME * 1000,
+				BMI_I2C_WRITE_DELAY_TIME * 1000);
+		}
+
+		if (BMI_MAX_RETRY_I2C_XFER <= retry) {
+			dev_err(&client->dev, "I2C xfer error");
+			return -EIO;
+		}
+
+		return 0;
+#endif
+	}
+
+
+static s8 bmi_i2c_burst_read(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u16 len)
+{
+	int retry;
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg_addr,
+		},
+
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	for (retry = 0; retry < BMI_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			usleep_range(BMI_I2C_WRITE_DELAY_TIME * 1000,
+				BMI_I2C_WRITE_DELAY_TIME * 1000);
+	}
+
+	if (BMI_MAX_RETRY_I2C_XFER <= retry) {
+		dev_err(&client->dev, "I2C xfer error");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+/* i2c write routine for */
+static s8 bmi_i2c_write(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u8 len)
+{
+#if !defined BMI_USE_BASIC_I2C_FUNC
+	s32 dummy;
+
+#ifndef BMI_SMBUS
+	u8 buffer[2];
+#endif
+
+	if (NULL == client)
+		return -EPERM;
+
+	while (0 != len--) {
+#ifdef BMI_SMBUS
+		dummy = i2c_smbus_write_byte_data(client, reg_addr, *data);
+#else
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		dummy = i2c_master_send(client, (char *)buffer, 2);
+#endif
+		reg_addr++;
+		data++;
+		if (dummy < 0) {
+			dev_err(&client->dev, "error writing i2c bus");
+			return -EPERM;
+		}
+
+	}
+	usleep_range(BMI_I2C_WRITE_DELAY_TIME * 1000,
+	BMI_I2C_WRITE_DELAY_TIME * 1000);
+	return 0;
+#else
+	u8 buffer[2];
+	int retry;
+	struct i2c_msg msg[] = {
+		{
+		 .addr = client->addr,
+		 .flags = 0,
+		 .len = 2,
+		 .buf = buffer,
+		 },
+	};
+
+	while (0 != len--) {
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		for (retry = 0; retry < BMI_MAX_RETRY_I2C_XFER; retry++) {
+			if (i2c_transfer(client->adapter, msg,
+						ARRAY_SIZE(msg)) > 0) {
+				break;
+			} else {
+				usleep_range(BMI_I2C_WRITE_DELAY_TIME * 1000,
+				BMI_I2C_WRITE_DELAY_TIME * 1000);
+			}
+		}
+		if (BMI_MAX_RETRY_I2C_XFER <= retry) {
+			dev_err(&client->dev, "I2C xfer error");
+			return -EIO;
+		}
+		reg_addr++;
+		data++;
+	}
+
+	usleep_range(BMI_I2C_WRITE_DELAY_TIME * 1000,
+	BMI_I2C_WRITE_DELAY_TIME * 1000);
+	return 0;
+#endif
+}
+
+
+static s8 bmi_i2c_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	int err = 0;
+	err = bmi_i2c_read(bmi_client, reg_addr, data, len);
+	return err;
+}
+
+static s8 bmi_i2c_write_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	int err = 0;
+	err = bmi_i2c_write(bmi_client, reg_addr, data, len);
+	return err;
+}
+
+s8 bmi_burst_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u16 len)
+{
+	int err = 0;
+	err = bmi_i2c_burst_read(bmi_client, reg_addr, data, len);
+	return err;
+}
+EXPORT_SYMBOL(bmi_burst_read_wrapper);
+/*!
+ * @brief BMI probe function via i2c bus
+ *
+ * @param client the pointer of i2c client
+ * @param id the pointer of i2c device id
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static int bmi_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+		int err = 0;
+		struct bmi_client_data *client_data = NULL;
+
+		dev_info(&client->dev, "BMI160 i2c function probe entrance");
+
+		if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+			dev_err(&client->dev, "i2c_check_functionality error!");
+			err = -EIO;
+			goto exit_err_clean;
+		}
+
+		if (NULL == bmi_client) {
+			bmi_client = client;
+		} else {
+			dev_err(&client->dev,
+				"this driver does not support multiple clients");
+			err = -EBUSY;
+			goto exit_err_clean;
+		}
+
+		client_data = kzalloc(sizeof(struct bmi_client_data),
+							GFP_KERNEL);
+		if (NULL == client_data) {
+			dev_err(&client->dev, "no memory available");
+			err = -ENOMEM;
+			goto exit_err_clean;
+		}
+
+		client_data->device.bus_read = bmi_i2c_read_wrapper;
+		client_data->device.bus_write = bmi_i2c_write_wrapper;
+
+		return bmi_probe(client_data, &client->dev);
+
+exit_err_clean:
+		if (err)
+			bmi_client = NULL;
+		return err;
+}
+/*
+static int bmi_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	int err = 0;
+	err = bmi_suspend(&client->dev);
+	return err;
+}
+
+static int bmi_i2c_resume(struct i2c_client *client)
+{
+	int err = 0;
+
+	err = bmi_resume(&client->dev);
+
+	return err;
+}
+*/
+
+static int bmi_i2c_remove(struct i2c_client *client)
+{
+	int err = 0;
+	err = bmi_remove(&client->dev);
+	bmi_client = NULL;
+
+	return err;
+}
+
+
+
+static const struct i2c_device_id bmi_id[] = {
+	{SENSOR_NAME, 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, bmi_id);
+
+static const struct of_device_id bmi160_of_match[] = {
+	{ .compatible = "bosch-sensortec,bmi160", },
+	{ .compatible = "bmi160", },
+	{ .compatible = "bosch, bmi160", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, bmi160_of_match);
+
+static struct i2c_driver bmi_i2c_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = SENSOR_NAME,
+		.of_match_table = bmi160_of_match,
+	},
+	.class = I2C_CLASS_HWMON,
+	.id_table = bmi_id,
+	.probe = bmi_i2c_probe,
+	.remove = bmi_i2c_remove,
+	/*.suspend = bmi_i2c_suspend,
+	.resume = bmi_i2c_resume,*/
+};
+
+static int __init BMI_i2c_init(void)
+{
+	return i2c_add_driver(&bmi_i2c_driver);
+}
+
+static void __exit BMI_i2c_exit(void)
+{
+	i2c_del_driver(&bmi_i2c_driver);
+}
+
+MODULE_AUTHOR("Contact <contact@bosch-sensortec.com>");
+MODULE_DESCRIPTION("driver for " SENSOR_NAME);
+MODULE_LICENSE("GPL v2");
+
+module_init(BMI_i2c_init);
+module_exit(BMI_i2c_exit);
+
diff --git a/drivers/input/sensors/bmi160/bmi160_spi.c b/drivers/input/sensors/bmi160/bmi160_spi.c
new file mode 100644
index 0000000..8da1e21
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bmi160_spi.c
@@ -0,0 +1,299 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bmi160_spi.c
+ * @date     2014/11/25 14:40
+ * @id       "20f77db"
+ * @version  1.3
+ *
+ * @brief
+ * This file implements the module functions which add
+ * the driver to the SPI core.
+*/
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include "bmi160_driver.h"
+
+/*! @defgroup bmi160_spi_src
+ *  @brief bmi160 spi driver module
+ @{*/
+/*! the maximum of transfer buffer size */
+#define BMI_MAX_BUFFER_SIZE      32
+
+static struct spi_device *bmi_spi_client;
+
+/*!
+ * @brief define spi write function
+ *
+ * @param dev_addr sensor device address
+ * @param reg_addr register address
+ * @param data the pointer of data buffer
+ * @param len block size need to write
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static char bmi_spi_write_block(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	struct spi_device *client = bmi_spi_client;
+	u8 buffer[BMI_MAX_BUFFER_SIZE + 1];
+	struct spi_transfer xfer = {
+		.tx_buf     = buffer,
+		.len        = len + 1,
+	};
+	struct spi_message msg;
+
+	if (len > BMI_MAX_BUFFER_SIZE)
+		return -EINVAL;
+
+	buffer[0] = reg_addr&0x7F;/* write: MSB = 0 */
+	memcpy(&buffer[1], data, len);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	return spi_sync(client, &msg);
+}
+
+/*!
+ * @brief define spi read function
+ *
+ * @param dev_addr sensor device address
+ * @param reg_addr register address
+ * @param data the pointer of data buffer
+ * @param len block size need to read
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static char bmi_spi_read_block(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	struct spi_device *client = bmi_spi_client;
+	u8 reg = reg_addr | 0x80;/* read: MSB = 1 */
+	struct spi_transfer xfer[2] = {
+		[0] = {
+			.tx_buf = &reg,
+			.len = 1,
+		},
+		[1] = {
+			.rx_buf = data,
+			.len = len,
+		}
+	};
+	struct spi_message msg;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer[0], &msg);
+	spi_message_add_tail(&xfer[1], &msg);
+	return spi_sync(client, &msg);
+}
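+
+/*
+ * Illustrative note (not part of the original driver): with the
+ * register-access convention used here, reading register 0x41 first
+ * sends the single byte 0xC1 (0x41 | 0x80) and then clocks in "len"
+ * bytes, while bmi_spi_write_block() for register 0x40 sends
+ * {0x40, data...}, since the MSB is cleared for writes.
+ */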
+
+s8 bmi_burst_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u16 len)
+{
+	struct spi_device *client = bmi_spi_client;
+	u8 reg = reg_addr | 0x80;/* read: MSB = 1 */
+	struct spi_transfer xfer[2] = {
+		[0] = {
+			.tx_buf = &reg,
+			.len = 1,
+		},
+		[1] = {
+			.rx_buf = data,
+			.len = len,
+		}
+	};
+	struct spi_message msg;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer[0], &msg);
+	spi_message_add_tail(&xfer[1], &msg);
+	return spi_sync(client, &msg);
+}
+EXPORT_SYMBOL(bmi_burst_read_wrapper);
+/*!
+ * @brief BMI probe function via spi bus
+ *
+ * @param client the pointer of spi client
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static int bmi_spi_probe(struct spi_device *client)
+{
+	int status;
+	int err = 0;
+	struct bmi_client_data *client_data = NULL;
+
+	if (NULL == bmi_spi_client)
+		bmi_spi_client = client;
+	else{
+	else {
+		return -EBUSY;
+	}
+
+	client->bits_per_word = 8;
+	status = spi_setup(client);
+	if (status < 0) {
+		dev_err(&client->dev, "spi_setup failed!\n");
+		return status;
+	}
+
+	client_data = kzalloc(sizeof(struct bmi_client_data), GFP_KERNEL);
+	if (NULL == client_data) {
+		dev_err(&client->dev, "no memory available");
+		err = -ENOMEM;
+		goto exit_err_clean;
+	}
+
+	client_data->device.bus_read = bmi_spi_read_block;
+	client_data->device.bus_write = bmi_spi_write_block;
+
+	return bmi_probe(client_data, &client->dev);
+
+exit_err_clean:
+	if (err)
+		bmi_spi_client = NULL;
+	return err;
+}
+
+/*!
+ * @brief shutdown bmi device in spi driver
+ *
+ * @param client the pointer of spi client
+ *
+ * @return no return value
+*/
+static void bmi_spi_shutdown(struct spi_device *client)
+{
+#ifdef CONFIG_PM
+	bmi_suspend(&client->dev);
+#endif
+}
+
+/*!
+ * @brief remove bmi spi client
+ *
+ * @param client the pointer of spi client
+ *
+ * @return zero
+ * @retval zero
+*/
+static int bmi_spi_remove(struct spi_device *client)
+{
+	int err = 0;
+	err = bmi_remove(&client->dev);
+	bmi_spi_client = NULL;
+
+	return err;
+}
+
+#ifdef CONFIG_PM
+/*!
+ * @brief suspend bmi device in spi driver
+ *
+ * @param dev the pointer of device
+ *
+ * @return zero
+ * @retval zero
+*/
+static int bmi_spi_suspend(struct device *dev)
+{
+	int err = 0;
+	err = bmi_suspend(dev);
+	return err;
+}
+
+/*!
+ * @brief resume bmi device in spi driver
+ *
+ * @param dev the pointer of device
+ *
+ * @return zero
+ * @retval zero
+*/
+static int bmi_spi_resume(struct device *dev)
+{
+	int err = 0;
+	/* post resume operation */
+	err = bmi_resume(dev);
+
+	return err;
+}
+
+/*!
+ * @brief register spi device power manager hooks
+*/
+static const struct dev_pm_ops bmi_spi_pm_ops = {
+	/**< device suspend */
+	.suspend = bmi_spi_suspend,
+	/**< device resume */
+	.resume  = bmi_spi_resume
+};
+#endif
+
+/*!
+ * @brief register spi device id
+*/
+static const struct spi_device_id bmi_id[] = {
+	{ SENSOR_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, bmi_id);
+
+/*!
+ * @brief register spi driver hooks
+*/
+static struct spi_driver bmi_spi_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name  = SENSOR_NAME,
+#ifdef CONFIG_PM
+		.pm = &bmi_spi_pm_ops,
+#endif
+	},
+	.id_table = bmi_id,
+	.probe    = bmi_spi_probe,
+	.shutdown = bmi_spi_shutdown,
+	.remove   = bmi_spi_remove
+};
+
+/*!
+ * @brief initialize bmi spi module
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static int __init bmi_spi_init(void)
+{
+	return spi_register_driver(&bmi_spi_driver);
+}
+
+/*!
+ * @brief remove bmi spi module
+ *
+ * @return no return value
+*/
+static void __exit bmi_spi_exit(void)
+{
+	spi_unregister_driver(&bmi_spi_driver);
+}
+
+
+MODULE_AUTHOR("Contact <contact@bosch-sensortec.com>");
+MODULE_DESCRIPTION("BMI160 SPI DRIVER");
+MODULE_LICENSE("GPL v2");
+
+module_init(bmi_spi_init);
+module_exit(bmi_spi_exit);
+/*@}*/
+
diff --git a/drivers/input/sensors/bmi160/bs_log.c b/drivers/input/sensors/bmi160/bs_log.c
new file mode 100644
index 0000000..6574607
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bs_log.c
@@ -0,0 +1,50 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bs_log.c
+ * @date     "Wed Sep 24 15:27:12 2014 +0800"
+ * @id       "e416c14"
+ *
+ * @brief
+ * The source file of BOSCH SENSOR LOG
+*/
+
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/types.h>
+#else
+#include <unistd.h>
+#include <sys/types.h>
+#endif
+
+#include <linux/time.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#ifdef BOSCH_DRIVER_LOG_FUNC
+#define BSLOG_VAR_DEF
+#include "bs_log.h"
+
+void set_debug_log_level(uint8_t level)
+{
+	debug_log_level = level;
+}
+
+uint8_t get_debug_log_level(void)
+{
+	return debug_log_level;
+}
+
+EXPORT_SYMBOL(set_debug_log_level);
+EXPORT_SYMBOL(get_debug_log_level);
+
+#endif/*BOSCH_DRIVER_LOG_FUNC*/

+/*@}*/
diff --git a/drivers/input/sensors/bmi160/bs_log.h b/drivers/input/sensors/bmi160/bs_log.h
new file mode 100644
index 0000000..ed2d2b3
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bs_log.h
@@ -0,0 +1,171 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bs_log.h
+ * @date     "Sat Oct 11 16:12:16 2014 +0800"
+ * @id       "762cc9e"
+ *
+ * @brief
+ * The header file of BOSCH SENSOR LOG
+*/
+
+#ifndef __BS_LOG_H
+#define __BS_LOG_H
+
+#include <linux/kernel.h>
+
+/*! @ trace functions
+ @{*/
+/*! ERROR LOG LEVEL */
+#define LOG_LEVEL_E 3
+/*! NOTICE LOG LEVEL */
+#define LOG_LEVEL_N 5
+/*! INFORMATION LOG LEVEL */
+#define LOG_LEVEL_I 6
+/*! DEBUG LOG LEVEL */
+#define LOG_LEVEL_D 7
+/*! DEBUG_FWDL LOG LEVEL */
+#define LOG_LEVEL_DF 10
+/*! DEBUG_DATA LOG LEVEL */
+#define LOG_LEVEL_DA 15
+/*! ALL LOG LEVEL */
+#define LOG_LEVEL_A 20
+
+#ifndef MODULE_TAG
+/*! MODULE TAG DEFINITION */
+#define MODULE_TAG "<BS_LOG>"
+#endif
+
+#ifndef LOG_LEVEL
+/*! LOG LEVEL DEFINITION */
+#define LOG_LEVEL LOG_LEVEL_I
+#endif
+
+#ifdef BOSCH_DRIVER_LOG_FUNC
+	#ifdef BSLOG_VAR_DEF
+		uint8_t debug_log_level = LOG_LEVEL;
+	#else
+		extern uint8_t debug_log_level;
+	#endif
+
+	/*! print error message */
+	#define PERR(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_E)\
+			printk(KERN_INFO "\n" "[E]" KERN_ERR MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print notice message */
+	#define PNOTICE(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_N)\
+			printk(KERN_INFO "\n" "[N]" KERN_NOTICE MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print information message */
+	#define PINFO(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_I)\
+			printk(KERN_INFO "\n" "[I]" KERN_INFO MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print debug message */
+	#define PDEBUG(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_D)\
+			printk(KERN_INFO "\n" "[D]" KERN_DEBUG MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print debug fw download message */
+	#define PDEBUG_FWDL(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_DF)\
+			printk(KERN_INFO "\n" "[DF]" KERN_DEBUG MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print debug data log message */
+	#define PDEBUG_DLOG(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_DA)\
+			printk(KERN_INFO "\n" "[DA]" KERN_DEBUG MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	void set_debug_log_level(uint8_t level);
+	uint8_t get_debug_log_level(void);
+
+#else
+
+	#if (LOG_LEVEL >= LOG_LEVEL_E)
+	/*! print error message */
+	#define PERR(fmt, args...) \
+		printk(KERN_INFO "\n" "[E]" KERN_ERR MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PERR(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_N)
+	/*! print notice message */
+	#define PNOTICE(fmt, args...) \
+		printk(KERN_INFO "\n" "[N]" KERN_NOTICE MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PNOTICE(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_I)
+	/*! print information message */
+	#define PINFO(fmt, args...) printk(KERN_INFO "\n" "[I]" KERN_INFO MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PINFO(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_D)
+	/*! print debug message */
+	#define PDEBUG(fmt, args...) printk(KERN_INFO "\n" "[D]" KERN_DEBUG MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PDEBUG(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_DF)
+	/*! print debug fw download message */
+	#define PDEBUG_FWDL(fmt, args...) printk(KERN_INFO "\n" "[DF]" KERN_DEBUG MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PDEBUG_FWDL(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_DA)
+	/*! print debug data log message */
+	#define PDEBUG_DLOG(fmt, args...) printk(KERN_INFO "\n" "[DA]" KERN_DEBUG MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PDEBUG_DLOG(fmt, args...)
+	#endif
+
+	#define set_debug_log_level(level) {}
+	#define get_debug_log_level() (LOG_LEVEL)
+
+#endif
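+
+/*
+ * Illustrative usage (an assumption, not part of the original header):
+ * callers include this file after optionally defining MODULE_TAG, e.g.
+ *
+ *	#define MODULE_TAG "<BMI160>"
+ *	#include "bs_log.h"
+ *
+ *	PINFO("chip id 0x%x", chip_id);
+ *	PERR("bus read failed, err=%d", err);
+ *
+ * With BOSCH_DRIVER_LOG_FUNC enabled, set_debug_log_level(LOG_LEVEL_D)
+ * raises the runtime threshold so that PDEBUG() output becomes visible.
+ */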
+
+#endif/*__BS_LOG_H*/
+/*@}*/
diff --git a/drivers/input/sensors/bmi160/bstclass.c b/drivers/input/sensors/bmi160/bstclass.c
new file mode 100644
index 0000000..4937cca
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bstclass.c
@@ -0,0 +1,238 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bstclass.c
+ * @date     2015/11/17 13:44
+ * @id       "836294d"
+ * @version  1.5.9
+ *
+ * @brief    
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/compiler.h>
+#include <linux/compat.h>
+#include "bstclass.h"
+#include "bs_log.h"
+
+static LIST_HEAD(bst_dev_list);
+
+/*
+ * bst_mutex protects access to bst_dev_list. It also makes
+ * bst_register_device and bst_unregister_device mutually exclusive,
+ * which simplifies locking in drivers using the bst class.
+ */
+static DEFINE_MUTEX(bst_mutex);
+
+
+static void bst_dev_release(struct device *device)
+{
+	struct bst_dev *dev = to_bst_dev(device);
+	if (NULL != dev)
+		kfree(dev);
+	module_put(THIS_MODULE);
+}
+
+
+#ifdef CONFIG_PM
+static int bst_dev_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int bst_dev_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops bst_dev_pm_ops = {
+	.suspend    = bst_dev_suspend,
+	.resume     = bst_dev_resume,
+	.poweroff   = bst_dev_suspend,
+	.restore    = bst_dev_resume,
+};
+#endif /* CONFIG_PM */
+
+static const struct attribute_group *bst_dev_attr_groups[] = {
+	NULL
+};
+
+static struct device_type bst_dev_type = {
+	.groups      = bst_dev_attr_groups,
+	.release = bst_dev_release,
+#ifdef CONFIG_PM
+	.pm      = &bst_dev_pm_ops,
+#endif
+};
+
+
+
+static char *bst_devnode(struct device *dev, mode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+struct class bst_class = {
+	.name        = "bst",
+	.owner       = THIS_MODULE,
+	.devnode     = (void*)bst_devnode,
+	.dev_release = bst_dev_release,
+};
+EXPORT_SYMBOL_GPL(bst_class);
+
+/**
+ * bst_allocate_device - allocate memory for a new bst device
+ *
+ * Returns prepared struct bst_dev or NULL.
+ *
+ * NOTE: Use bst_free_device() to free devices that have not been
+ * registered; bst_unregister_device() should be used for already
+ * registered devices.
+ */
+struct bst_dev *bst_allocate_device(void)
+{
+	struct bst_dev *dev;
+
+	dev = kzalloc(sizeof(struct bst_dev), GFP_KERNEL);
+	if (dev) {
+		dev->dev.type = &bst_dev_type;
+		dev->dev.class = &bst_class;
+		device_initialize(&dev->dev);
+		mutex_init(&dev->mutex);
+		INIT_LIST_HEAD(&dev->node);
+		__module_get(THIS_MODULE);
+	}
+	return dev;
+}
+EXPORT_SYMBOL(bst_allocate_device);
+
+
+
+/**
+ * bst_free_device - free memory occupied by bst_dev structure
+ * @dev: bst device to free
+ *
+ * This function should only be used if bst_register_device()
+ * was not called yet or if it failed. Once the device has been
+ * registered, use bst_unregister_device() instead; the memory will be
+ * freed once the last reference to the device is dropped.
+ *
+ * Device should be allocated by bst_allocate_device().
+ *
+ * NOTE: If there are references to the input device then memory
+ * will not be freed until last reference is dropped.
+ */
+void bst_free_device(struct bst_dev *dev)
+{
+	if (dev)
+		bst_put_device(dev);
+}
+EXPORT_SYMBOL(bst_free_device);
+
+/**
+ * bst_register_device - register device with input core
+ * @dev: device to be registered
+ *
+ * This function registers the device with the bst class core. The device
+ * must be allocated with bst_allocate_device() and have all of its
+ * capabilities set up before registering.
+ * If this function fails, the device must be freed with bst_free_device().
+ * Once device has been successfully registered it can be unregistered
+ * with bst_unregister_device(); bst_free_device() should not be
+ * called in this case.
+ */
+int bst_register_device(struct bst_dev *dev)
+{
+	const char *path;
+	int error;
+
+
+	/*
+	 * Use the driver-supplied name for the class device; pass it as an
+	 * argument rather than as a format string.
+	 */
+	dev_set_name(&dev->dev, "%s", dev->name);
+
+	error = device_add(&dev->dev);
+	if (error)
+		return error;
+
+	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+	PINFO("%s as %s\n",
+			dev->name ? dev->name : "Unspecified device",
+			path ? path : "N/A");
+	kfree(path);
+	error = mutex_lock_interruptible(&bst_mutex);
+	if (error) {
+		device_del(&dev->dev);
+		return error;
+	}
+
+	list_add_tail(&dev->node, &bst_dev_list);
+
+	mutex_unlock(&bst_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(bst_register_device);
+
+/**
+ * bst_unregister_device - unregister previously registered device
+ * @dev: device to be unregistered
+ *
+ * This function unregisters a bst device. Once the device is unregistered
+ * the caller must not access it, as it may be freed at any moment.
+ */
+void bst_unregister_device(struct bst_dev *dev)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&bst_mutex);
+	if (ret)
+		return;
+
+	list_del_init(&dev->node);
+	mutex_unlock(&bst_mutex);
+	device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL(bst_unregister_device);
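+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a sensor
+ * driver typically allocates a bst device, names it, registers it and
+ * attaches its private data.  The example_probe()/example_data names
+ * below are hypothetical.
+ *
+ *	static int example_probe(struct example_data *data)
+ *	{
+ *		struct bst_dev *bst;
+ *		int err;
+ *
+ *		bst = bst_allocate_device();
+ *		if (!bst)
+ *			return -ENOMEM;
+ *		bst->name = "example_sensor";
+ *		bst_set_drvdata(bst, data);
+ *		err = bst_register_device(bst);
+ *		if (err) {
+ *			bst_free_device(bst);
+ *			return err;
+ *		}
+ *		data->bst = bst;
+ *		return 0;
+ *	}
+ *
+ * On removal the driver calls bst_unregister_device(data->bst) and must
+ * not touch the bst device afterwards.
+ */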
+
+static int __init bst_init(void)
+{
+	int err;
+	/*bst class register*/
+	err = class_register(&bst_class);
+	if (err) {
+		pr_err("unable to register bst_dev class\n");
+		return err;
+	}
+	return err;
+}
+
+static void __exit bst_exit(void)
+{
+	/*bst class*/
+	class_unregister(&bst_class);
+}
+
+/*subsys_initcall(bst_init);*/
+
+MODULE_AUTHOR("contact@bosch-sensortec.com");
+MODULE_DESCRIPTION("BST CLASS CORE");
+MODULE_LICENSE("GPL v2");
+
+module_init(bst_init);
+module_exit(bst_exit);
diff --git a/drivers/input/sensors/bmi160/bstclass.h b/drivers/input/sensors/bmi160/bstclass.h
new file mode 100644
index 0000000..7aa7760
--- /dev/null
+++ b/drivers/input/sensors/bmi160/bstclass.h
@@ -0,0 +1,78 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * @filename bstclass.h
+ * @date     2015/11/17 13:44
+ * @id       "836294d"
+ * @version  1.5.9
+ *
+ * @brief  
+ */
+
+#ifndef _BSTCLASS_H
+#define _BSTCLASS_H
+
+#ifdef __KERNEL__
+#include <linux/time.h>
+#include <linux/list.h>
+#else
+#include <sys/time.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <linux/types.h>
+#endif
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
+
+struct bst_dev {
+	const char *name;
+
+	int (*open)(struct bst_dev *dev);
+	void (*close)(struct bst_dev *dev);
+	struct mutex mutex;
+	struct device dev;
+	struct list_head node;
+};
+
+#define to_bst_dev(d) container_of(d, struct bst_dev, dev)
+
+struct bst_dev *bst_allocate_device(void);
+void bst_free_device(struct bst_dev *dev);
+
+static inline struct bst_dev *bst_get_device(struct bst_dev *dev)
+{
+	return dev ? to_bst_dev(get_device(&dev->dev)) : NULL;
+}
+
+static inline void bst_put_device(struct bst_dev *dev)
+{
+	if (dev)
+		put_device(&dev->dev);
+}
+
+static inline void *bst_get_drvdata(struct bst_dev *dev)
+{
+	return dev_get_drvdata(&dev->dev);
+}
+
+static inline void bst_set_drvdata(struct bst_dev *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
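+
+/*
+ * Reference-counting sketch (illustrative only): bst_get_device() and
+ * bst_put_device() wrap get_device()/put_device() on the embedded
+ * struct device, so code that keeps a bst_dev pointer beyond the scope
+ * of a callback should take and later drop its own reference.  The
+ * "bst" pointer and do_something_with() below are placeholders.
+ *
+ *	struct bst_dev *ref = bst_get_device(bst);
+ *
+ *	if (ref) {
+ *		void *priv = bst_get_drvdata(ref);
+ *
+ *		do_something_with(priv);
+ *		bst_put_device(ref);
+ *	}
+ *
+ * The memory backing the device is released from the class release
+ * callback only when the last reference is dropped.
+ */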
+
+int __must_check bst_register_device(struct bst_dev *);
+void bst_unregister_device(struct bst_dev *);
+
+void bst_reset_device(struct bst_dev *);
+
+
+extern struct class bst_class;
+
+#endif
diff --git a/drivers/input/sensors/smi130/Kconfig b/drivers/input/sensors/smi130/Kconfig
new file mode 100644
index 0000000..0dd0b39
--- /dev/null
+++ b/drivers/input/sensors/smi130/Kconfig
@@ -0,0 +1,121 @@
+#
+# Kconfig for the Bosch sensor drivers.
+#
+config BOSCH_DRIVER_LOG_FUNC
+	tristate "Bosch Sensortec driver smart log function support"
+	depends on (I2C || SPI_MASTER) && SYSFS
+	help
+	 If you say yes here, you get support for the smart log function in
+	 the Bosch Sensortec drivers.
+
+config SENSORS_SMI_ACC2X2
+	tristate "SMI130_ACC acceleration sensor support"
+	depends on I2C
+	help
+	  If you say yes here, you get support for Bosch Sensortec's
+	  acceleration sensors SMI_ACC255/SMI_ACC254/SMI_ACC355/SMI_ACC250E/SMI_ACC222E/SMI_ACC280.
+
+config SENSORS_SMI_ACC2X2_ENABLE_INT1
+	tristate "SMI_ACC2X2 acceleration sensor interrupt INT1 support"
+	depends on SENSORS_SMI_ACC2X2
+	help
+	 If you say yes here, you get INT1 support for Bosch Sensortec
+	 acceleration sensors SMI_ACC255/SMI_ACC250E/SMI_ACC222E/SMI_ACC280.
+	 Selecting this will disable interrupt INT2 support.
+
+config SENSORS_SMI_ACC2X2_ENABLE_INT2
+	tristate "SMI_ACC2X2 acceleration sensor interrupt INT2 support"
+	depends on SENSORS_SMI_ACC2X2 && !SENSORS_SMI_ACC2X2_ENABLE_INT1
+	help
+	 If you say yes here, you get INT2 support for Bosch Sensortec
+	 acceleration sensors SMI_ACC255/SMI_ACC250E/SMI_ACC222E/SMI_ACC280.
+	 This can only be enabled if interrupt INT1 support is NOT enabled.
+
+config SIG_MOTION
+	tristate "support significant motion sensor function"
+	depends on SENSORS_SMI_ACC2X2  && ( SENSORS_SMI_ACC2X2_ENABLE_INT1 || SENSORS_SMI_ACC2X2_ENABLE_INT2)
+	help
+	 If you say yes here, you get support for the Bosch significant
+	 motion sensor function.
+
+config DOUBLE_TAP
+	tristate "support double tap sensor function"
+	depends on SENSORS_SMI_ACC2X2  && ( SENSORS_SMI_ACC2X2_ENABLE_INT1 || SENSORS_SMI_ACC2X2_ENABLE_INT2)
+	help
+	 If you say yes here, you get support for the Bosch double tap
+	 sensor function.
+
+config SENSORS_SMI_GYRO
+	tristate "Bosch Gyroscope Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 gyroscope sensor drivers such as SMI130_GYRO/SMI055/BMI058.
+
+config SENSORS_SMI_GYRO_FIFO
+	tristate "Bosch Gyroscope FIFO Support"
+	depends on SENSORS_SMI_GYRO
+	help
+	 If you say yes here, you get support for gyroscope sensor FIFO operations.
+	 Please check whether the chip supports the FIFO feature before enabling this.
+
+config SENSORS_BMI058
+	tristate "BMI058 Sensor Support"
+	depends on (SENSORS_SMI_GYRO || SENSORS_SMI_ACC2X2)
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 sensor driver of BMI058.
+
+config SENSORS_YAS537
+	tristate "YAS537 Magnetic Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for the YAMAHA
+	 YAS537 magnetic sensor.
+
+config SENSORS_BMM050
+	tristate "BMM050 Magnetic Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 BMM050 magnetic sensor.
+
+config SENSORS_AKM09911
+	tristate "AKM09911 Mag Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for the AKM09911 sensor.
+
+config SENSORS_AKM09912
+	tristate "AKM09912 Mag Sensor Driver"
+	depends on I2C
+	help
+	 If you say yes here, you get support for the AKM09912 sensor.
+
+config SENSORS_SMI_ACC420
+	tristate "SMI_ACC4XY Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 sensor driver of SMI_ACC420.
+
+config SENSORS_SMI_ACC421
+	tristate "SMI_ACC4XY Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 sensor driver of SMI_ACC421.
+
+config SENSORS_SMI_ACC422
+	tristate "SMI_ACC4XY Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 sensor driver of SMI_ACC422.
+
+config SENSORS_SMI_ACC455
+	tristate "SMI_ACC4XY Sensor Support"
+	depends on I2C || SPI_MASTER
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 sensor driver of SMI_ACC455.
+
+config SMI_ACC4XY_MAG_INTERFACE_SUPPORT
+	tristate "SMI_ACC4XY Sensor mag interface support"
+	depends on SENSORS_SMI_ACC4XY
+	help
+	 If you say yes here, you get support for Bosch Sensortec's
+	 sensor driver of SMI_ACC4XY with mag sensor support.
+
+
+
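+# Example defconfig fragment (illustrative only): enable the accelerometer
+# and gyroscope drivers together with the smart log function and INT1 on
+# the accelerometer.  INT1 and INT2 are mutually exclusive, so only one of
+# the two interrupt options may be selected.
+#
+#   CONFIG_BOSCH_DRIVER_LOG_FUNC=y
+#   CONFIG_SENSORS_SMI_ACC2X2=y
+#   CONFIG_SENSORS_SMI_ACC2X2_ENABLE_INT1=y
+#   CONFIG_SENSORS_SMI_GYRO=y
+#   CONFIG_SENSORS_SMI_GYRO_FIFO=y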
diff --git a/drivers/input/sensors/smi130/Makefile b/drivers/input/sensors/smi130/Makefile
new file mode 100644
index 0000000..ad1e543
--- /dev/null
+++ b/drivers/input/sensors/smi130/Makefile
@@ -0,0 +1,47 @@
+#
+# Makefile for Bosch sensor driver.
+#
+
+obj-$(CONFIG_BOSCH_DRIVER_LOG_FUNC) += bs_log.o
+obj-y += boschclass.o
+ifeq ($(CONFIG_BOSCH_DRIVER_LOG_FUNC),y)
+	EXTRA_CFLAGS += -DBOSCH_DRIVER_LOG_FUNC
+endif
+
+obj-y   += smi130_acc.o
+
+ifeq ($(CONFIG_SENSORS_SMI_ACC2X2_ENABLE_INT1),y)
+	EXTRA_CFLAGS += -DSMI_ACC2X2_ENABLE_INT1
+endif
+
+ifeq ($(CONFIG_SENSORS_SMI_ACC2X2_ENABLE_INT2),y)
+	EXTRA_CFLAGS += -DSMI_ACC2X2_ENABLE_INT2
+endif
+
+obj-y    += smi130_gyro_driver.o smi130_gyro.o
+EXTRA_CFLAGS += -DSMI_GYRO_USE_BASIC_I2C_FUNC
+
+obj-y    += smi130_driver.o smi130.o
+ifeq ($(CONFIG_SMI130_MAG_INTERFACE_SUPPORT),y)
+		EXTRA_CFLAGS += -DSMI130_MAG_INTERFACE_SUPPORT
+endif
+ifeq ($(CONFIG_SENSORS_SMI130_ENABLE_INT1),y)
+		EXTRA_CFLAGS += -DSMI130_ENABLE_INT1
+endif
+
+ifeq ($(CONFIG_SENSORS_SMI130_ENABLE_INT2),y)
+		EXTRA_CFLAGS += -DSMI130_ENABLE_INT2
+endif
+
+obj-y  += smi130_i2c.o
+
+EXTRA_CFLAGS += -DSMI_USE_BASIC_I2C_FUNC
+
+obj-$(CONFIG_SENSORS_SMI130_SPI)  += smi130_spi.o
+
+
+
+
+
+
+
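+# Note (illustrative): with CONFIG_BOSCH_DRIVER_LOG_FUNC=y this Makefile adds
+# -DBOSCH_DRIVER_LOG_FUNC, so bs_log.h uses the runtime debug_log_level
+# variable (set_debug_log_level()/get_debug_log_level()); otherwise the log
+# verbosity is fixed at compile time by LOG_LEVEL, which could be overridden
+# here, for example:
+#
+#   EXTRA_CFLAGS += -DLOG_LEVEL=LOG_LEVEL_D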
diff --git a/drivers/input/sensors/smi130/boschclass.c b/drivers/input/sensors/smi130/boschclass.c
new file mode 100644
index 0000000..8c28ab1
--- /dev/null
+++ b/drivers/input/sensors/smi130/boschclass.c
@@ -0,0 +1,341 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename boschclass.c
+ * @date     2015/11/17 13:44
+ * @Modification Date 2018/08/28 18:20
+ * @id       "836294d"
+ * @version  1.5.9
+ *
+ * @brief    
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/compiler.h>
+#include <linux/compat.h>
+#include "boschclass.h"
+#include "bs_log.h"
+
+static LIST_HEAD(bosch_dev_list);
+
+/*
+ * bosch_mutex protects access to bosch_dev_list. It also makes
+ * bosch_register_device() and bosch_unregister_device() mutually
+ * exclusive, which simplifies locking in drivers using this class.
+ */
+static DEFINE_MUTEX(bosch_mutex);
+
+
+static void bosch_dev_release(struct device *device)
+{
+	struct bosch_dev *dev = to_bosch_dev(device);
+	if (NULL != dev)
+		kfree(dev);
+	module_put(THIS_MODULE);
+}
+
+
+#ifdef CONFIG_PM
+static int bosch_dev_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int bosch_dev_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops bosch_dev_pm_ops = {
+	.suspend    = bosch_dev_suspend,
+	.resume     = bosch_dev_resume,
+	.poweroff   = bosch_dev_suspend,
+	.restore    = bosch_dev_resume,
+};
+#endif /* CONFIG_PM */
+
+static const struct attribute_group *bosch_dev_attr_groups[] = {
+	NULL
+};
+
+static struct device_type bosch_dev_type = {
+	.groups      = bosch_dev_attr_groups,
+	.release = bosch_dev_release,
+#ifdef CONFIG_PM
+	.pm      = &bosch_dev_pm_ops,
+#endif
+};
+
+
+
+static char *bosch_devnode(struct device *dev, mode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+struct class bosch_class = {
+	.name        = "bosch",
+	.owner       = THIS_MODULE,
+	.devnode     = (void*)bosch_devnode,
+	.dev_release = bosch_dev_release,
+};
+EXPORT_SYMBOL_GPL(bosch_class);
+
+/**
+ * bosch_allocate_device - allocate memory for new input device
+ *
+ * Returns prepared struct bosch_dev or NULL.
+ *
+ * NOTE: Use bosch_free_device() to free devices that have not been
+ * registered; bosch_unregister_device() should be used for already
+ * registered devices.
+ */
+struct bosch_dev *bosch_allocate_device(void)
+{
+	struct bosch_dev *dev;
+
+	dev = kzalloc(sizeof(struct bosch_dev), GFP_KERNEL);
+	if (dev) {
+		dev->dev.type = &bosch_dev_type;
+		dev->dev.class = &bosch_class;
+		device_initialize(&dev->dev);
+		mutex_init(&dev->mutex);
+		INIT_LIST_HEAD(&dev->node);
+		__module_get(THIS_MODULE);
+	}
+	return dev;
+}
+EXPORT_SYMBOL(bosch_allocate_device);
+
+
+
+/**
+ * bosch_free_device - free memory occupied by bosch_dev structure
+ * @dev: input device to free
+ *
+ * This function should only be used if bosch_register_device()
+ * was not called yet or if it failed. Once the device has been
+ * registered, use bosch_unregister_device() instead; the memory will be
+ * freed once the last reference to the device is dropped.
+ *
+ * Device should be allocated by bosch_allocate_device().
+ *
+ * NOTE: If there are references to the input device then memory
+ * will not be freed until last reference is dropped.
+ */
+void bosch_free_device(struct bosch_dev *dev)
+{
+	if (dev)
+		bosch_put_device(dev);
+}
+EXPORT_SYMBOL(bosch_free_device);
+
+/**
+ * bosch_register_device - register device with input core
+ * @dev: device to be registered
+ *
+ * This function registers the device with the bosch class core. The device
+ * must be allocated with bosch_allocate_device() and have all of its
+ * capabilities set up before registering.
+ * If this function fails, the device must be freed with bosch_free_device().
+ * Once device has been successfully registered it can be unregistered
+ * with bosch_unregister_device(); bosch_free_device() should not be
+ * called in this case.
+ */
+int bosch_register_device(struct bosch_dev *dev)
+{
+	const char *path;
+	int error;
+
+
+	/*
+	 * Use the driver-supplied name for the class device; pass it as an
+	 * argument rather than as a format string.
+	 */
+	dev_set_name(&dev->dev, "%s", dev->name);
+
+	error = device_add(&dev->dev);
+	if (error)
+		return error;
+
+	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+	PINFO("%s as %s\n",
+			dev->name ? dev->name : "Unspecified device",
+			path ? path : "N/A");
+	kfree(path);
+	error = mutex_lock_interruptible(&bosch_mutex);
+	if (error) {
+		device_del(&dev->dev);
+		return error;
+	}
+
+	list_add_tail(&dev->node, &bosch_dev_list);
+
+	mutex_unlock(&bosch_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(bosch_register_device);
+
+/**
+ * bosch_unregister_device - unregister previously registered device
+ * @dev: device to be unregistered
+ *
+ * This function unregisters a bosch device. Once the device is unregistered
+ * the caller must not access it, as it may be freed at any moment.
+ */
+void bosch_unregister_device(struct bosch_dev *dev)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&bosch_mutex);
+	if (ret)
+		return;
+
+	list_del_init(&dev->node);
+	mutex_unlock(&bosch_mutex);
+	device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL(bosch_unregister_device);
+
+static int __init bosch_init(void)
+{
+	int err;
+	/*bosch class register*/
+	err = class_register(&bosch_class);
+	if (err) {
+		pr_err("unable to register bosch_dev class\n");
+		return err;
+	}
+	return err;
+}
+
+static void __exit bosch_exit(void)
+{
+	/*bosch class*/
+	class_unregister(&bosch_class);
+}
+
+/*subsys_initcall(bosch_init);*/
+
+MODULE_AUTHOR("contact@bosch-sensortec.com");
+MODULE_DESCRIPTION("BST CLASS CORE");
+MODULE_LICENSE("GPL v2");
+
+module_init(bosch_init);
+module_exit(bosch_exit);
diff --git a/drivers/input/sensors/smi130/boschclass.h b/drivers/input/sensors/smi130/boschclass.h
new file mode 100644
index 0000000..a89cc5d
--- /dev/null
+++ b/drivers/input/sensors/smi130/boschclass.h
@@ -0,0 +1,181 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename boschclass.h
+ * @date     2015/11/17 13:44
+ * @Modification Date 2018/08/28 18:20
+ * @id       "836294d"
+ * @version  1.5.9
+ *
+ * @brief  
+ */
+
+#ifndef _BSTCLASS_H
+#define _BSTCLASS_H
+
+#ifdef __KERNEL__
+#include <linux/time.h>
+#include <linux/list.h>
+#else
+#include <sys/time.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <linux/types.h>
+#endif
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
+
+struct bosch_dev {
+	const char *name;
+
+	int (*open)(struct bosch_dev *dev);
+	void (*close)(struct bosch_dev *dev);
+	struct mutex mutex;
+	struct device dev;
+	struct list_head node;
+};
+
+#define to_bosch_dev(d) container_of(d, struct bosch_dev, dev)
+
+struct bosch_dev *bosch_allocate_device(void);
+void bosch_free_device(struct bosch_dev *dev);
+
+static inline struct bosch_dev *bosch_get_device(struct bosch_dev *dev)
+{
+	return dev ? to_bosch_dev(get_device(&dev->dev)) : NULL;
+}
+
+static inline void bosch_put_device(struct bosch_dev *dev)
+{
+	if (dev)
+		put_device(&dev->dev);
+}
+
+static inline void *bosch_get_drvdata(struct bosch_dev *dev)
+{
+	return dev_get_drvdata(&dev->dev);
+}
+
+static inline void bosch_set_drvdata(struct bosch_dev *dev, void *data)
+{
+	dev_set_drvdata(&dev->dev, data);
+}
+
+int __must_check bosch_register_device(struct bosch_dev *);
+void bosch_unregister_device(struct bosch_dev *);
+
+void bosch_reset_device(struct bosch_dev *);
+
+
+extern struct class bosch_class;
+
+#endif
diff --git a/drivers/input/sensors/smi130/bs_log.c b/drivers/input/sensors/smi130/bs_log.c
new file mode 100644
index 0000000..05ddddd
--- /dev/null
+++ b/drivers/input/sensors/smi130/bs_log.c
@@ -0,0 +1,153 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename bs_log.c
+ * @date     "Wed Sep 24 15:27:12 2014 +0800"
+ * @Modification Date 2018/08/28 18:20
+ * @id       "e416c14"
+ *
+ * @brief
+ * The source file of BOSCH SENSOR LOG
+*/
+
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/types.h>
+#else
+#include <unistd.h>
+#include <sys/types.h>
+#endif
+
+#include <linux/time.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#ifdef BOSCH_DRIVER_LOG_FUNC
+#define BSLOG_VAR_DEF
+#include "bs_log.h"
+
+void set_debug_log_level(uint8_t level)
+{
+	debug_log_level = level;
+}
+
+uint8_t get_debug_log_level(void)
+{
+	return debug_log_level;
+}
+
+EXPORT_SYMBOL(set_debug_log_level);
+EXPORT_SYMBOL(get_debug_log_level);
+
+#endif/*BOSCH_DRIVER_LOG_FUNC*/
+/*@}*/
diff --git a/drivers/input/sensors/smi130/bs_log.h b/drivers/input/sensors/smi130/bs_log.h
new file mode 100644
index 0000000..86ef153
--- /dev/null
+++ b/drivers/input/sensors/smi130/bs_log.h
@@ -0,0 +1,274 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename bs_log.h
+ * @date     "Sat Oct 11 16:12:16 2014 +0800"
+ * @Modification Date 2018/08/28 18:20
+ * @id       "762cc9e"
+ *
+ * @brief
+ * The head file of BOSCH SENSOR LOG
+*/
+
+#ifndef __BS_LOG_H
+#define __BS_LOG_H
+
+#include <linux/kernel.h>
+
+/*! @ trace functions
+ @{*/
+/*! ERROR LOG LEVEL */
+#define LOG_LEVEL_E 3
+/*! NOTICE LOG LEVEL */
+#define LOG_LEVEL_N 5
+/*! INFORMATION LOG LEVEL */
+#define LOG_LEVEL_I 6
+/*! DEBUG LOG LEVEL */
+#define LOG_LEVEL_D 7
+/*! DEBUG_FWDL LOG LEVEL */
+#define LOG_LEVEL_DF 10
+/*! DEBUG_DATA LOG LEVEL */
+#define LOG_LEVEL_DA 15
+/*! ALL LOG LEVEL */
+#define LOG_LEVEL_A 20
+
+#ifndef MODULE_TAG
+/*! MODULE TAG DEFINITION */
+#define MODULE_TAG "<BS_LOG>"
+#endif
+
+#ifndef LOG_LEVEL
+/*! LOG LEVEL DEFINITION */
+#define LOG_LEVEL LOG_LEVEL_I
+#endif
+
+#ifdef BOSCH_DRIVER_LOG_FUNC
+	#ifdef BSLOG_VAR_DEF
+		uint8_t debug_log_level = LOG_LEVEL;
+	#else
+		extern uint8_t debug_log_level;
+	#endif
+
+	/*! print error message */
+	#define PERR(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_E)\
+			printk(KERN_INFO "\n" "[E]" KERN_ERR MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print notice message */
+	#define PNOTICE(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_N)\
+			printk(KERN_INFO "\n" "[N]" KERN_NOTICE MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print information message */
+	#define PINFO(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_I)\
+			printk(KERN_INFO "\n" "[I]" KERN_INFO MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print debug message */
+	#define PDEBUG(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_D)\
+			printk(KERN_INFO "\n" "[D]" KERN_DEBUG MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print debug fw download message */
+	#define PDEBUG_FWDL(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_DF)\
+			printk(KERN_INFO "\n" "[DF]" KERN_DEBUG MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	/*! print debug data log message */
+	#define PDEBUG_DLOG(fmt, args...) do\
+	{\
+		if (debug_log_level >= LOG_LEVEL_DA)\
+			printk(KERN_INFO "\n" "[DA]" KERN_DEBUG MODULE_TAG \
+				"<%s><%d>" fmt "\n", __func__, __LINE__, ##args);\
+	} while (0)
+
+	void set_debug_log_level(uint8_t level);
+	uint8_t get_debug_log_level(void);
+
+#else
+
+	#if (LOG_LEVEL >= LOG_LEVEL_E)
+	/*! print error message */
+	#define PERR(fmt, args...) \
+		printk(KERN_INFO "\n" "[E]" KERN_ERR MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PERR(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_N)
+	/*! print notice message */
+	#define PNOTICE(fmt, args...) \
+		printk(KERN_INFO "\n" "[N]" KERN_NOTICE MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PNOTICE(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_I)
+	/*! print information message */
+	#define PINFO(fmt, args...) printk(KERN_INFO "\n" "[I]" KERN_INFO MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PINFO(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_D)
+	/*! print debug message */
+	#define PDEBUG(fmt, args...) printk(KERN_INFO "\n" "[D]" KERN_DEBUG MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PDEBUG(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_DF)
+	/*! print debug fw download message */
+	#define PDEBUG_FWDL(fmt, args...) printk(KERN_INFO "\n" "[DF]" KERN_DEBUG MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PDEBUG_FWDL(fmt, args...)
+	#endif
+
+	#if (LOG_LEVEL >= LOG_LEVEL_DA)
+	/*! print debug data log message */
+	#define PDEBUG_DLOG(fmt, args...) printk(KERN_INFO "\n" "[DA]" KERN_DEBUG MODULE_TAG \
+		"<%s><%d>" fmt "\n", __func__, __LINE__, ##args)
+	#else
+	/*! invalid message */
+	#define PDEBUG_DLOG(fmt, args...)
+	#endif
+
+	#define set_debug_log_level(level) {}
+	#define get_debug_log_level() (LOG_LEVEL)
+
+#endif
+
+#endif/*__BS_LOG_H*/
+/*@}*/
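+
+/*
+ * Usage sketch (illustrative only): a driver defines its own MODULE_TAG
+ * before including this header so the tag appears in every message, e.g.
+ *
+ *	#define MODULE_TAG "<EXAMPLE>"
+ *	#include "bs_log.h"
+ *
+ *	PINFO("probe done, chip id 0x%x", chip_id);
+ *	PDEBUG("raw data %d %d %d", x, y, z);
+ *
+ * (chip_id, x, y and z are placeholder variables.)  With
+ * BOSCH_DRIVER_LOG_FUNC enabled the verbosity can be raised at runtime
+ * via set_debug_log_level(LOG_LEVEL_D); otherwise messages above the
+ * compile-time LOG_LEVEL are compiled out.
+ */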
diff --git a/drivers/input/sensors/smi130/modules.order b/drivers/input/sensors/smi130/modules.order
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/input/sensors/smi130/modules.order
diff --git a/drivers/input/sensors/smi130/readme.md b/drivers/input/sensors/smi130/readme.md
new file mode 100644
index 0000000..48b2cc1
--- /dev/null
+++ b/drivers/input/sensors/smi130/readme.md
@@ -0,0 +1,49 @@
+# SMI130 sensor API
+## Introduction
+This package contains Robert Bosch GmbH's SMI130 sensor driver (sensor API).
+
+## Version
+File                 | Version | Date
+---------------------|---------|---------------
+smi130.h             |  2.0.9  |   2018/08/28
+smi130.c             |  2.0.9  |   2018/08/28
+smi130_spi.c         |   1.3   |   2018/08/28
+smi130_i2c.c         |   1.3   |   2018/08/28
+smi130_gyro_driver.c |   1.5.9 |   2018/08/28
+smi130_gyro.c        |   1.5   |   2018/08/28
+smi130_gyro.h        |   1.5   |   2018/08/28
+smi130_driver.h      |   1.3   |   2018/08/28
+smi130_driver.c      |   1.3   |   2018/08/28
+smi130_acc.c         |   2.1.2 |   2018/08/28
+bs_log.h             |         |   2018/08/28
+bs_log.c             |         |   2018/08/28
+boschclass.h         |  1.5.9  |   2018/08/28
+boschclass.c         |  1.5.9  |   2018/08/28
+
+
+
+## File information
+* smi130.h : The header file of the SMI130 API
+* smi130.c : Sensor driver for the SMI130 sensor
+* smi130_spi.c : This file implements the module function which adds the driver to the SPI core.
+* smi130_i2c.c : This file implements the module function which adds the driver to the I2C core.
+* smi130_driver.h : The header file of the SMI130 device driver core code
+* smi130_driver.c : This file implements the core code of the SMI130 device driver
+* bs_log.h : The header file of the BOSCH SENSOR LOG
+* bs_log.c : The source file of the BOSCH SENSOR LOG
+* boschclass.h :
+* boschclass.c :
+
+
+## Supported sensor interface
+* SPI 4-wire
+* I2C
+
+## Copyright
+
+Copyright (C) 2016 - 2017 Bosch Sensortec GmbH
+Modification Copyright (C) 2018 Robert Bosch Kft  All Rights Reserved
+
+This software program is licensed subject to the GNU General
+Public License (GPL), Version 2, June 1991,
+available at http://www.fsf.org/copyleft/gpl.html
\ No newline at end of file
diff --git a/drivers/input/sensors/smi130/smi130.c b/drivers/input/sensors/smi130/smi130.c
new file mode 100644
index 0000000..1ddd3b5
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130.c
@@ -0,0 +1,18785 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+*
+* @filename smi130.c
+* @Date: 2015/04/02
+* @Modification Date 2018/08/28 18:20
+* @id       836294d
+* @Revision: 2.0.9 $
+*
+* Usage: Sensor Driver for SMI130 sensor
+*/
+
+
+#include "smi130.h"
+#include <linux/kernel.h>
+
+/* user defined code to be added here ... */
+struct smi130_t *p_smi130;
+/* used for reading the mag trim values for compensation*/
+struct trim_data_t mag_trim_mbl;
+/* the following variable is used to avoid selecting auto mode
+while the BMM150 mag interface is running in manual mode */
+u8 V_bmm150_maual_auto_condition_u8_mbl = SMI130_INIT_VALUE;
+/* used for reading the AKM compensating data */
+struct bosch_akm_sensitivity_data_t akm_asa_data_mbl;
+/* Assign the fifo time */
+u32 V_fifo_time_U32_mbl = SMI130_INIT_VALUE;
+
+/* FIFO data read for 1024 bytes of data */
+u8 v_fifo_data_u8_mbl[FIFO_FRAME] = {SMI130_INIT_VALUE};
+/* YAMAHA-YAS532*/
+/* value of coeff*/
+static const int yas532_version_ac_coef[] = {YAS532_VERSION_AC_COEF_X,
+YAS532_VERSION_AC_COEF_Y1, YAS532_VERSION_AC_COEF_Y2};
+/* used for reading the yas532 calibration data*/
+struct yas532_t yas532_data_mbl;
+/* used for reading the yas537 calibration data*/
+struct yas537_t yas537_data_mbl;
+/*!
+ *	@brief
+ *	This function is used for initialize
+ *	bus read and bus write functions
+ *	assign the chip id and device address
+ *	chip id is read in the register 0x00 bit from 0 to 7
+ *
+ *	@param smi130 : structure pointer
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	While changing members of the smi130_t structure,
+ *	consider the following point:
+ *	the driver keeps a reference to the structure passed in here,
+ *	so changing the referenced structure later also changes what
+ *	the driver sees; make sure such changes do not unintentionally
+ *	affect the driver
+ *	(best case: do not change the structure after initialization).
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_init(struct smi130_t *smi130)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	u8 v_pmu_data_u8 = SMI130_INIT_VALUE;
+	/* assign smi130 ptr */
+	p_smi130 = smi130;
+	com_rslt =
+	p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+	SMI130_USER_CHIP_ID__REG,
+	&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* read Chip Id */
+	p_smi130->chip_id = v_data_u8;
+	/* To avoid gyro wakeup it is required to write 0x00 to 0x6C*/
+	com_rslt += smi130_write_reg(SMI130_USER_PMU_TRIGGER_ADDR,
+	&v_pmu_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	return com_rslt;
+}
+/*!
+ * @brief
+ *	This API writes the data to
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> The data to be written to the register
+ *	@param v_len_u8 -> number of bytes to write
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_write_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write data from register*/
+			com_rslt =
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->dev_addr,
+			v_addr_u8, v_data_u8, v_len_u8);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief
+ *	This API reads the data from
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> Buffer for the data read from the register
+ *	@param v_len_u8 -> number of bytes to read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_read_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* Read data from register*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			v_addr_u8, v_data_u8, v_len_u8);
+		}
+	return com_rslt;
+}
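+/*
+ * Illustrative usage sketch (editorial addition): reading the chip id
+ * through the generic smi130_read_reg() helper after smi130_init() has
+ * been called. Only identifiers defined in this file or in smi130.h are
+ * used here.
+ *
+ *	u8 chip_id = SMI130_INIT_VALUE;
+ *
+ *	if (smi130_read_reg(SMI130_USER_CHIP_ID__REG, &chip_id,
+ *			SMI130_GEN_READ_WRITE_DATA_LENGTH) == 0)
+ *		pr_info("smi130 chip id: 0x%02x\n", chip_id);
+ */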
+/*!
+ *	@brief This API is used to read the fatal error
+ *	from register 0x02 bit 0.
+ *	This flag is reset only by power-on reset and soft reset.
+ *
+ *
+ *  @param v_fatal_err_u8 : The status of fatal error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fatal_err(u8
+*v_fatal_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* reading the fatal error status*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FATAL_ERR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fatal_err_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FATAL_ERR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read the error code
+ *	from register 0x02 bits 1 to 4
+ *
+ *
+ *  @param v_err_code_u8 : The status of error codes
+ *	error_code  |    description
+ *  ------------|---------------
+ *	0x00        |no error
+ *	0x01        |ACC_CONF error (accel ODR and bandwidth not compatible)
+ *	0x02        |GYR_CONF error (Gyroscope ODR and bandwidth not compatible)
+ *	0x03        |Under sampling mode and interrupt uses pre filtered data
+ *	0x04        |reserved
+ *	0x05        |Selected trigger-readout offset in
+ *    -         |MAG_IF greater than selected ODR
+ *	0x06        |FIFO configuration error for headerless mode
+ *	0x07        |Under sampling mode and pre filtered data as FIFO source
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_err_code(u8
+*v_err_code_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ERR_CODE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_err_code_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_ERR_CODE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the I2C error code from
+ *	register 0x02 bit 5.
+ *	This error is set when the I2C master detects a failure.
+ *
+ *  @param v_i2c_err_code_u8 : The status of i2c fail error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_fail_err(u8
+*v_i2c_err_code_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_I2C_FAIL_ERR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_err_code_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_I2C_FAIL_ERR);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API Reads the dropped command error
+ *	from the register 0x02 bit 6
+ *
+ *
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_drop_cmd_err(u8
+*v_drop_cmd_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_DROP_CMD_ERR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_drop_cmd_err_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_DROP_CMD_ERR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the magnetometer data ready
+ *	error (data ready interrupt not active).
+ *	It is read from the error register 0x02 bit 7
+ *
+ *
+ *
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_dada_rdy_err(
+u8 *v_mag_data_rdy_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_MAG_DADA_RDY_ERR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_data_rdy_err_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_DADA_RDY_ERR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the error status
+ *	from the error register 0x02 bits 0 to 7
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *  @param v_fatal_er_u8r : The status of fatal error
+ *  @param v_err_code_u8 : The status of error code
+ *  @param v_i2c_fail_err_u8 : The status of I2C fail error
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_error_status(u8 *v_fatal_er_u8r,
+u8 *v_err_code_u8, u8 *v_i2c_fail_err_u8,
+u8 *v_drop_cmd_err_u8, u8 *v_mag_data_rdy_err_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the error codes*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_ERR_STAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			/* fatal error*/
+			*v_fatal_er_u8r =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FATAL_ERR);
+			/* user error*/
+			*v_err_code_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_ERR_CODE);
+			/* i2c fail error*/
+			*v_i2c_fail_err_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_I2C_FAIL_ERR);
+			/* drop command error*/
+			*v_drop_cmd_err_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_DROP_CMD_ERR);
+			/* mag data ready error*/
+			*v_mag_data_rdy_err_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_DADA_RDY_ERR);
+		}
+	return com_rslt;
+}
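+/*
+ * Illustrative usage sketch (editorial addition): fetching all error flags
+ * with a single register read via smi130_get_error_status() instead of
+ * five separate calls.
+ *
+ *	u8 fatal_err, err_code, i2c_fail_err, drop_cmd_err, mag_drdy_err;
+ *
+ *	if (smi130_get_error_status(&fatal_err, &err_code, &i2c_fail_err,
+ *			&drop_cmd_err, &mag_drdy_err) == 0 && err_code != 0)
+ *		pr_warn("smi130 error code 0x%02x (fatal=%u)\n",
+ *			err_code, fatal_err);
+ */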
+/*!
+ *	@brief This API reads the magnetometer power mode from
+ *	PMU status register 0x03 bit 0 and 1
+ *
+ *  @param v_mag_power_mode_stat_u8 : The value of mag power mode
+ *	mag_powermode    |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   LOW POWER       |   0x02
+ *
+ *
+ * @note The power mode of the mag is set via the 0x7E command register
+ * @note using the function "smi130_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x18    | MAG_MODE_SUSPEND
+ *   0x19    | MAG_MODE_NORMAL
+ *   0x1A    | MAG_MODE_LOWPOWER
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_power_mode_stat(u8
+*v_mag_power_mode_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_POWER_MODE_STAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_power_mode_stat_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_POWER_MODE_STAT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the gyroscope power mode from
+ *	PMU status register 0x03 bit 2 and 3
+ *
+ *  @param v_gyro_power_mode_stat_u8 :	The value of gyro power mode
+ *	gyro_powermode   |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   FAST POWER UP   |   0x03
+ *
+ * @note The power mode of the gyro is set via the 0x7E command register
+ * @note using the function "smi130_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x14    | GYRO_MODE_SUSPEND
+ *   0x15    | GYRO_MODE_NORMAL
+ *   0x17    | GYRO_MODE_FASTSTARTUP
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_power_mode_stat(u8
+*v_gyro_power_mode_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_GYRO_POWER_MODE_STAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_power_mode_stat_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_GYRO_POWER_MODE_STAT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the accelerometer power mode from
+ *	PMU status register 0x03 bit 4 and 5
+ *
+ *
+ *  @param v_accel_power_mode_stat_u8 :	The value of accel power mode
+ *	accel_powermode  |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *  LOW POWER        |   0x02
+ *
+ * @note The power mode of the accel is set via the 0x7E command register
+ * @note using the function "smi130_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x11    | ACCEL_MODE_NORMAL
+ *   0x12    | ACCEL_LOWPOWER
+ *   0x10    | ACCEL_SUSPEND
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_power_mode_stat(u8
+*v_accel_power_mode_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_POWER_MODE_STAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_power_mode_stat_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_ACCEL_POWER_MODE_STAT);
+		}
+	return com_rslt;
+}
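+/*
+ * Illustrative usage sketch (editorial addition): requesting accel normal
+ * mode via the 0x7E command register and verifying the switch with the PMU
+ * status read above. The command value 0x11 and the expected status value
+ * 0x01 are taken from the tables in the comments of this file; a short
+ * delay or a retry loop between command and status read may be required
+ * (see smi130_set_mag_interface_normal() for the polling pattern this
+ * driver uses).
+ *
+ *	u8 accel_pmu_stat = SMI130_INIT_VALUE;
+ *
+ *	smi130_set_command_register(0x11);        ACCEL_MODE_NORMAL per table
+ *	if (smi130_get_accel_power_mode_stat(&accel_pmu_stat) == 0 &&
+ *			accel_pmu_stat == 0x01)
+ *		pr_info("smi130 accel is in normal mode\n");
+ */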
+/*!
+ *	@brief This API switches the mag interface to normal mode
+ *	and confirms whether the mode switch completed successfully or not
+ *
+ *	@return results of bus communication function and current MAG_PMU result
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_interface_normal(void)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	/* retry counter used to verify that the mag interface switched to normal mode */
+	u8 v_try_times_u8 = SMI130_MAG_NOAMRL_SWITCH_TIMES;
+	u8 v_mag_pum_status_u8 = SMI130_INIT_VALUE;
+
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt = smi130_set_command_register(MAG_MODE_NORMAL);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	while (v_try_times_u8) {
+		com_rslt = smi130_get_mag_power_mode_stat(&v_mag_pum_status_u8);
+		if (v_mag_pum_status_u8 == MAG_INTERFACE_PMU_ENABLE)
+			break;
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		v_try_times_u8--;
+	}
+	if (v_mag_pum_status_u8 == MAG_INTERFACE_PMU_ENABLE)
+		com_rslt += SUCCESS;
+	else
+		com_rslt += E_SMI130_COMM_RES;
+
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data X values
+ *	from the register 0x04 and 0x05
+ *	@brief The mag sensor data is read from the auxiliary mag interface
+ *
+ *  @param v_mag_x_s16 : The value of mag x
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_x(s16 *v_mag_x_s16,
+u8 v_sensor_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the mag X LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_MAG_X_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_DATA_MAG_X_LSB__REG,
+			v_data_u8, SMI130_MAG_X_DATA_LENGTH);
+			/* X axis*/
+			v_data_u8[SMI130_MAG_X_LSB_BYTE] =
+			SMI130_GET_BITSLICE(v_data_u8[SMI130_MAG_X_LSB_BYTE],
+			SMI130_USER_DATA_MAG_X_LSB);
+			*v_mag_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_MAG_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[SMI130_MAG_X_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_0_MAG_X_LSB__REG,
+			v_data_u8, SMI130_MAG_X_DATA_LENGTH);
+			*v_mag_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_MAG_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_MAG_X_LSB_BYTE]));
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data Y values
+ *	from the register 0x06 and 0x07
+ *	@brief The mag sensor data is read from the auxiliary mag interface
+ *
+ *  @param v_mag_y_s16 : The value of mag y
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_y(s16 *v_mag_y_s16,
+u8 v_sensor_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the mag Y LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_MAG_Y_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_DATA_MAG_Y_LSB__REG,
+			v_data_u8, SMI130_MAG_Y_DATA_LENGTH);
+			/*Y-axis lsb value shifting*/
+			v_data_u8[SMI130_MAG_Y_LSB_BYTE] =
+			SMI130_GET_BITSLICE(v_data_u8[SMI130_MAG_Y_LSB_BYTE],
+			SMI130_USER_DATA_MAG_Y_LSB);
+			*v_mag_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_MAG_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[SMI130_MAG_Y_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_DATA_2_MAG_Y_LSB__REG,
+			v_data_u8, SMI130_MAG_Y_DATA_LENGTH);
+			*v_mag_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_MAG_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_MAG_Y_LSB_BYTE]));
+		break;
+		default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data Z values
+ *	from the register 0x08 and 0x09
+ *	@brief The mag sensor data is read from the auxiliary mag interface
+ *
+ *  @param v_mag_z_s16 : The value of mag z
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_z(s16 *v_mag_z_s16,
+u8 v_sensor_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the mag Z LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_MAG_Z_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_DATA_MAG_Z_LSB__REG,
+			v_data_u8, SMI130_MAG_Z_DATA_LENGTH);
+			/*Z-axis lsb value shifting*/
+			v_data_u8[SMI130_MAG_Z_LSB_BYTE] =
+			SMI130_GET_BITSLICE(v_data_u8[SMI130_MAG_Z_LSB_BYTE],
+			SMI130_USER_DATA_MAG_Z_LSB);
+			*v_mag_z_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_MAG_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_07_BITS) |
+			(v_data_u8[SMI130_MAG_Z_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_DATA_4_MAG_Z_LSB__REG,
+			v_data_u8, SMI130_MAG_Z_DATA_LENGTH);
+			*v_mag_z_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_MAG_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (
+			v_data_u8[SMI130_MAG_Z_LSB_BYTE]));
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data RHALL values
+ *	from the register 0x0A and 0x0B
+ *
+ *
+ *  @param v_mag_r_s16 : The value of BMM150 r data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_r(s16 *v_mag_r_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the mag R LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_MAG_R_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_6_RHALL_LSB__REG,
+			v_data_u8, SMI130_MAG_R_DATA_LENGTH);
+			/*R-axis lsb value shifting*/
+			v_data_u8[SMI130_MAG_R_LSB_BYTE] =
+			SMI130_GET_BITSLICE(v_data_u8[SMI130_MAG_R_LSB_BYTE],
+			SMI130_USER_DATA_MAG_R_LSB);
+			*v_mag_r_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_MAG_R_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS) |
+			(v_data_u8[SMI130_MAG_R_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads magnetometer data X,Y,Z values
+ *	from the register 0x04 to 0x09
+ *
+ *	@brief The mag sensor data is read from the auxiliary mag interface
+ *
+ *  @param mag : The value of mag xyz data
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_xyz(
+struct smi130_mag_t *mag, u8 v_sensor_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the mag XYZ LSB and MSB data
+		v_data_u8[0] - X-LSB
+		v_data_u8[1] - X-MSB
+		v_data_u8[2] - Y-LSB
+		v_data_u8[3] - Y-MSB
+		v_data_u8[4] - Z-LSB
+		v_data_u8[5] - Z-MSB
+		*/
+	u8 v_data_u8[SMI130_MAG_XYZ_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_sensor_select_u8) {
+		case BST_BMM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_DATA_MAG_X_LSB__REG,
+			v_data_u8, SMI130_MAG_XYZ_DATA_LENGTH);
+			/*X-axis lsb value shifting*/
+			v_data_u8[SMI130_DATA_FRAME_MAG_X_LSB_BYTE] =
+			SMI130_GET_BITSLICE(
+			v_data_u8[SMI130_DATA_FRAME_MAG_X_LSB_BYTE],
+			SMI130_USER_DATA_MAG_X_LSB);
+			/* Data X */
+			mag->x = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[SMI130_DATA_FRAME_MAG_X_LSB_BYTE]));
+			/* Data Y */
+			/*Y-axis lsb value shifting*/
+			v_data_u8[SMI130_DATA_FRAME_MAG_Y_LSB_BYTE] =
+			SMI130_GET_BITSLICE(
+			v_data_u8[SMI130_DATA_FRAME_MAG_Y_LSB_BYTE],
+			SMI130_USER_DATA_MAG_Y_LSB);
+			mag->y = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_05_BITS) |
+			(v_data_u8[SMI130_DATA_FRAME_MAG_Y_LSB_BYTE]));
+
+			/* Data Z */
+			/*Z-axis lsb value shifting*/
+			v_data_u8[SMI130_DATA_FRAME_MAG_Z_LSB_BYTE]
+			= SMI130_GET_BITSLICE(
+			v_data_u8[SMI130_DATA_FRAME_MAG_Z_LSB_BYTE],
+			SMI130_USER_DATA_MAG_Z_LSB);
+			mag->z = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_07_BITS) |
+			(v_data_u8[SMI130_DATA_FRAME_MAG_Z_LSB_BYTE]));
+		break;
+		case BST_AKM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_0_MAG_X_LSB__REG,
+			v_data_u8, SMI130_MAG_XYZ_DATA_LENGTH);
+			/* Data X */
+			mag->x = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_DATA_FRAME_MAG_X_LSB_BYTE]));
+			/* Data Y */
+			mag->y = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_DATA_FRAME_MAG_Y_LSB_BYTE]));
+			/* Data Z */
+			mag->z = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_DATA_FRAME_MAG_Z_LSB_BYTE]));
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
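+/*
+ * Illustrative usage sketch (editorial addition): reading one raw BMM150
+ * sample through the auxiliary mag interface. The interface is switched to
+ * normal mode with the helper defined earlier in this file; configuring
+ * the BMM150 itself is left to the platform code.
+ *
+ *	struct smi130_mag_t mag_raw;
+ *
+ *	if (smi130_set_mag_interface_normal() == 0 &&
+ *			smi130_read_mag_xyz(&mag_raw, BST_BMM) == 0)
+ *		pr_info("smi130 mag raw x=%d y=%d z=%d\n",
+ *			mag_raw.x, mag_raw.y, mag_raw.z);
+ */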
+/*!
+ *	@brief This API reads magnetometer data X,Y,Z,r
+ *	values from the register 0x04 to 0x0B
+ *
+ *	@brief The mag sensor data is read from the auxiliary mag interface
+ *
+ *  @param mag : The value of mag-BMM150 xyzr data
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_xyzr(
+struct smi130_mag_xyzr_t *mag)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8[SMI130_MAG_XYZR_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_MAG_X_LSB__REG,
+			v_data_u8, SMI130_MAG_XYZR_DATA_LENGTH);
+
+			/* Data X */
+			/*X-axis lsb value shifting*/
+			v_data_u8[SMI130_DATA_FRAME_MAG_X_LSB_BYTE]
+			= SMI130_GET_BITSLICE(
+			v_data_u8[SMI130_DATA_FRAME_MAG_X_LSB_BYTE],
+			SMI130_USER_DATA_MAG_X_LSB);
+			mag->x = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_05_BITS)
+			| (v_data_u8[SMI130_DATA_FRAME_MAG_X_LSB_BYTE]));
+			/* Data Y */
+			/*Y-axis lsb value shifting*/
+			v_data_u8[SMI130_DATA_FRAME_MAG_Y_LSB_BYTE]
+			= SMI130_GET_BITSLICE(
+			v_data_u8[SMI130_DATA_FRAME_MAG_Y_LSB_BYTE],
+			SMI130_USER_DATA_MAG_Y_LSB);
+			mag->y = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_05_BITS)
+			| (v_data_u8[
+			SMI130_DATA_FRAME_MAG_Y_LSB_BYTE]));
+
+			/* Data Z */
+			/*Z-axis lsb value shifting*/
+			v_data_u8[SMI130_DATA_FRAME_MAG_Z_LSB_BYTE]
+			= SMI130_GET_BITSLICE(
+			v_data_u8[SMI130_DATA_FRAME_MAG_Z_LSB_BYTE],
+			SMI130_USER_DATA_MAG_Z_LSB);
+			mag->z = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_07_BITS)
+			| (v_data_u8[SMI130_DATA_FRAME_MAG_Z_LSB_BYTE]));
+
+			/* RHall */
+			/*R-axis lsb value shifting*/
+			v_data_u8[SMI130_DATA_FRAME_MAG_R_LSB_BYTE]
+			= SMI130_GET_BITSLICE(
+			v_data_u8[SMI130_DATA_FRAME_MAG_R_LSB_BYTE],
+			SMI130_USER_DATA_MAG_R_LSB);
+			mag->r = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_MAG_R_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS)
+			| (v_data_u8[SMI130_DATA_FRAME_MAG_R_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data X values
+ *	from the register 0x0C and 0x0D
+ *
+ *
+ *
+ *
+ *  @param v_gyro_x_s16 : The value of gyro x data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_x(s16 *v_gyro_x_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the gyro X LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_GYRO_X_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_8_GYRO_X_LSB__REG,
+			v_data_u8, SMI130_GYRO_DATA_LENGTH);
+
+			*v_gyro_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_GYRO_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_GYRO_X_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data Y values
+ *	from the register 0x0E and 0x0F
+ *
+ *
+ *
+ *
+ *  @param v_gyro_y_s16 : The value of gyro y data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error result of communication routines
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_y(s16 *v_gyro_y_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the gyro Y LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_GYRO_Y_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro y data*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_10_GYRO_Y_LSB__REG,
+			v_data_u8, SMI130_GYRO_DATA_LENGTH);
+
+			*v_gyro_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_GYRO_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_GYRO_Y_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data Z values
+ *	from the register 0x10 and 0x11
+ *
+ *
+ *
+ *
+ *  @param v_gyro_z_s16 : The value of gyro z data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_z(s16 *v_gyro_z_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the gyro Z LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_GYRO_Z_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro z data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_12_GYRO_Z_LSB__REG,
+			v_data_u8, SMI130_GYRO_DATA_LENGTH);
+
+			*v_gyro_z_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_GYRO_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_GYRO_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads gyro data X,Y,Z values
+ *	from the register 0x0C to 0x11
+ *
+ *
+ *
+ *
+ *  @param gyro : The value of gyro xyz
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_xyz(struct smi130_gyro_t *gyro)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the gyro XYZ LSB and MSB data
+		v_data_u8[0] - X-LSB
+		v_data_u8[1] - X-MSB
+		v_data_u8[2] - Y-LSB
+		v_data_u8[3] - Y-MSB
+		v_data_u8[4] - Z-LSB
+		v_data_u8[5] - Z-MSB
+		*/
+	u8 v_data_u8[SMI130_GYRO_XYZ_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the gyro xyz data*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_8_GYRO_X_LSB__REG,
+			v_data_u8, SMI130_GYRO_XYZ_DATA_LENGTH);
+
+			/* Data X */
+			gyro->x = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_GYRO_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_DATA_FRAME_GYRO_X_LSB_BYTE]));
+			/* Data Y */
+			gyro->y = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_GYRO_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_DATA_FRAME_GYRO_Y_LSB_BYTE]));
+
+			/* Data Z */
+			gyro->z = (s16)
+			((((s32)((s8)v_data_u8[
+			SMI130_DATA_FRAME_GYRO_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_DATA_FRAME_GYRO_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
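+/*
+ * Illustrative usage sketch (editorial addition): reading one raw gyro
+ * sample. The values are LSB counts; the conversion to angular rate
+ * depends on the range configured with smi130_set_gyro_range().
+ *
+ *	struct smi130_gyro_t gyro_raw;
+ *
+ *	if (smi130_read_gyro_xyz(&gyro_raw) == 0)
+ *		pr_info("smi130 gyro raw x=%d y=%d z=%d\n",
+ *			gyro_raw.x, gyro_raw.y, gyro_raw.z);
+ */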
+/*!
+ *	@brief This API reads accelerometer data X values
+ *	from the register 0x12 and 0x13
+ *
+ *
+ *
+ *
+ *  @param v_accel_x_s16 : The value of accel x
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_x(s16 *v_accel_x_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the accel X LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_ACCEL_X_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_14_ACCEL_X_LSB__REG,
+			v_data_u8, SMI130_ACCEL_DATA_LENGTH);
+
+			*v_accel_x_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_ACCEL_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_ACCEL_X_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads accelerometer data Y values
+ *	from the register 0x14 and 0x15
+ *
+ *
+ *
+ *
+ *  @param v_accel_y_s16 : The value of accel y
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_y(s16 *v_accel_y_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the accel Y LSB and MSB data
+		v_data_u8[0] - LSB
+		v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_ACCEL_Y_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_16_ACCEL_Y_LSB__REG,
+			v_data_u8, SMI130_ACCEL_DATA_LENGTH);
+
+			*v_accel_y_s16 = (s16)
+			((((s32)((s8)v_data_u8[SMI130_ACCEL_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (v_data_u8[SMI130_ACCEL_Y_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads accelerometer data Z values
+ *	from the register 0x16 and 0x17
+ *
+ *
+ *
+ *
+ *  @param v_accel_z_s16 : The value of accel z
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_z(s16 *v_accel_z_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the accel Z LSB and MSB data
+		a_data_u8r[0] - LSB
+		a_data_u8r[1] - MSB*/
+	u8 a_data_u8r[SMI130_ACCEL_Z_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_18_ACCEL_Z_LSB__REG,
+			a_data_u8r, SMI130_ACCEL_DATA_LENGTH);
+
+			*v_accel_z_s16 = (s16)
+			((((s32)((s8)a_data_u8r[SMI130_ACCEL_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[SMI130_ACCEL_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads accelerometer data X,Y,Z values
+ *	from the register 0x12 to 0x17
+ *
+ *
+ *
+ *
+ *  @param accel :The value of accel xyz
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_xyz(
+struct smi130_accel_t *accel)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the accel XYZ LSB and MSB data
+	a_data_u8r[0] - X-LSB
+	a_data_u8r[1] - X-MSB
+	a_data_u8r[2] - Y-LSB
+	a_data_u8r[3] - Y-MSB
+	a_data_u8r[4] - Z-LSB
+	a_data_u8r[5] - Z-MSB
+	*/
+	u8 a_data_u8r[SMI130_ACCEL_XYZ_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_DATA_14_ACCEL_X_LSB__REG,
+			a_data_u8r, SMI130_ACCEL_XYZ_DATA_LENGTH);
+
+			/* Data X */
+			accel->x = (s16)
+			((((s32)((s8)a_data_u8r[
+			SMI130_DATA_FRAME_ACCEL_X_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[SMI130_DATA_FRAME_ACCEL_X_LSB_BYTE]));
+			/* Data Y */
+			accel->y = (s16)
+			((((s32)((s8)a_data_u8r[
+			SMI130_DATA_FRAME_ACCEL_Y_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[SMI130_DATA_FRAME_ACCEL_Y_LSB_BYTE]));
+
+			/* Data Z */
+			accel->z = (s16)
+			((((s32)((s8)a_data_u8r[
+			SMI130_DATA_FRAME_ACCEL_Z_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[SMI130_DATA_FRAME_ACCEL_Z_LSB_BYTE]));
+		}
+	return com_rslt;
+}
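+/*
+ * Illustrative usage sketch (editorial addition): reading one raw accel
+ * sample. The values are LSB counts; the scale factor depends on the range
+ * selected with smi130_set_accel_range().
+ *
+ *	struct smi130_accel_t accel_raw;
+ *
+ *	if (smi130_read_accel_xyz(&accel_raw) == 0)
+ *		pr_info("smi130 accel raw x=%d y=%d z=%d\n",
+ *			accel_raw.x, accel_raw.y, accel_raw.z);
+ */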
+/*!
+ *	@brief This API reads sensor_time from the register
+ *	0x18 to 0x1A
+ *
+ *
+ *  @param v_sensor_time_u32 : The value of sensor time
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_sensor_time(u32 *v_sensor_time_u32)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the 24-bit sensor time
+	a_data_u8r[0] - sensor time LSB
+	a_data_u8r[1] - sensor time
+	a_data_u8r[2] - sensor time MSB
+	*/
+	u8 a_data_u8r[SMI130_SENSOR_TIME_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_SENSORTIME_0_SENSOR_TIME_LSB__REG,
+			a_data_u8r, SMI130_SENSOR_TIME_LENGTH);
+
+			*v_sensor_time_u32 = (u32)
+			((((u32)a_data_u8r[SMI130_SENSOR_TIME_MSB_BYTE])
+			<< SMI130_SHIFT_BIT_POSITION_BY_16_BITS)
+			|(((u32)a_data_u8r[SMI130_SENSOR_TIME_XLSB_BYTE])
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[SMI130_SENSOR_TIME_LSB_BYTE]));
+		}
+	return com_rslt;
+}
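+/*
+ * Illustrative usage sketch (editorial addition): measuring elapsed sensor
+ * time between two points. The register holds a free-running 24-bit
+ * counter, so the difference is masked to 24 bits to handle wrap-around;
+ * refer to the datasheet for the duration of one sensor-time LSB.
+ *
+ *	u32 t_start = SMI130_INIT_VALUE, t_end = SMI130_INIT_VALUE, ticks;
+ *
+ *	smi130_get_sensor_time(&t_start);
+ *	... read or process sensor data ...
+ *	smi130_get_sensor_time(&t_end);
+ *	ticks = (t_end - t_start) & 0x00FFFFFF;   24-bit wrap-around
+ */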
+/*!
+ *	@brief This API reads the Gyroscope self test
+ *	status from the register 0x1B bit 1
+ *
+ *
+ *  @param v_gyro_selftest_u8 : The value of gyro self test status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | Gyroscope self test is running or failed
+ *   1       | Gyroscope self test completed successfully
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_selftest(u8
+*v_gyro_selftest_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STAT_GYRO_SELFTEST_OK__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_selftest_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STAT_GYRO_SELFTEST_OK);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of
+ *	mag manual interface operation from the register 0x1B bit 2
+ *
+ *
+ *
+ *  @param v_mag_manual_stat_u8 : The value of mag manual operation status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | Indicates no manual magnetometer
+ *   -       | interface operation is ongoing
+ *   1       | Indicates manual magnetometer
+ *   -       | interface operation is ongoing
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_manual_operation_stat(u8
+*v_mag_manual_stat_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read manual operation*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STAT_MAG_MANUAL_OPERATION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_manual_stat_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STAT_MAG_MANUAL_OPERATION);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the fast offset compensation
+ *	status from the register 0x1B bit 3
+ *
+ *
+ *  @param v_foc_rdy_u8 : The status of fast compensation
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_rdy(u8
+*v_foc_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the FOC status*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STAT_FOC_RDY__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_rdy_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STAT_FOC_RDY);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API Reads the nvm_rdy status from the
+ *	register 0x1B bit 4
+ *
+ *
+ *  @param v_nvm_rdy_u8 : The value of NVM ready status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | NVM write operation in progress
+ *   1       | NVM is ready to accept a new write trigger
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_nvm_rdy(u8
+*v_nvm_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the nvm ready status*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STAT_NVM_RDY__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nvm_rdy_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STAT_NVM_RDY);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of mag data ready
+ *	from the register 0x1B bit 5
+ *	The status is reset when a mag data register is read out
+ *
+ *  @param v_data_rdy_u8 : The value of mag data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_data_rdy_mag(u8
+*v_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STAT_DATA_RDY_MAG__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STAT_DATA_RDY_MAG);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of gyro data ready from the
+ *	register 0x1B bit 6
+ *	The status is reset when the gyro data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of gyro data ready
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_data_rdy(u8
+*v_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STAT_DATA_RDY_GYRO__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STAT_DATA_RDY_GYRO);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of accel data ready from the
+ *	register 0x1B bit 7
+ *	The status is reset when the accel data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of accel data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_data_rdy(u8
+*v_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/*reads the status of accel data ready*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STAT_DATA_RDY_ACCEL__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STAT_DATA_RDY_ACCEL);
+		}
+	return com_rslt;
+}
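+/*
+ * Illustrative usage sketch (editorial addition): polling the accel
+ * data-ready flag before reading a sample, for setups that do not route
+ * the interrupt pins. The retry count and the use of the registered delay
+ * callback are arbitrary choices for this sketch, not values mandated by
+ * the driver; it assumes smi130_init() has already been called.
+ *
+ *	u8 drdy = SMI130_INIT_VALUE;
+ *	u8 retries = 10;
+ *	struct smi130_accel_t accel_raw;
+ *
+ *	while (retries--) {
+ *		if (smi130_get_accel_data_rdy(&drdy) == 0 && drdy)
+ *			break;
+ *		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+ *	}
+ *	if (drdy && smi130_read_accel_xyz(&accel_raw) == 0)
+ *		pr_info("smi130 accel x=%d y=%d z=%d\n",
+ *			accel_raw.x, accel_raw.y, accel_raw.z);
+ */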
+/*!
+ *	@brief This API reads the step detector interrupt status
+ *	from the register 0x1C bit 0
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the step detector interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_step_intr_u8 : The status of step detector interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_step_intr(u8
+*v_step_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_STEP_INTR__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_step_intr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_STEP_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the
+ *	significant motion interrupt status
+ *	from the register 0x1C bit 1
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the significant motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_significant_intr_u8 : The status of the significant
+ *	motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_significant_intr(u8
+*v_significant_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_significant_intr_u8  = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the any motion interrupt status
+ *	from the register 0x1C bit 2
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the any-motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *  @param v_any_motion_intr_u8 : The status of any-motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_any_motion_intr(u8
+*v_any_motion_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_ANY_MOTION__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_intr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_ANY_MOTION);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the power mode trigger interrupt status
+ *	from the register 0x1C bit 3
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the power mode trigger interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_pmu_trigger_intr_u8 : The status of power mode trigger interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_pmu_trigger_intr(u8
+*v_pmu_trigger_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_PMU_TRIGGER__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_pmu_trigger_intr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_PMU_TRIGGER);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the double tap status
+ *	from the register 0x1C bit 4
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the double tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_double_tap_intr_u8 : The status of double tap interrupt
+ *
+ *	@note Double tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_double_tap()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat2_tap_first_x()
+ *	@note smi130_get_stat2_tap_first_y()
+ *	@note smi130_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note smi130_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note smi130_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note smi130_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note smi130_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_double_tap_intr(u8
+*v_double_tap_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_DOUBLE_TAP_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_double_tap_intr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_DOUBLE_TAP_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the single tap status
+ *	from the register 0x1C bit 5
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the single tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_single_tap_intr_u8 :The status of single tap interrupt
+ *
+ *	@note Single tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_single_tap()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat2_tap_first_x()
+ *	@note smi130_get_stat2_tap_first_y()
+ *	@note smi130_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note smi130_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note smi130_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note smi130_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note smi130_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_single_tap_intr(u8
+*v_single_tap_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_SINGLE_TAP_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_single_tap_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_SINGLE_TAP_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the orient_mbl status
+ *	from the register 0x1C bit 6
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the orient_mbl interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_orient_mbl_intr_u8 : The status of orient_mbl interrupt
+ *
+ *	@note For orient_mbl interrupt configuration use the following functions
+ *	@note STATUS
+ *	@note smi130_get_stat0_orient_mbl_intr()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat3_orient_mbl_xy()
+ *	@note smi130_get_stat3_orient_mbl_z()
+ *	@note smi130_set_intr_orient_mbl_axes_enable()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_orient_mbl()
+ *	@note INTERRUPT OUTPUT
+ *	@note smi130_set_intr_orient_mbl_ud_enable()
+ *	@note THETA
+ *	@note smi130_set_intr_orient_mbl_theta()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_orient_mbl_hyst()
+ *	@note BLOCKING
+ *	@note smi130_set_intr_orient_mbl_blocking()
+ *	@note MODE
+ *	@note smi130_set_intr_orient_mbl_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_orient_mbl_intr(u8
+*v_orient_mbl_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_ORIENT__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_ORIENT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the flat interrupt status
+ *	from the register 0x1C bit 7
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the flat interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_flat_intr_u8 : The status of  flat interrupt
+ *
+ *	@note For flat configuration use the following functions
+ *	@note STATUS
+ *	@note smi130_get_stat0_flat_intr()
+ *	@note smi130_get_stat3_flat()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_flat()
+ *	@note THETA
+ *	@note smi130_set_intr_flat_theta()
+ *	@note HOLD TIME
+ *	@note smi130_set_intr_flat_hold()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_flat_hyst()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_flat_intr(u8
+*v_flat_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_0_FLAT__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_0_FLAT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g interrupt status
+ *	from the register 0x1D bit 2
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the high g  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_high_g_intr_u8 : The status of high_g interrupt
+ *
+ *	@note High_g interrupt configured by following functions
+ *	@note STATUS
+ *	@note smi130_get_stat1_high_g_intr()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat3_high_g_first_x()
+ *	@note smi130_get_stat3_high_g_first_y()
+ *	@note smi130_get_stat3_high_g_first_z()
+ *	@note SIGN MAPPING
+ *	@note smi130_get_stat3_high_g_first_sign()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_high_g()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_high_g_hyst()
+ *	@note DURATION
+ *	@note smi130_set_intr_high_g_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_high_g_thres()
+ *	@note SOURCE
+ *	@note smi130_set_intr_low_high_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_high_g_intr(u8
+*v_high_g_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_1_HIGH_G_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_1_HIGH_G_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the low g interrupt status
+ *	from the register 0x1D bit 3
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the low g  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_low_g_intr_u8 : The status of low_g interrupt
+ *
+ *	@note Low_g interrupt configured by following functions
+ *	@note STATUS
+ *	@note smi130_get_stat1_low_g_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_low_g()
+ *	@note SOURCE
+ *	@note smi130_set_intr_low_high_source()
+ *	@note DURATION
+ *	@note smi130_set_intr_low_g_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_low_g_thres()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_low_g_hyst()
+ *	@note MODE
+ *	@note smi130_set_intr_low_g_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_low_g_intr(u8
+*v_low_g_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_1_LOW_G_INTR__REG, &v_data_u8,
+			 SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_1_LOW_G_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads data ready interrupt status
+ *	from the register 0x1D bit 4
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the  data ready  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_data_rdy_intr_u8 : The status of data ready interrupt
+ *
+ *	@note Data ready interrupt configured by following functions
+ *	@note STATUS
+ *	@note smi130_get_stat1_data_rdy_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_data_rdy()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_data_rdy_intr(u8
+*v_data_rdy_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_1_DATA_RDY_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_data_rdy_intr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_1_DATA_RDY_INTR);
+		}
+	return com_rslt;
+}
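+/*
+ * Illustrative usage sketch (assumes p_smi130 is initialised and the data
+ * ready interrupt was mapped via smi130_set_intr_data_rdy()): busy-poll the
+ * data ready flag until a fresh sample set is available.
+ *
+ *	u8 v_data_rdy_intr_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	do {
+ *		com_rslt = smi130_get_stat1_data_rdy_intr(&v_data_rdy_intr_u8);
+ *	} while ((com_rslt == SUCCESS) && (v_data_rdy_intr_u8 == 0));
+ */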
+/*!
+ *	@brief This API reads the FIFO full interrupt status
+ *	from the register 0x1D bit 5
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the FIFO full interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will
+ *	be permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_full_intr_u8 : The status of fifo full interrupt
+ *
+ *	@note FIFO full interrupt can be configured by following functions
+ *	@note smi130_set_intr_fifo_full()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_fifo_full_intr(u8
+*v_fifo_full_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_1_FIFO_FULL_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_full_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_1_FIFO_FULL_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the FIFO watermark interrupt status
+ *	from the register 0x1D bit 6
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the FIFO watermark interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_wm_intr_u8 : The status of fifo water mark interrupt
+ *
+ *	@note FIFO watermark interrupt can be configured by the following function
+ *	@note smi130_set_intr_fifo_wm()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_fifo_wm_intr(u8
+*v_fifo_wm_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_1_FIFO_WM_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_wm_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_1_FIFO_WM_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the no motion interrupt status
+ *	from the register 0x1D bit 7
+ *	flag is associated with a specific interrupt function.
+ *	It is set when the no motion  interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_nomotion_intr_u8 : The status of no motion interrupt
+ *
+ *	@note No motion interrupt can be configured by following function
+ *	@note STATUS
+ *	@note smi130_get_stat1_nomotion_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_nomotion()
+ *	@note DURATION
+ *	@note smi130_set_intr_slow_no_motion_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_slow_no_motion_thres()
+ *	@note SLOW/NO MOTION SELECT
+ *	@note smi130_set_intr_slow_no_motion_select()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_nomotion_intr(u8
+*v_nomotion_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the no motion interrupt*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_1_NOMOTION_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nomotion_intr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_1_NOMOTION_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *@brief This API reads the status of any motion first x
+ *	from the register 0x1E bit 0
+ *
+ *
+ *@param v_anymotion_first_x_u8 : The status of any motion first x interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_first_x(u8
+*v_anymotion_first_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the any motion first x interrupt*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_anymotion_first_x_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of any motion first y interrupt
+ *	from the register 0x1E bit 1
+ *
+ *
+ *
+ *@param v_any_motion_first_y_u8 : The status of any motion first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_first_y(u8
+*v_any_motion_first_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the any motion first y interrupt*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_first_y_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of any motion first z interrupt
+ *	from the register 0x1E bit 2
+ *
+ *
+ *
+ *
+ *@param v_any_motion_first_z_u8 : The status of any motion first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_first_z(u8
+*v_any_motion_first_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the any motion first z interrupt*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_first_z_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the any motion sign status from the
+ *	register 0x1E bit 3
+ *
+ *
+ *
+ *
+ *  @param v_anymotion_sign_u8 : The status of any motion sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_sign(u8
+*v_anymotion_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read any motion sign interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_SIGN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_anymotion_sign_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_ANY_MOTION_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the tap first x interrupt status from the
+ *	register 0x1E bit 4
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_x_u8 :The status of tap first x interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_first_x(u8
+*v_tap_first_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap first x interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_TAP_FIRST_X__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_first_x_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_TAP_FIRST_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the tap first y interrupt status from the
+ *	register 0x1E bit 5
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_y_u8 :The status of tap first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_first_y(u8
+*v_tap_first_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap first y interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_TAP_FIRST_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_first_y_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_TAP_FIRST_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the tap first z interrupt status  from the
+ *	register 0x1E bit 6
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_z_u8 :The status of tap first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_first_z(u8
+*v_tap_first_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap first z interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_TAP_FIRST_Z__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_first_z_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_TAP_FIRST_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the tap sign status from the
+ *	register 0x1E bit 7
+ *
+ *
+ *
+ *
+ *  @param v_tap_sign_u8 : The status of tap sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_sign(u8
+*v_tap_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap_sign interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_2_TAP_SIGN__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_sign_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_2_TAP_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g first x status from the
+ *	register 0x1F bit 0
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_x_u8 :The status of high_g first x
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_first_x(u8
+*v_high_g_first_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read highg_x interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_X__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_first_x_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g first y status from the
+ *	register 0x1F bit 1
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_y_u8 : The status of high_g first y
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_first_y(u8
+*v_high_g_first_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read highg_y interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_first_y_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g first z status from the
+ *	register 0x1F bit 2
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_z_u8 : The status of high_g first z
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_first_z(u8
+*v_high_g_first_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read highg_z interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Z__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_first_z_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the high_g sign status from the
+ *	register 0x1F bit 3
+ *
+ *
+ *
+ *
+ *  @param v_high_g_sign_u8 :The status of high_g sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_sign(u8
+*v_high_g_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read highg_sign interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_3_HIGH_G_SIGN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_sign_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_3_HIGH_G_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of orient_mbl_xy plane
+ *	from the register 0x1F bit 4 and 5
+ *
+ *
+ *  @param v_orient_mbl_xy_u8 :The status of orient_mbl_xy plane
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | portrait upright
+ *   0x01     | portrait upside down
+ *   0x02     | landscape left
+ *   0x03     | landscape right
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_orient_mbl_xy(u8
+*v_orient_mbl_xy_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read orient_mbl plane xy interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_3_ORIENT_XY__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_xy_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_3_ORIENT_XY);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the status of orient_mbl z plane
+ *	from the register 0x1F bit 6
+ *
+ *
+ *  @param v_orient_mbl_z_u8 :The status of orient_mbl z
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | upward looking
+ *   0x01     | downward looking
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_orient_mbl_z(u8
+*v_orient_mbl_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read orient_mbl z plane interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_3_ORIENT_Z__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_z_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_3_ORIENT_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the flat status from the register
+ *	0x1F bit 7
+ *
+ *
+ *  @param v_flat_u8 : The status of flat interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | non flat
+ *   0x01     | flat position
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_flat(u8
+*v_flat_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read flat interrupt status */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_INTR_STAT_3_FLAT__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_STAT_3_FLAT);
+		}
+	return com_rslt;
+}
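+/*
+ * Illustrative usage sketch (assumes p_smi130 is initialised and the
+ * orientation/flat interrupts are enabled): read back the current
+ * orientation in the xy plane, the z orientation and the flat state.
+ *
+ *	u8 v_orient_mbl_xy_u8 = SMI130_INIT_VALUE;
+ *	u8 v_orient_mbl_z_u8 = SMI130_INIT_VALUE;
+ *	u8 v_flat_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_get_stat3_orient_mbl_xy(&v_orient_mbl_xy_u8);
+ *	com_rslt += smi130_get_stat3_orient_mbl_z(&v_orient_mbl_z_u8);
+ *	com_rslt += smi130_get_stat3_flat(&v_flat_u8);
+ */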
+/*!
+ *	@brief This API reads the temperature of the sensor
+ *	from the register 0x21 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_temp_s16 : The value of temperature
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_temp(s16
+*v_temp_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the temperature LSB and MSB data
+	v_data_u8[0] - LSB
+	v_data_u8[1] - MSB*/
+	u8 v_data_u8[SMI130_TEMP_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read temperature data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_TEMP_LSB_VALUE__REG, v_data_u8,
+			SMI130_TEMP_DATA_LENGTH);
+			*v_temp_s16 =
+			(s16)(((s32)((s8) (v_data_u8[SMI130_TEMP_MSB_BYTE]) <<
+			SMI130_SHIFT_BIT_POSITION_BY_08_BITS))
+			| v_data_u8[SMI130_TEMP_LSB_BYTE]);
+		}
+	return com_rslt;
+}
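+/*
+ * Illustrative usage sketch: read the raw 16-bit temperature word. The
+ * scaling to degree Celsius is not defined in this file; comparable Bosch
+ * IMUs use 1/512 degC per LSB around 23 degC, but that factor is an
+ * assumption here and must be confirmed against the SMI130 datasheet.
+ *
+ *	s16 v_temp_s16 = SMI130_INIT_VALUE;
+ *	s32 temp_milli_degc = 0;
+ *
+ *	if (smi130_get_temp(&v_temp_s16) == SUCCESS)
+ *		temp_milli_degc = 23000 + (((s32)v_temp_s16 * 1000) / 512);
+ */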
+/*!
+ *	@brief This API reads the fifo byte counter of the sensor
+ *	from the register 0x23 and 0x24 bit 0 to 7 and 0 to 2
+ *	@brief This byte counter is updated each time a complete frame
+ *	is read or written
+ *
+ *
+ *  @param v_fifo_length_u32 : The value of fifo byte counter
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_fifo_length(u32 *v_fifo_length_u32)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array contains the fifo length data
+	a_data_u8r[0] - fifo length LSB
+	a_data_u8r[1] - fifo length MSB*/
+	u8 a_data_u8r[SMI130_FIFO_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read fifo length*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_BYTE_COUNTER_LSB__REG, a_data_u8r,
+			 SMI130_FIFO_DATA_LENGTH);
+
+			a_data_u8r[SMI130_FIFO_LENGTH_MSB_BYTE] =
+			SMI130_GET_BITSLICE(
+			a_data_u8r[SMI130_FIFO_LENGTH_MSB_BYTE],
+			SMI130_USER_FIFO_BYTE_COUNTER_MSB);
+
+			*v_fifo_length_u32 =
+			(u32)(((u32)((u8) (
+			a_data_u8r[SMI130_FIFO_LENGTH_MSB_BYTE]) <<
+			SMI130_SHIFT_BIT_POSITION_BY_08_BITS))
+			| a_data_u8r[SMI130_FIFO_LENGTH_LSB_BYTE]);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the fifo data of the sensor
+ *	from the register 0x24
+ *	@brief Data format depends on the setting of register FIFO_CONFIG
+ *
+ *
+ *
+ *  @param v_fifodata_u8 : Pointer holding the fifo data
+ *  @param v_fifo_length_u16 : The value of fifo length, maximum
+ *	1024 bytes
+ *
+ *	@note For reading FIFO data use the following functions
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_fifo_data(
+u8 *v_fifodata_u8, u16 v_fifo_length_u16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read fifo data*/
+			com_rslt =
+			p_smi130->SMI130_BURST_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_DATA__REG,
+			v_fifodata_u8, v_fifo_length_u16);
+
+		}
+	return com_rslt;
+}
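+/*
+ * Illustrative usage sketch (assumes the caller provides a buffer of at
+ * least 1024 bytes, the maximum FIFO fill level noted above): read the
+ * FIFO byte counter and then burst read that many bytes in one go.
+ *
+ *	u8 v_fifodata_u8[1024];
+ *	u32 v_fifo_length_u32 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_fifo_length(&v_fifo_length_u32);
+ *	if ((com_rslt == SUCCESS) && (v_fifo_length_u32 > 0))
+ *		com_rslt = smi130_fifo_data(v_fifodata_u8,
+ *		(u16)v_fifo_length_u32);
+ */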
+/*!
+ *	@brief This API is used to get the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	SMI130_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | SMI130_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_output_data_rate(
+u8 *v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel output data rate*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_data_rate_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	SMI130_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | SMI130_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_output_data_rate(
+u8 v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* accel output data rate selection */
+		if ((v_output_data_rate_u8 != SMI130_INIT_VALUE) &&
+		(v_output_data_rate_u8 <= SMI130_MAX_ACCEL_OUTPUT_DATA_RATE)) {
+			/* write accel output data rate */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE,
+				v_output_data_rate_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
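+/*
+ * Illustrative usage sketch (assumes p_smi130 is initialised): select a
+ * 100 Hz accel output data rate and read the programmed value back.
+ *
+ *	u8 v_output_data_rate_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_set_accel_output_data_rate(
+ *	SMI130_ACCEL_OUTPUT_DATA_RATE_100HZ);
+ *	com_rslt += smi130_get_accel_output_data_rate(
+ *	&v_output_data_rate_u8);
+ */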
+/*!
+ *	@brief This API is used to get the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on under sampling parameter
+ *	@note under sampling parameter can be set by the function
+ *	"SMI130_SET_ACCEL_UNDER_SAMPLING_PARAMETER"
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_bw(u8 *v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel bandwidth */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_CONFIG_ACCEL_BW__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_bw_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_ACCEL_CONFIG_ACCEL_BW);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on under sampling parameter
+ *	@note under sampling parameter can be set by the function
+ *	"SMI130_SET_ACCEL_UNDER_SAMPLING_PARAMETER"
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_bw(u8 v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* select accel bandwidth*/
+		if (v_bw_u8 <= SMI130_MAX_ACCEL_BW) {
+			/* write accel bandwidth*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_CONFIG_ACCEL_BW__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_ACCEL_CONFIG_ACCEL_BW,
+				v_bw_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_ACCEL_CONFIG_ACCEL_BW__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
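+/*
+ * Illustrative usage sketch: select the normal filter configuration for
+ * the accel (value 0x02 from the table above, valid while under sampling
+ * is disabled) and read it back.
+ *
+ *	u8 v_bw_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_set_accel_bw(0x02);
+ *	com_rslt += smi130_get_accel_bw(&v_bw_u8);
+ */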
+/*!
+ *	@brief This API is used to get the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_under_sampling_parameter(
+u8 *v_accel_under_sampling_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel under sampling parameter */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_under_sampling_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_under_sampling_parameter(
+u8 v_accel_under_sampling_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	if (v_accel_under_sampling_u8 <= SMI130_MAX_UNDER_SAMPLING) {
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+		SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			/* write the accel under sampling parameter */
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING,
+			v_accel_under_sampling_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
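+/*
+ * Illustrative usage sketch: enable accel under sampling so that the
+ * bandwidth field above selects the number of averaged samples instead of
+ * the filter configuration.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_set_accel_under_sampling_parameter(SMI130_ENABLE);
+ */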
+/*!
+ *	@brief This API is used to get the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | SMI130_ACCEL_RANGE_2G
+ *   0x05    | SMI130_ACCEL_RANGE_4G
+ *   0x08    | SMI130_ACCEL_RANGE_8G
+ *   0x0C    | SMI130_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_range(
+u8 *v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel range*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_RANGE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_range_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_ACCEL_RANGE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | SMI130_ACCEL_RANGE_2G
+ *   0x05    | SMI130_ACCEL_RANGE_4G
+ *   0x08    | SMI130_ACCEL_RANGE_8G
+ *   0x0C    | SMI130_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_range(u8 v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if ((v_range_u8 == SMI130_ACCEL_RANGE0) ||
+			(v_range_u8 == SMI130_ACCEL_RANGE1) ||
+			(v_range_u8 == SMI130_ACCEL_RANGE3) ||
+			(v_range_u8 == SMI130_ACCEL_RANGE4)) {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_ACCEL_RANGE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8  = SMI130_SET_BITSLICE(
+				v_data_u8, SMI130_USER_ACCEL_RANGE,
+				v_range_u8);
+				/* write the accel range*/
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_ACCEL_RANGE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
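+/*
+ * Illustrative usage sketch: select the +/-4g accel range using the
+ * register code listed above and read the setting back.
+ *
+ *	u8 v_range_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_set_accel_range(SMI130_ACCEL_RANGE_4G);
+ *	com_rslt += smi130_get_accel_range(&v_range_u8);
+ */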
+/*!
+ *	@brief This API is used to get the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | SMI130_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | SMI130_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | SMI130_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | SMI130_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | SMI130_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | SMI130_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | SMI130_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | SMI130_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_output_data_rate(
+u8 *v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the gyro output data rate*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_data_rate_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | SMI130_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | SMI130_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | SMI130_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | SMI130_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | SMI130_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | SMI130_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | SMI130_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | SMI130_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_output_data_rate(
+u8 v_output_data_rate_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* select the gyro output data rate*/
+		if ((v_output_data_rate_u8 <  SMI130_OUTPUT_DATA_RATE6) &&
+		(v_output_data_rate_u8 != SMI130_INIT_VALUE)
+		&& (v_output_data_rate_u8 !=  SMI130_OUTPUT_DATA_RATE1)
+		&& (v_output_data_rate_u8 !=  SMI130_OUTPUT_DATA_RATE2)
+		&& (v_output_data_rate_u8 !=  SMI130_OUTPUT_DATA_RATE3)
+		&& (v_output_data_rate_u8 !=  SMI130_OUTPUT_DATA_RATE4)
+		&& (v_output_data_rate_u8 !=  SMI130_OUTPUT_DATA_RATE5)
+		&& (v_output_data_rate_u8 !=  SMI130_OUTPUT_DATA_RATE6)
+		&& (v_output_data_rate_u8 !=  SMI130_OUTPUT_DATA_RATE7)) {
+			/* write the gyro output data rate */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE,
+				v_output_data_rate_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
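+/*
+ * Illustrative usage sketch: select a 100 Hz gyro output data rate and
+ * read the programmed value back.
+ *
+ *	u8 v_output_data_rate_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_set_gyro_output_data_rate(
+ *	SMI130_GYRO_OUTPUT_DATA_RATE_100HZ);
+ *	com_rslt += smi130_get_gyro_output_data_rate(
+ *	&v_output_data_rate_u8);
+ */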
+/*!
+ *	@brief This API is used to get the
+ *	bandwidth of the gyro from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | SMI130_GYRO_OSR4_MODE
+ *   0x01     | SMI130_GYRO_OSR2_MODE
+ *   0x02     | SMI130_GYRO_NORMAL_MODE
+ *   0x03     | SMI130_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_bw(u8 *v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro bandwidth*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_GYRO_CONFIG_BW__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_bw_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_GYRO_CONFIG_BW);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	bandwidth of the gyro from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | SMI130_GYRO_OSR4_MODE
+ *   0x01     | SMI130_GYRO_OSR2_MODE
+ *   0x02     | SMI130_GYRO_NORMAL_MODE
+ *   0x03     | SMI130_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_bw(u8 v_bw_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_bw_u8 <= SMI130_MAX_GYRO_BW) {
+			/* write the gyro bandwidth*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_GYRO_CONFIG_BW__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_CONFIG_BW, v_bw_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_GYRO_CONFIG_BW__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
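+/*
+ * Illustrative usage sketch: put the gyro filter into normal mode and read
+ * the bandwidth setting back.
+ *
+ *	u8 v_bw_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+ *
+ *	com_rslt = smi130_set_gyro_bw(SMI130_GYRO_NORMAL_MODE);
+ *	com_rslt += smi130_get_gyro_bw(&v_bw_u8);
+ */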
+/*!
+ *	@brief This API reads the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | SMI130_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | SMI130_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | SMI130_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | SMI130_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | SMI130_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_range(u8 *v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the gyro range */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_GYRO_RANGE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_range_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_GYRO_RANGE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API set the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | SMI130_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | SMI130_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | SMI130_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | SMI130_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | SMI130_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_range(u8 v_range_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_range_u8 <= SMI130_MAX_GYRO_RANGE) {
+			/* write the gyro range value */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_GYRO_RANGE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_RANGE,
+				v_range_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_GYRO_RANGE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
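+/*
+ * Editor's note: the block below is a usage sketch added for illustration and
+ * is not part of the original driver. It assumes the p_smi130 device pointer
+ * has already been initialised elsewhere (bus read/write and delay hooks set)
+ * and that SMI130_GYRO_NORMAL_MODE and SMI130_GYRO_RANGE_2000_DEG_SEC expand
+ * to the table values documented above (0x02 and 0x00).
+ */
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_configure_gyro(void)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	/* select the gyro bandwidth mode (GYRO_CONFIG_BW field) */
+	com_rslt = smi130_set_gyro_bw(SMI130_GYRO_NORMAL_MODE);
+	/* select the gyro full-scale range (register 0x43 bit 0 to 2) */
+	com_rslt += smi130_set_gyro_range(SMI130_GYRO_RANGE_2000_DEG_SEC);
+	return com_rslt;
+}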
+/*!
+ *	@brief This API is used to get the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rat_u8e : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |SMI130_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |SMI130_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |SMI130_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |SMI130_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |SMI130_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |SMI130_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |SMI130_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |SMI130_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |SMI130_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |SMI130_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |SMI130_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |SMI130_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_output_data_rate(
+u8 *v_output_data_rat_u8e)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the mag data output rate*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_data_rat_u8e = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rat_u8e : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |SMI130_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |SMI130_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |SMI130_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |SMI130_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |SMI130_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |SMI130_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |SMI130_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |SMI130_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |SMI130_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |SMI130_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |SMI130_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |SMI130_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_output_data_rate(
+u8 v_output_data_rat_u8e)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* select the mag data output rate*/
+		if ((v_output_data_rat_u8e
+		<= SMI130_MAX_ACCEL_OUTPUT_DATA_RATE)
+		&& (v_output_data_rat_u8e
+		!= SMI130_OUTPUT_DATA_RATE0)
+		&& (v_output_data_rat_u8e
+		!=  SMI130_OUTPUT_DATA_RATE6)
+		&& (v_output_data_rat_u8e
+		!=  SMI130_OUTPUT_DATA_RATE7)) {
+			/* write the mag data output rate*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE,
+				v_output_data_rat_u8e);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_down_gyro(
+u8 *v_fifo_down_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the gyro fifo down*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_DOWN_GYRO__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_down_gyro_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_DOWN_GYRO);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_down_gyro(
+u8 v_fifo_down_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the gyro fifo down*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_DOWN_GYRO__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(
+				v_data_u8,
+				SMI130_USER_FIFO_DOWN_GYRO,
+				v_fifo_down_gyro_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_DOWN_GYRO__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_fifo_filter_data(
+u8 *v_gyro_fifo_filter_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the gyro fifo filter data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_FILTER_GYRO__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_fifo_filter_data_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_FILTER_GYRO);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_fifo_filter_data(
+u8 v_gyro_fifo_filter_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_gyro_fifo_filter_data_u8
+		<= SMI130_MAX_VALUE_FIFO_FILTER) {
+			/* write the gyro fifo filter data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_FILTER_GYRO__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(
+				v_data_u8,
+				SMI130_USER_FIFO_FILTER_GYRO,
+				v_gyro_fifo_filter_data_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_FILTER_GYRO__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_down_accel(
+u8 *v_fifo_down_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel fifo down data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_DOWN_ACCEL__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_down_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_DOWN_ACCEL);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_down_accel(
+u8 v_fifo_down_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the accel fifo down data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_DOWN_ACCEL__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_DOWN_ACCEL, v_fifo_down_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_DOWN_ACCEL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_fifo_filter_data(
+u8 *v_accel_fifo_filter_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel fifo filter data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_FILTER_ACCEL__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_fifo_filter_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_FILTER_ACCEL);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_fifo_filter_data(
+u8 v_accel_fifo_filter_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_accel_fifo_filter_u8 <= SMI130_MAX_VALUE_FIFO_FILTER) {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_FILTER_ACCEL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				/* write accel fifo filter data */
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_FILTER_ACCEL,
+				v_accel_fifo_filter_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_FILTER_ACCEL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
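+/*
+ * Editor's note: usage sketch added for illustration, not part of the
+ * original driver. It combines the FIFO pre-filter APIs above; the
+ * down-sampling factors are arbitrary example values and the device is
+ * assumed to be initialised already.
+ */
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_fifo_prefilter(void)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	/* down-sample gyro FIFO data by 2**2 (register 0x45 bit 0 to 2) */
+	com_rslt = smi130_set_fifo_down_gyro(0x02);
+	/* store filtered gyro samples in the FIFO (register 0x45 bit 3) */
+	com_rslt += smi130_set_gyro_fifo_filter_data(0x01);
+	/* down-sample accel FIFO data by 2**1 (register 0x45 bit 4 to 6) */
+	com_rslt += smi130_set_fifo_down_accel(0x01);
+	/* store filtered accel samples in the FIFO (register 0x45 bit 7) */
+	com_rslt += smi130_set_accel_fifo_filter_data(0x01);
+	return com_rslt;
+}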
+/*!
+ *	@brief This API is used to read the FIFO water mark level
+ *	from the register 0x46 bit 0 to 7; an interrupt is triggered
+ *	when the FIFO fill level reaches this water mark
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_wm(
+u8 *v_fifo_wm_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the fifo water mark level*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_WM__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_wm_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_WM);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the FIFO water mark level
+ *	from the register 0x46 bit 0 to 7; an interrupt is triggered
+ *	when the FIFO fill level reaches this water mark
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_wm(
+u8 v_fifo_wm_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the fifo water mark level*/
+			com_rslt =
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_WM__REG,
+			&v_fifo_wm_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads the fifo sensor time enable (append a
+ *	sensortime frame after the last valid data frame) from the
+ *	register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_time_enable(
+u8 *v_fifo_time_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the fifo sensor time*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_TIME_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_time_enable_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_TIME_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets the fifo sensor time enable (append a
+ *	sensortime frame after the last valid data frame) from the
+ *	register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_time_enable(
+u8 v_fifo_time_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_fifo_time_enable_u8 <= SMI130_MAX_VALUE_FIFO_TIME) {
+			/* write the fifo sensor time*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_TIME_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_TIME_ENABLE,
+				v_fifo_time_enable_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_TIME_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_tag_intr2_enable(
+u8 *v_fifo_tag_intr2_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the fifo tag interrupt2*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_TAG_INTR2_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_tag_intr2_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_TAG_INTR2_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_tag_intr2_enable(
+u8 v_fifo_tag_intr2_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_fifo_tag_intr2_u8 <= SMI130_MAX_VALUE_FIFO_INTR) {
+			/* write the fifo tag interrupt2*/
+			com_rslt = smi130_set_input_enable(1,
+			v_fifo_tag_intr2_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_TAG_INTR2_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_TAG_INTR2_ENABLE,
+				v_fifo_tag_intr2_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_TAG_INTR2_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API gets FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_tag_intr1_enable(
+u8 *v_fifo_tag_intr1_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read fifo tag interrupt*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_TAG_INTR1_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_tag_intr1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_TAG_INTR1_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_tag_intr1_enable(
+u8 v_fifo_tag_intr1_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_fifo_tag_intr1_u8 <= SMI130_MAX_VALUE_FIFO_INTR) {
+			/* write the fifo tag interrupt*/
+			com_rslt = smi130_set_input_enable(SMI130_INIT_VALUE,
+			v_fifo_tag_intr1_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_TAG_INTR1_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_TAG_INTR1_ENABLE,
+				v_fifo_tag_intr1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_TAG_INTR1_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_header_enable(
+u8 *v_fifo_header_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read fifo header */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_HEADER_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_header_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_HEADER_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API sets FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_header_enable(
+u8 v_fifo_header_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_fifo_header_u8 <= SMI130_MAX_VALUE_FIFO_HEADER) {
+			/* write the fifo header */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_HEADER_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_HEADER_ENABLE,
+				v_fifo_header_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_HEADER_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read stored
+ *	magnetometer data in FIFO (all 3 axes) from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_mag_enable(
+u8 *v_fifo_mag_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the fifo mag enable*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_MAG_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_mag_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_MAG_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set stored
+ *	magnetometer data in FIFO (all 3 axes) from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_mag_enable(
+u8 v_fifo_mag_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			if (v_fifo_mag_u8 <= SMI130_MAX_VALUE_FIFO_MAG) {
+				/* write the fifo mag enable*/
+				com_rslt =
+				p_smi130->SMI130_BUS_READ_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_FIFO_MAG_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+				if (com_rslt == SUCCESS) {
+					v_data_u8 =
+					SMI130_SET_BITSLICE(v_data_u8,
+					SMI130_USER_FIFO_MAG_ENABLE,
+					v_fifo_mag_u8);
+					com_rslt +=
+					p_smi130->SMI130_BUS_WRITE_FUNC
+					(p_smi130->dev_addr,
+					SMI130_USER_FIFO_MAG_ENABLE__REG,
+					&v_data_u8,
+					SMI130_GEN_READ_WRITE_DATA_LENGTH);
+				}
+			} else {
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read stored
+ *	accel data in FIFO (all 3 axes) from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_accel_enable(
+u8 *v_fifo_accel_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel fifo enable*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_ACCEL_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_accel_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_ACCEL_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set stored
+ *	accel data in FIFO (all 3 axes) from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_accel_enable(
+u8 v_fifo_accel_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_fifo_accel_u8 <= SMI130_MAX_VALUE_FIFO_ACCEL) {
+			/* write the fifo accel enable */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_ACCEL_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_ACCEL_ENABLE, v_fifo_accel_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_ACCEL_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read stored
+ *	gyro data in FIFO (all 3 axes) from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_gyro_enable(
+u8 *v_fifo_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read fifo gyro enable */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_GYRO_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_fifo_gyro_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FIFO_GYRO_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set stored
+ *	gyro data in FIFO (all 3 axes) from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_gyro_enable(
+u8 v_fifo_gyro_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_fifo_gyro_u8 <= SMI130_MAX_VALUE_FIFO_GYRO) {
+			/* write fifo gyro enable*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_FIFO_GYRO_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FIFO_GYRO_ENABLE, v_fifo_gyro_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FIFO_GYRO_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
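+/*
+ * Editor's note: usage sketch added for illustration, not part of the
+ * original driver. It assumes SMI130_ENABLE expands to 0x01 as documented in
+ * the tables above; the watermark value 0x40 is only an example.
+ */
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_fifo_sources(void)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	/* use headered FIFO frames (register 0x47 bit 4) */
+	com_rslt = smi130_set_fifo_header_enable(SMI130_ENABLE);
+	/* append a sensortime frame after the last data frame (0x47 bit 1) */
+	com_rslt += smi130_set_fifo_time_enable(SMI130_ENABLE);
+	/* store accel and gyro samples in the FIFO (0x47 bit 6 and bit 7) */
+	com_rslt += smi130_set_fifo_accel_enable(SMI130_ENABLE);
+	com_rslt += smi130_set_fifo_gyro_enable(SMI130_ENABLE);
+	/* interrupt when the FIFO fill level reaches the watermark (0x46) */
+	com_rslt += smi130_set_fifo_wm(0x40);
+	return com_rslt;
+}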
+/*!
+ *	@brief This API is used to read
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_device_addr(
+u8 *v_i2c_device_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the mag I2C device address*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_I2C_DEVICE_ADDR__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_device_addr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_I2C_DEVICE_ADDR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_i2c_device_addr(
+u8 v_i2c_device_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the mag I2C device address*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_I2C_DEVICE_ADDR__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_I2C_DEVICE_ADDR,
+				v_i2c_device_addr_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_I2C_DEVICE_ADDR__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The data of mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_burst(
+u8 *v_mag_burst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read mag burst mode length*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_BURST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_burst_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_BURST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The data of mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_burst(
+u8 v_mag_burst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write mag burst mode length*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_BURST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_MAG_BURST, v_mag_burst_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_MAG_BURST__REG, &v_data_u8,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
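+/*
+ * Editor's note: usage sketch added for illustration, not part of the
+ * original driver. The 7-bit auxiliary-mag I2C address and the burst-length
+ * code below are placeholders; real values depend on the attached
+ * magnetometer and its datasheet.
+ */
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_mag_if_setup(void)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	/* program the auxiliary mag I2C device address (register 0x4B) */
+	com_rslt = smi130_set_i2c_device_addr(0x10);
+	/* select the mag burst read length code (register 0x4C bit 0 to 1) */
+	com_rslt += smi130_set_mag_burst(0x03);
+	return com_rslt;
+}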
+/*!
+ *	@brief This API is used to read
+ *	the trigger-readout offset in units of 2.5 ms from the register
+ *	0x4C bit 2 to 5. If set to zero, the offset is maximum, i.e.
+ *	after readout a trigger is issued immediately
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_offset(
+u8 *v_mag_offset_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_OFFSET__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_offset_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_OFFSET);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	the trigger-readout offset in units of 2.5 ms from the register
+ *	0x4C bit 2 to 5. If set to zero, the offset is maximum, i.e.
+ *	after readout a trigger is issued immediately
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_offset(
+u8 v_mag_offset_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+		SMI130_USER_MAG_OFFSET__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_OFFSET, v_mag_offset_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_OFFSET__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API is used to read
+ *	the mag manual enable bit from the register 0x4C bit 7, which
+ *	enables register access on MAG_IF[2] or MAG_IF[3] writes.
+ *	This implies that the DATA registers are not updated with
+ *	magnetometer values. Accessing the magnetometer requires the
+ *	magnetometer to be in normal mode in PMU_STATUS.
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_manual_enable(
+u8 *v_mag_manual_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read mag manual */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_MANUAL_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_manual_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_MANUAL_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	the mag manual enable bit from the register 0x4C bit 7, which
+ *	enables register access on MAG_IF[2] or MAG_IF[3] writes.
+ *	This implies that the DATA registers are not updated with
+ *	magnetometer values. Accessing the magnetometer requires the
+ *	magnetometer to be in normal mode in PMU_STATUS.
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_manual_enable(
+u8 v_mag_manual_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* write the mag manual*/
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+		SMI130_USER_MAG_MANUAL_ENABLE__REG, &v_data_u8,
+		SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		if (com_rslt == SUCCESS) {
+			/* set the bit of mag manual enable*/
+			v_data_u8 =
+			SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_MAG_MANUAL_ENABLE, v_mag_manual_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->dev_addr,
+			SMI130_USER_MAG_MANUAL_ENABLE__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		if (com_rslt == SUCCESS)
+			p_smi130->mag_manual_enable = v_mag_manual_u8;
+		else
+			p_smi130->mag_manual_enable = E_SMI130_COMM_RES;
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API is used to read the
+ *	magnetometer read address from the register 0x4D bit 0 to 7
+ *	@brief It is used to provide the mag read address of the auxiliary mag
+ *
+ *
+ *
+ *
+ *  @param  v_mag_read_addr_u8 : The value of address need to be read
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_read_addr(
+u8 *v_mag_read_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the written address*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_READ_ADDR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_read_addr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_READ_ADDR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the
+ *	magnetometer read address from the register 0x4D bit 0 to 7
+ *	@brief The mag read address selects the auxiliary mag register to read
+ *
+ *
+ *
+ *  @param v_mag_read_addr_u8:
+ *	The auxiliary mag register address to read data from
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_read_addr(
+u8 v_mag_read_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the mag read address*/
+			com_rslt =
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->dev_addr,
+			SMI130_USER_READ_ADDR__REG, &v_mag_read_addr_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief The mag write address selects the auxiliary mag register to write
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The data of auxiliary mag address to write data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_write_addr(
+u8 *v_mag_write_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the address of last written */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_WRITE_ADDR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_write_addr_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_WRITE_ADDR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief The mag write address selects the auxiliary mag register to write
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The data of auxiliary mag address to write data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_write_addr(
+u8 v_mag_write_addr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the data of mag address to write data */
+			com_rslt =
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->dev_addr,
+			SMI130_USER_WRITE_ADDR__REG, &v_mag_write_addr_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to read the magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_write_data(
+u8 *v_mag_write_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_WRITE_DATA__REG, &v_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_mag_write_data_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_WRITE_DATA);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_write_data(
+u8 v_mag_write_data_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt =
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->dev_addr,
+			SMI130_USER_WRITE_DATA__REG, &v_mag_write_data_u8,
+			SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
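+/*
+ * Editor's note: usage sketch added for illustration, not part of the
+ * original driver. It strings together the manual auxiliary-mag access APIs
+ * above; the register address and data are caller-supplied placeholders, the
+ * write-data-before-write-address ordering follows the usual convention for
+ * this interface and should be checked against the datasheet, and the
+ * read-back of the fetched value is assumed to happen through the normal mag
+ * DATA path elsewhere.
+ */
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_mag_manual_access(
+u8 v_reg_addr_u8, u8 v_reg_data_u8)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	/* switch the mag interface to manual mode (register 0x4C bit 7) */
+	com_rslt = smi130_set_mag_manual_enable(SMI130_ENABLE);
+	/* write one mag register: load the data, then the target address */
+	com_rslt += smi130_set_mag_write_data(v_reg_data_u8);
+	com_rslt += smi130_set_mag_write_addr(v_reg_addr_u8);
+	/* request a read of the same register */
+	com_rslt += smi130_set_mag_read_addr(v_reg_addr_u8);
+	/* hand the interface back to automatic readout */
+	com_rslt += smi130_set_mag_manual_enable(SMI130_DISABLE);
+	return com_rslt;
+}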
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_ANY_MOTION_X_ENABLE
+ *       1         | SMI130_ANY_MOTION_Y_ENABLE
+ *       2         | SMI130_ANY_MOTION_Z_ENABLE
+ *       3         | SMI130_DOUBLE_TAP_ENABLE
+ *       4         | SMI130_SINGLE_TAP_ENABLE
+ *       5         | SMI130_ORIENT_ENABLE
+ *       6         | SMI130_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_enable_0(
+u8 v_enable_u8, u8 *v_intr_enable_zero_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* select interrupt to read*/
+		switch (v_enable_u8) {
+		case SMI130_ANY_MOTION_X_ENABLE:
+			/* read the any motion interrupt x data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE);
+		break;
+		case SMI130_ANY_MOTION_Y_ENABLE:
+			/* read the any motion interrupt y data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE);
+		break;
+		case SMI130_ANY_MOTION_Z_ENABLE:
+			/* read the any motion interrupt z data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE);
+		break;
+		case SMI130_DOUBLE_TAP_ENABLE:
+			/* read the double tap interrupt data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE);
+		break;
+		case SMI130_SINGLE_TAP_ENABLE:
+			/* read the single tap interrupt data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE);
+		break;
+		case SMI130_ORIENT_ENABLE:
+			/* read the orient_mbl interrupt data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE);
+		break;
+		case SMI130_FLAT_ENABLE:
+			/* read the flat interrupt data */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_zero_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE);
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_ANY_MOTION_X_ENABLE
+ *       1         | SMI130_ANY_MOTION_Y_ENABLE
+ *       2         | SMI130_ANY_MOTION_Z_ENABLE
+ *       3         | SMI130_DOUBLE_TAP_ENABLE
+ *       4         | SMI130_SINGLE_TAP_ENABLE
+ *       5         | SMI130_ORIENT_ENABLE
+ *       6         | SMI130_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_enable_0(
+u8 v_enable_u8, u8 v_intr_enable_zero_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_enable_u8) {
+	case SMI130_ANY_MOTION_X_ENABLE:
+		/* write any motion x*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_ANY_MOTION_Y_ENABLE:
+		/* write any motion y*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_ANY_MOTION_Z_ENABLE:
+		/* write any motion z*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_DOUBLE_TAP_ENABLE:
+		/* write double tap*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_SINGLE_TAP_ENABLE:
+		/* write single tap */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_ORIENT_ENABLE:
+		/* write orient_mbl interrupt*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_FLAT_ENABLE:
+		/* write flat interrupt*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE,
+			v_intr_enable_zero_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
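+/*
+ * Usage sketch (illustrative only): enable the flat interrupt and disable
+ * the double tap interrupt through interrupt enable byte0. Assumes
+ * p_smi130 and its bus read/write hooks were already initialised by the
+ * driver before these calls are made.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_enable_0(SMI130_FLAT_ENABLE, SMI130_ENABLE);
+ *	rslt += smi130_set_intr_enable_0(SMI130_DOUBLE_TAP_ENABLE,
+ *	SMI130_DISABLE);
+ */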
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It reads the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo watermark interrupt enables.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_HIGH_G_X_ENABLE
+ *       1         | SMI130_HIGH_G_Y_ENABLE
+ *       2         | SMI130_HIGH_G_Z_ENABLE
+ *       3         | SMI130_LOW_G_ENABLE
+ *       4         | SMI130_DATA_RDY_ENABLE
+ *       5         | SMI130_FIFO_FULL_ENABLE
+ *       6         | SMI130_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_enable_1(
+u8 v_enable_u8, u8 *v_intr_enable_1_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_enable_u8) {
+		case SMI130_HIGH_G_X_ENABLE:
+			/* read high_g_x interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE);
+			break;
+		case SMI130_HIGH_G_Y_ENABLE:
+			/* read high_g_y interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE);
+			break;
+		case SMI130_HIGH_G_Z_ENABLE:
+			/* read high_g_z interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE);
+			break;
+		case SMI130_LOW_G_ENABLE:
+			/* read low_g interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE);
+			break;
+		case SMI130_DATA_RDY_ENABLE:
+			/* read data ready interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE);
+			break;
+		case SMI130_FIFO_FULL_ENABLE:
+			/* read fifo full interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE);
+			break;
+		case SMI130_FIFO_WM_ENABLE:
+			/* read fifo water mark interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_1_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It writes the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo watermark interrupt enables.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value used to select the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_HIGH_G_X_ENABLE
+ *       1         | SMI130_HIGH_G_Y_ENABLE
+ *       2         | SMI130_HIGH_G_Z_ENABLE
+ *       3         | SMI130_LOW_G_ENABLE
+ *       4         | SMI130_DATA_RDY_ENABLE
+ *       5         | SMI130_FIFO_FULL_ENABLE
+ *       6         | SMI130_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_enable_1(
+u8 v_enable_u8, u8 v_intr_enable_1_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_enable_u8) {
+		case SMI130_HIGH_G_X_ENABLE:
+			/* write high_g_x interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_HIGH_G_Y_ENABLE:
+			/* write high_g_y interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_HIGH_G_Z_ENABLE:
+			/* write high_g_z interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_LOW_G_ENABLE:
+			/* write low_g interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_DATA_RDY_ENABLE:
+			/* write data ready interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_FIFO_FULL_ENABLE:
+			/* write fifo full interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_FIFO_WM_ENABLE:
+			/* write fifo water mark interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE,
+				v_intr_enable_1_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
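+/*
+ * Usage sketch (illustrative only): enable the data ready interrupt in
+ * interrupt enable byte1 and read the setting back. Assumes p_smi130 has
+ * already been initialised by the driver.
+ *
+ *	u8 v_drdy_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_enable_1(SMI130_DATA_RDY_ENABLE, SMI130_ENABLE);
+ *	rslt += smi130_get_intr_enable_1(SMI130_DATA_RDY_ENABLE, &v_drdy_u8);
+ */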
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable byte2 from the register 0x52 bit 0 to 2
+ *	@brief It reads no motion x, y and z
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_NOMOTION_X_ENABLE
+ *       1         | SMI130_NOMOTION_Y_ENABLE
+ *       2         | SMI130_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_enable_2(
+u8 v_enable_u8, u8 *v_intr_enable_2_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_enable_u8) {
+		case SMI130_NOMOTION_X_ENABLE:
+			/* read no motion x */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_2_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE);
+			break;
+		case SMI130_NOMOTION_Y_ENABLE:
+			/* read no motion y */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_2_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE);
+			break;
+		case SMI130_NOMOTION_Z_ENABLE:
+			/* read no motion z */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_enable_2_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable byte2 from the register 0x52 bit 0 to 2
+ *	@brief It writes no motion x, y and z
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_NOMOTION_X_ENABLE
+ *       1         | SMI130_NOMOTION_Y_ENABLE
+ *       2         | SMI130_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_enable_2(
+u8 v_enable_u8, u8 v_intr_enable_2_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_enable_u8) {
+	case SMI130_NOMOTION_X_ENABLE:
+		/* write no motion x */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr,
+		SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE,
+			v_intr_enable_2_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_NOMOTION_Y_ENABLE:
+		/* write no motion y */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr,
+		SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE,
+			v_intr_enable_2_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_NOMOTION_Z_ENABLE:
+		/* write no motion z */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr,
+		SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE,
+			v_intr_enable_2_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
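+/*
+ * Usage sketch (illustrative only): enable the no motion interrupt on the
+ * x axis through interrupt enable byte2 and read the setting back. Assumes
+ * p_smi130 has already been initialised by the driver.
+ *
+ *	u8 v_nomo_x_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_enable_2(SMI130_NOMOTION_X_ENABLE,
+ *	SMI130_ENABLE);
+ *	rslt += smi130_get_intr_enable_2(SMI130_NOMOTION_X_ENABLE,
+ *	&v_nomo_x_u8);
+ */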
+ /*!
+ *	@brief This API is used to read
+ *	interrupt enable step detector interrupt from
+ *	the register 0x52 bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_step_detector_enable(
+u8 *v_step_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the step detector interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_step_intr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to set
+ *	interrupt enable step detector interrupt from
+ *	the register 0x52 bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_detector_enable(
+u8 v_step_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr,
+		SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE,
+			v_step_intr_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr,
+			SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+	return com_rslt;
+}
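+/*
+ * Usage sketch (illustrative only): enable the step detector interrupt and
+ * read the setting back. The 0x01/0x00 (SMI130_ENABLE/SMI130_DISABLE)
+ * convention of the other enable bits is assumed to apply here as well.
+ *
+ *	u8 v_step_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_step_detector_enable(SMI130_ENABLE);
+ *	rslt += smi130_get_step_detector_enable(&v_step_u8);
+ */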
+/*!
+ *	@brief  Get the trigger condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_EDGE_CTRL
+ *       1         | SMI130_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_EDGE
+ *  0x00     |  SMI130_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_edge_ctrl(
+u8 v_channel_u8, u8 *v_intr_edge_ctrl_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_EDGE_CTRL:
+			/* read the edge trigger interrupt1*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_EDGE_CTRL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_edge_ctrl_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR1_EDGE_CTRL);
+			break;
+		case SMI130_INTR2_EDGE_CTRL:
+			/* read the edge trigger interrupt2*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_EDGE_CTRL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_edge_ctrl_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR2_EDGE_CTRL);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  Configure trigger condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_EDGE_CTRL
+ *       1         | SMI130_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_EDGE
+ *  0x00     |  SMI130_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_edge_ctrl(
+u8 v_channel_u8, u8 v_intr_edge_ctrl_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_EDGE_CTRL:
+			/* write the edge trigger interrupt1*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_EDGE_CTRL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR1_EDGE_CTRL,
+				v_intr_edge_ctrl_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR1_EDGE_CTRL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case SMI130_INTR2_EDGE_CTRL:
+			/* write the edge trigger interrupt2*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_EDGE_CTRL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR2_EDGE_CTRL,
+				v_intr_edge_ctrl_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR2_EDGE_CTRL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
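+/*
+ * Usage sketch (illustrative only): make the interrupt1 pin edge triggered.
+ * Assumes p_smi130 has already been initialised by the driver.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_edge_ctrl(SMI130_INTR1_EDGE_CTRL, SMI130_EDGE);
+ */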
+/*!
+ *	@brief  API used to get the configured level condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_LEVEL
+ *       1         | SMI130_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_LEVEL_HIGH
+ *  0x00     |  SMI130_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_level(
+u8 v_channel_u8, u8 *v_intr_level_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_LEVEL:
+			/* read the interrupt1 level*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_LEVEL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_level_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR1_LEVEL);
+			break;
+		case SMI130_INTR2_LEVEL:
+			/* read the interrupt2 level*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_LEVEL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_level_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR2_LEVEL);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  API used to set the level condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_LEVEL
+ *       1         | SMI130_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_LEVEL_HIGH
+ *  0x00     |  SMI130_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_level(
+u8 v_channel_u8, u8 v_intr_level_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_LEVEL:
+			/* write the interrupt1 level*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_LEVEL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR1_LEVEL, v_intr_level_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR1_LEVEL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case SMI130_INTR2_LEVEL:
+			/* write the interrupt2 level*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_LEVEL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR2_LEVEL, v_intr_level_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR2_LEVEL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
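+/*
+ * Usage sketch (illustrative only): configure the interrupt1 pin as
+ * active high. Assumes p_smi130 has already been initialised by the driver.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_level(SMI130_INTR1_LEVEL, SMI130_LEVEL_HIGH);
+ */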
+/*!
+ *	@brief  API used to get the configured output type of interrupt1
+ *	and interrupt2 from the register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   output type selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_TYPE
+ *       1         | SMI130_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_OPEN_DRAIN
+ *  0x00     |  SMI130_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_output_type(
+u8 v_channel_u8, u8 *v_intr_output_type_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_OUTPUT_TYPE:
+			/* read the output type of interrupt1*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_OUTPUT_TYPE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_output_type_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR1_OUTPUT_TYPE);
+			break;
+		case SMI130_INTR2_OUTPUT_TYPE:
+			/* read the output type of interrupt2*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_OUTPUT_TYPE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_output_type_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR2_OUTPUT_TYPE);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief  API used to set the output type of interrupt1
+ *	and interrupt2 from the register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   output type selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_TYPE
+ *       1         | SMI130_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_OPEN_DRAIN
+ *  0x00     |  SMI130_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_output_type(
+u8 v_channel_u8, u8 v_intr_output_type_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_OUTPUT_TYPE:
+			/* write the output type of interrupt1*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_OUTPUT_TYPE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR1_OUTPUT_TYPE,
+				v_intr_output_type_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR1_OUTPUT_TYPE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case SMI130_INTR2_OUTPUT_TYPE:
+			/* write the output type of interrupt2*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_OUTPUT_TYPE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR2_OUTPUT_TYPE,
+				v_intr_output_type_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR2_OUTPUT_TYPE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
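+/*
+ * Usage sketch (illustrative only): drive the interrupt1 pin as push-pull.
+ * Assumes p_smi130 has already been initialised by the driver.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_output_type(SMI130_INTR1_OUTPUT_TYPE,
+ *	SMI130_PUSH_PULL);
+ */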
+ /*!
+ *	@brief API used to get the output enable for interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   output enable selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_ENABLE
+ *       1         | SMI130_INTR2_OUTPUT_ENABLE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_output_enable(
+u8 v_channel_u8, u8 *v_output_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_OUTPUT_ENABLE:
+			/* read the output enable of interrupt1*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_OUTPUT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_enable_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR1_OUTPUT_ENABLE);
+			break;
+		case SMI130_INTR2_OUTPUT_ENABLE:
+			/* read the output enable of interrupt2*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_OUTPUT_EN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_output_enable_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR2_OUTPUT_EN);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief API used to set the output enable for interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   output enable selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_ENABLE
+ *       1         | SMI130_INTR2_OUTPUT_ENABLE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_output_enable(
+u8 v_channel_u8, u8 v_output_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_OUTPUT_ENABLE:
+			/* write the output enable of interrupt1*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_OUTPUT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR1_OUTPUT_ENABLE,
+				v_output_enable_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR1_OUTPUT_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_INTR2_OUTPUT_ENABLE:
+			/* write the output enable of interrupt2*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_OUTPUT_EN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR2_OUTPUT_EN,
+				v_output_enable_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR2_OUTPUT_EN__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+*	@brief This API is used to get the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    SMI130_LATCH_DUR_NONE              |      0x00
+*    SMI130_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    SMI130_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    SMI130_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    SMI130_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    SMI130_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    SMI130_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    SMI130_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    SMI130_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    SMI130_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    SMI130_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    SMI130_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    SMI130_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    SMI130_LATCH_DUR_1_28_SEC          |      0x0D
+*    SMI130_LATCH_DUR_2_56_SEC          |      0x0E
+*    SMI130_LATCHED                     |      0x0F
+*
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_latch_intr(
+u8 *v_latch_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the latch duration value */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_LATCH__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_latch_intr_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LATCH);
+		}
+	return com_rslt;
+}
+/*!
+*	@brief This API is used to set the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    SMI130_LATCH_DUR_NONE              |      0x00
+*    SMI130_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    SMI130_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    SMI130_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    SMI130_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    SMI130_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    SMI130_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    SMI130_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    SMI130_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    SMI130_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    SMI130_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    SMI130_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    SMI130_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    SMI130_LATCH_DUR_1_28_SEC          |      0x0D
+*    SMI130_LATCH_DUR_2_56_SEC          |      0x0E
+*    SMI130_LATCHED                     |      0x0F
+*
+*
+*
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+*
+*
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_latch_intr(u8 v_latch_intr_u8)
+{
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_latch_intr_u8 <= SMI130_MAX_LATCH_INTR) {
+			/* write the latch duration value */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_LATCH__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_LATCH, v_latch_intr_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr, SMI130_USER_INTR_LATCH__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
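+/*
+ * Usage sketch (illustrative only): latch interrupts for 40 ms and read the
+ * configured latch duration back. Assumes p_smi130 has already been
+ * initialised by the driver.
+ *
+ *	u8 v_latch_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_latch_intr(SMI130_LATCH_DUR_40_MILLI_SEC);
+ *	rslt += smi130_get_latch_intr(&v_latch_u8);
+ */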
+/*!
+ *	@brief API used to get input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_INPUT_ENABLE
+ *       1         | SMI130_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_input_enable(
+u8 v_channel_u8, u8 *v_input_en_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read input enable of interrupt1 and interrupt2*/
+		case SMI130_INTR1_INPUT_ENABLE:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_INPUT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_input_en_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR1_INPUT_ENABLE);
+			break;
+		case SMI130_INTR2_INPUT_ENABLE:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_INPUT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_input_en_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR2_INPUT_ENABLE);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief API used to set input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_INPUT_ENABLE
+ *       1         | SMI130_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_input_enable(
+u8 v_channel_u8, u8 v_input_en_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write input enable of interrupt1 and interrupt2*/
+	case SMI130_INTR1_INPUT_ENABLE:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR1_INPUT_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR1_INPUT_ENABLE, v_input_en_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR1_INPUT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case SMI130_INTR2_INPUT_ENABLE:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR2_INPUT_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR2_INPUT_ENABLE, v_input_en_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR2_INPUT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
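+/*
+ * Usage sketch (illustrative only): configure the interrupt2 pin as an
+ * input. Assumes p_smi130 has already been initialised by the driver.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_input_enable(SMI130_INTR2_INPUT_ENABLE,
+ *	SMI130_INPUT);
+ */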
+ /*!
+ *	@brief reads the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_LOW_G
+ *       1         | SMI130_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g(
+u8 v_channel_u8, u8 *v_intr_low_g_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the low_g interrupt */
+		case SMI130_INTR1_MAP_LOW_G:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_low_g_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_LOW_G);
+			break;
+		case SMI130_INTR2_MAP_LOW_G:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_low_g_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_LOW_G);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief set the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_LOW_G
+ *       1         | SMI130_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g(
+u8 v_channel_u8, u8 v_intr_low_g_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+u8 v_step_cnt_stat_u8 = SMI130_INIT_VALUE;
+u8 v_step_det_stat_u8 = SMI130_INIT_VALUE;
+
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	/* check the step detector interrupt enable status*/
+	com_rslt = smi130_get_step_detector_enable(&v_step_det_stat_u8);
+	/* disable the step detector interrupt */
+	if (v_step_det_stat_u8 != SMI130_INIT_VALUE)
+		com_rslt += smi130_set_step_detector_enable(SMI130_INIT_VALUE);
+	/* check the step counter interrupt enable status*/
+	com_rslt += smi130_get_step_counter_enable(&v_step_cnt_stat_u8);
+	/* disable the step counter interrupt */
+	if (v_step_cnt_stat_u8 != SMI130_INIT_VALUE)
+			com_rslt += smi130_set_step_counter_enable(
+			SMI130_INIT_VALUE);
+	switch (v_channel_u8) {
+	/* write the low_g interrupt*/
+	case SMI130_INTR1_MAP_LOW_G:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_LOW_G, v_intr_low_g_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_INTR2_MAP_LOW_G:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_LOW_G, v_intr_low_g_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
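+/*
+ * Usage sketch (illustrative only): map the low_g interrupt to the
+ * interrupt1 pin. Note that, as the code above shows, this call also
+ * disables any enabled step detector and step counter interrupts.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_low_g(SMI130_INTR1_MAP_LOW_G, SMI130_ENABLE);
+ */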
+/*!
+ *	@brief Reads the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_HIGH_G
+ *       1         | SMI130_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g(
+u8 v_channel_u8, u8 *v_intr_high_g_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* read the high_g interrupt*/
+		switch (v_channel_u8) {
+		case SMI130_INTR1_MAP_HIGH_G:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_HIGH_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_high_g_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_HIGH_G);
+		break;
+		case SMI130_INTR2_MAP_HIGH_G:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_HIGH_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_high_g_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_HIGH_G);
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Writes the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_HIGH_G
+ *       1         | SMI130_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g(
+u8 v_channel_u8, u8 v_intr_high_g_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the high_g interrupt*/
+	case SMI130_INTR1_MAP_HIGH_G:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_0_INTR1_HIGH_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_HIGH_G, v_intr_high_g_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_HIGH_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case SMI130_INTR2_MAP_HIGH_G:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_2_INTR2_HIGH_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_HIGH_G, v_intr_high_g_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_HIGH_G__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
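+/*
+ * Usage sketch (illustrative only): map the high_g interrupt to the
+ * interrupt2 pin. Assumes p_smi130 has already been initialised by the
+ * driver.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_intr_high_g(SMI130_INTR2_MAP_HIGH_G, SMI130_ENABLE);
+ */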
+/*!
+ *	@brief Reads the Any motion interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ANY_MOTION
+ *       1         | SMI130_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_any_motion(
+u8 v_channel_u8, u8 *v_intr_any_motion_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the any motion interrupt */
+		case SMI130_INTR1_MAP_ANY_MOTION:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_any_motion_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION);
+		break;
+		case SMI130_INTR2_MAP_ANY_MOTION:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_any_motion_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION);
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Writes the Any motion interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ANY_MOTION
+ *       1         | SMI130_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_any_motion(
+u8 v_channel_u8, u8 v_intr_any_motion_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+u8 sig_mot_stat = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	/* read the status of significant motion interrupt */
+	com_rslt = smi130_get_intr_significant_motion_select(&sig_mot_stat);
+	/* disable the significant motion interrupt */
+	if (sig_mot_stat != SMI130_INIT_VALUE)
+		com_rslt += smi130_set_intr_significant_motion_select(
+		SMI130_INIT_VALUE);
+	switch (v_channel_u8) {
+	/* write the any motion interrupt */
+	case SMI130_INTR1_MAP_ANY_MOTION:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION,
+			v_intr_any_motion_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case SMI130_INTR2_MAP_ANY_MOTION:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION,
+			v_intr_any_motion_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
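+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * Bosch driver): route the any-motion engine to the INT1 pin.  The setter
+ * above first reads the significant-motion select status and clears it when
+ * set before writing the map bit.  The sketch assumes p_smi130 has already
+ * been initialised with working bus read/write callbacks.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_any_motion(SMI130_INTR1_MAP_ANY_MOTION,
+ *			SMI130_ENABLE);
+ *	if (rc != SUCCESS)
+ *		return rc;
+ */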
+/*!
+ *	@brief Reads the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_NOMO
+ *       1         | SMI130_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_nomotion(
+u8 v_channel_u8, u8 *v_intr_nomotion_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the no motion interrupt*/
+		case SMI130_INTR1_MAP_NOMO:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_NOMOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_nomotion_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_NOMOTION);
+			break;
+		case SMI130_INTR2_MAP_NOMO:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_NOMOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_nomotion_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_NOMOTION);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_NOMO
+ *       1         | SMI130_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_nomotion(
+u8 v_channel_u8, u8 v_intr_nomotion_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the no motion interrupt*/
+	case SMI130_INTR1_MAP_NOMO:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_0_INTR1_NOMOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_NOMOTION,
+			v_intr_nomotion_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_NOMOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_INTR2_MAP_NOMO:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_2_INTR2_NOMOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_NOMOTION,
+			v_intr_nomotion_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_NOMOTION__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief Reads the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DOUBLE_TAP
+ *       1         | SMI130_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_double_tap(
+u8 v_channel_u8, u8 *v_intr_double_tap_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		case SMI130_INTR1_MAP_DOUBLE_TAP:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_double_tap_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP);
+			break;
+		case SMI130_INTR2_MAP_DOUBLE_TAP:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_double_tap_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DOUBLE_TAP
+ *       1         | SMI130_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_double_tap(
+u8 v_channel_u8, u8 v_intr_double_tap_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* set the double tap interrupt */
+	case SMI130_INTR1_MAP_DOUBLE_TAP:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP,
+			v_intr_double_tap_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_INTR2_MAP_DOUBLE_TAP:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP,
+			v_intr_double_tap_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief Reads the Single Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_SINGLE_TAP
+ *       1         | SMI130_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap  enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_single_tap(
+u8 v_channel_u8, u8 *v_intr_single_tap_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* reads the single tap interrupt*/
+		case SMI130_INTR1_MAP_SINGLE_TAP:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_single_tap_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP);
+			break;
+		case SMI130_INTR2_MAP_SINGLE_TAP:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_single_tap_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the Single Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_SINGLE_TAP
+ *       1         | SMI130_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap  enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_single_tap(
+u8 v_channel_u8, u8 v_intr_single_tap_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the single tap interrupt */
+	case SMI130_INTR1_MAP_SINGLE_TAP:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP,
+			v_intr_single_tap_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_INTR2_MAP_SINGLE_TAP:
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP,
+			v_intr_single_tap_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief Reads the Orient interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient_mbl interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ORIENT
+ *       1         | SMI130_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_mbl_u8 : The value of orient_mbl enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl(
+u8 v_channel_u8, u8 *v_intr_orient_mbl_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the orientation (orient_mbl) interrupt*/
+		case SMI130_INTR1_MAP_ORIENT:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_ORIENT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_orient_mbl_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_ORIENT);
+			break;
+		case SMI130_INTR2_MAP_ORIENT:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_ORIENT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_orient_mbl_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_ORIENT);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write the Orient interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient_mbl interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ORIENT
+ *       1         | SMI130_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_mbl_u8 : The value of orient_mbl enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl(
+u8 v_channel_u8, u8 v_intr_orient_mbl_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the orientation (orient_mbl) interrupt*/
+	case SMI130_INTR1_MAP_ORIENT:
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_0_INTR1_ORIENT__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_ORIENT, v_intr_orient_mbl_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_ORIENT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	case SMI130_INTR2_MAP_ORIENT:
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_2_INTR2_ORIENT__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_ORIENT, v_intr_orient_mbl_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_ORIENT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+		break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief Reads the Flat interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FLAT
+ *       1         | SMI130_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat(
+u8 v_channel_u8, u8 *v_intr_flat_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the flat interrupt*/
+		case SMI130_INTR1_MAP_FLAT:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_FLAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_flat_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_0_INTR1_FLAT);
+			break;
+		case SMI130_INTR2_MAP_FLAT:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_FLAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_flat_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_2_INTR2_FLAT);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief Write the Flat interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the registers 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FLAT
+ *       1         | SMI130_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat(
+u8 v_channel_u8, u8 v_intr_flat_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* write the flat interrupt */
+		case SMI130_INTR1_MAP_FLAT:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_0_INTR1_FLAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_MAP_0_INTR1_FLAT,
+				v_intr_flat_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_MAP_0_INTR1_FLAT__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case SMI130_INTR2_MAP_FLAT:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_2_INTR2_FLAT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_MAP_2_INTR2_FLAT,
+				v_intr_flat_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_MAP_2_INTR2_FLAT__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Reads PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_PMUTRIG
+ *       1         | SMI130_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_pmu_trig(
+u8 v_channel_u8, u8 *v_intr_pmu_trig_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the pmu trigger interrupt*/
+		case SMI130_INTR1_MAP_PMUTRIG:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_pmu_trig_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG);
+			break;
+		case SMI130_INTR2_MAP_PMUTRIG:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_pmu_trig_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_PMUTRIG
+ *       1         | SMI130_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | trigger enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_pmu_trig(
+u8 v_channel_u8, u8 v_intr_pmu_trig_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/* write the pmu trigger interrupt */
+	case SMI130_INTR1_MAP_PMUTRIG:
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG,
+			v_intr_pmu_trig_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case SMI130_INTR2_MAP_PMUTRIG:
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG,
+			v_intr_pmu_trig_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief Reads FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_FULL
+ *       1         | SMI130_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_fifo_full(
+u8 v_channel_u8, u8 *v_intr_fifo_full_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the fifo full interrupt */
+		case SMI130_INTR1_MAP_FIFO_FULL:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_full_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL);
+		break;
+		case SMI130_INTR2_MAP_FIFO_FULL:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_full_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL);
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_FULL
+ *       1         | SMI130_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_fifo_full(
+u8 v_channel_u8, u8 v_intr_fifo_full_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* write the fifo full interrupt */
+		case SMI130_INTR1_MAP_FIFO_FULL:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL,
+				v_intr_fifo_full_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		case SMI130_INTR2_MAP_FIFO_FULL:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL,
+				v_intr_fifo_full_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Reads FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_WM
+ *       1         | SMI130_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_fifo_wm(
+u8 v_channel_u8, u8 *v_intr_fifo_wm_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* read the fifo water mark interrupt */
+		case SMI130_INTR1_MAP_FIFO_WM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_wm_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM);
+			break;
+		case SMI130_INTR2_MAP_FIFO_WM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_fifo_wm_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56, bits 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_WM
+ *       1         | SMI130_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_fifo_wm(
+u8 v_channel_u8, u8 v_intr_fifo_wm_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/* write the fifo water mark interrupt */
+		case SMI130_INTR1_MAP_FIFO_WM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM,
+				v_intr_fifo_wm_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		case SMI130_INTR2_MAP_FIFO_WM:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM,
+				v_intr_fifo_wm_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+				dev_addr,
+				SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
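+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * Bosch driver): map the FIFO watermark interrupt to the INT2 pin so the host
+ * is signalled once the FIFO fill level crosses the watermark configured
+ * elsewhere.  Assumes p_smi130 holds valid bus read/write callbacks.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_fifo_wm(SMI130_INTR2_MAP_FIFO_WM,
+ *			SMI130_ENABLE);
+ *	if (rc != SUCCESS)
+ *		return rc;
+ */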
+/*!
+ *	@brief Reads Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DATA_RDY
+ *       1         | SMI130_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_data_rdy(
+u8 v_channel_u8, u8 *v_intr_data_rdy_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		switch (v_channel_u8) {
+		/*Read Data Ready interrupt*/
+		case SMI130_INTR1_MAP_DATA_RDY:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_data_rdy_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY);
+			break;
+		case SMI130_INTR2_MAP_DATA_RDY:
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_data_rdy_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY);
+			break;
+		default:
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief Write Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DATA_RDY
+ *       1         | SMI130_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_data_rdy(
+u8 v_channel_u8, u8 v_intr_data_rdy_u8)
+{
+/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	switch (v_channel_u8) {
+	/*Write Data Ready interrupt*/
+	case SMI130_INTR1_MAP_DATA_RDY:
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY,
+			v_intr_data_rdy_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	case SMI130_INTR2_MAP_DATA_RDY:
+		com_rslt =
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->
+		dev_addr, SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY,
+			v_intr_data_rdy_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC(p_smi130->
+			dev_addr, SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	break;
+	default:
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+}
+return com_rslt;
+}
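+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * Bosch driver): map the data-ready interrupt to the INT1 pin so new samples
+ * can be read on interrupt instead of by polling.  Assumes p_smi130 holds
+ * valid bus read/write callbacks.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_data_rdy(SMI130_INTR1_MAP_DATA_RDY,
+ *			SMI130_ENABLE);
+ *	if (rc != SUCCESS)
+ *		return rc;
+ */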
+ /*!
+ *	@brief This API reads the data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_source(u8 *v_tap_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the tap source interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_source_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_source(
+u8 v_tap_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_tap_source_u8 <= SMI130_MAX_VALUE_SOURCE_INTR) {
+			/* write the tap source interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE,
+				v_tap_source_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
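+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * Bosch driver): feed the tap engine with unfiltered data, which per the
+ * table above is value 0x01 (0x00 selects filtered data).  Values above
+ * SMI130_MAX_VALUE_SOURCE_INTR are rejected with E_SMI130_OUT_OF_RANGE.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_tap_source(0x01);
+ *	if (rc != SUCCESS)
+ *		return rc;
+ */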
+ /*!
+ *	@brief This API reads the data source for the
+ *	interrupt engine for the low and high g interrupts
+ *	from the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_high_source(
+u8 *v_low_high_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the high_low_g source interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_high_source_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes the data source for the
+ *	interrupt engine for the low and high g interrupts
+ *	from the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_high_source(
+u8 v_low_high_source_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	if (v_low_high_source_u8 <= SMI130_MAX_VALUE_SOURCE_INTR) {
+		/* write the high_low_g source interrupt */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE,
+			v_low_high_source_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief This API reads the data source for the
+ *	interrupt engine for the nomotion and anymotion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_motion_source(
+u8 *v_motion_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the any/no motion interrupt  */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_motion_source_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the data source for the
+ *	interrupt engine for the nomotion and anymotion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_motion_source(
+u8 v_motion_source_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_motion_source_u8 <= SMI130_MAX_VALUE_SOURCE_INTR) {
+			/* write the any/no motion interrupt  */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE,
+				v_motion_source_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to read the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note The low_g duration sets the trigger delay according to
+ *	"(v_low_g_durn_u8 * 2.5)ms" in a range from 2.5ms to 640ms.
+ *	The default value corresponds to a delay of 20ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_durn(
+u8 *v_low_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the low_g interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_durn_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_0_INTR_LOW_DURN);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note The low_g duration sets the trigger delay according to
+ *	"(v_low_g_durn_u8 * 2.5)ms" in a range from 2.5ms to 640ms.
+ *	The default value corresponds to a delay of 20ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_durn(u8 v_low_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the low_g interrupt */
+			com_rslt = p_smi130->SMI130_BUS_WRITE_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__REG,
+			&v_low_g_durn_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
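+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * Bosch driver): program a low-g trigger delay of 100ms.  Per the note above
+ * the delay scales as (register value * 2.5)ms, so 100ms / 2.5ms gives a
+ * register value of 40.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_low_g_durn(40);
+ *	if (rc != SUCCESS)
+ *		return rc;
+ */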
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note Low_g interrupt trigger threshold according to
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	3.91 mg for v_low_g_thres_u8 = 0
+ *	The threshold range is from 3.91mg to 2.000g
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_thres(
+u8 *v_low_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read low_g threshold */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_thres_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_1_INTR_LOW_THRES);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note Low_g interrupt trigger threshold according to
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	3.91 mg for v_low_g_thres_u8 = 0
+ *	The threshold range is from 3.91mg to 2.000g
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_thres(
+u8 v_low_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write low_g threshold */
+			com_rslt = p_smi130->SMI130_BUS_WRITE_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__REG,
+			&v_low_g_thres_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
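+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * Bosch driver): program a low-g threshold of roughly 312mg.  Per the note
+ * above the threshold scales as (register value * 7.81)mg for non-zero
+ * values, so a register value of 40 gives 40 * 7.81mg = 312.4mg.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_low_g_thres(40);
+ *	if (rc != SUCCESS)
+ *		return rc;
+ */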
+ /*!
+ *	@brief This API reads the Low-g interrupt hysteresis
+ *	from the register 0x5C bit 0 to 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_hyst(
+u8 *v_low_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read low_g hysteresis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_hyst_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the Low-g interrupt hysteresis
+ *	from the register 0x5C bit 0 to 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_hyst(
+u8 v_low_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write low_g hysteresis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST,
+				v_low_hyst_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
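+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * Bosch driver): program a low-g hysteresis of 250mg.  Per the note above the
+ * hysteresis scales as (register value * 125)mg, so a register value of 2
+ * gives 250mg; the field is only two bits wide (register 0x5C, bits 0-1).
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_low_g_hyst(2);
+ *	if (rc != SUCCESS)
+ *		return rc;
+ */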
+/*!
+ *	@brief This API reads the Low-g interrupt mode
+ *	from the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_mode(u8 *v_low_g_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/*read Low-g interrupt mode*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_low_g_mode_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write Low-g interrupt mode
+ *	from the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_mode(
+u8 v_low_g_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_low_g_mode_u8 <= SMI130_MAX_VALUE_LOW_G_MODE) {
+			/*write Low-g interrupt mode*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE,
+				v_low_g_mode_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads High-g interrupt hysteresis
+ *	from the register 0x5C bit 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g_hyst(
+u8 *v_high_g_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read high_g hysteresis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_hyst_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write High-g interrupt hysteresis
+ *	from the register 0x5C bit 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g_hyst(
+u8 v_high_g_hyst_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* write high_g hysteresis*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+		p_smi130->dev_addr,
+		SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST,
+			v_high_g_hyst_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+return com_rslt;
+}
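+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): the hysteresis LSB weight depends on the accel g range per the
+ * table above. Assuming the range has been configured to 4g elsewhere,
+ * a register value of 2 gives 2 * 250 mg = 500 mg of hysteresis.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_high_g_hyst(2);
+ */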
+/*!
+ *	@brief This API is used to read Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	(v_high_g_durn_u8 + 1) * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g_durn(
+u8 *v_high_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read high_g duration*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_durn_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	(v_high_g_durn_u8 + 1) * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g_durn(
+u8 v_high_g_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write high_g duration*/
+			com_rslt = p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__REG,
+			&v_high_g_durn_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
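+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): with 2.5 ms per LSB as noted above, a register value of 40
+ * delays the high-g interrupt by roughly 100 ms.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_high_g_durn(40);
+ */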
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the high-g interrupt from the register 0x5E 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : Pointer holding the value of Threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g_thres(
+u8 *v_high_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_high_g_thres_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES);
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the high-g interrupt from the register 0x5E 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : The value of Threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g_thres(
+u8 v_high_g_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		com_rslt = p_smi130->SMI130_BUS_WRITE_FUNC(
+		p_smi130->dev_addr,
+		SMI130_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__REG,
+		&v_high_g_thres_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	}
+	return com_rslt;
+}
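+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): the threshold LSB weight follows the accel g range table above.
+ * Assuming a 4g range configured elsewhere, 64 * 15.63 mg is roughly 1 g.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_high_g_thres(64);
+ */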
+/*!
+ *	@brief This API reads any motion duration
+ *	from the register 0x5F bit 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration is (v_any_motion_durn_u8 + 1) consecutive
+ *	data points above the any motion threshold
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_any_motion_durn(
+u8 *v_any_motion_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* read any motion duration*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		*v_any_motion_durn_u8 = SMI130_GET_BITSLICE
+		(v_data_u8,
+		SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN);
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write any motion duration
+ *	from the register 0x5F bit 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration is (v_any_motion_durn_u8 + 1) consecutive
+ *	data points above the any motion threshold
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_any_motion_durn(
+u8 v_any_motion_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* write any motion duration*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN,
+			v_any_motion_durn_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	}
+	return com_rslt;
+}
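+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): per the note above, a register value of 1 requires
+ * (1 + 1) = 2 consecutive data points above the any motion threshold
+ * before the interrupt fires.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_any_motion_durn(1);
+ */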
+ /*!
+ *	@brief This API read Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note v_slow_no_motion_u8(5:4) = 0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4) = 0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5) = 0b1 ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_slow_no_motion_durn(
+u8 *v_slow_no_motion_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* read slow no motion duration*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		*v_slow_no_motion_u8 = SMI130_GET_BITSLICE
+		(v_data_u8,
+		SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API write Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note v_slow_no_motion_u8(5:4) = 0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4) = 0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5) = 0b1 ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_slow_no_motion_durn(
+u8 v_slow_no_motion_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	/* write slow no motion duration*/
+	com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+	(p_smi130->dev_addr,
+	SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG,
+	&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data_u8 = SMI130_SET_BITSLICE
+		(v_data_u8,
+		SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN,
+		v_slow_no_motion_u8);
+		com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	}
+}
+return com_rslt;
+}
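+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): per the encoding above, a register value of 0x13 has bits
+ * 5:4 = 0b01 and bits 3:0 = 3, which selects (3 + 5) * 5.12 s, i.e. a
+ * slow/no-motion delay of about 41 seconds.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_slow_no_motion_durn(0x13);
+ */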
+/*!
+ *	@brief This API is used to read threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_any_motion_thres(
+u8 *v_any_motion_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read any motion threshold*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_any_motion_thres_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_any_motion_thres(
+u8 v_any_motion_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* write any motion threshold*/
+		com_rslt = p_smi130->SMI130_BUS_WRITE_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__REG,
+		&v_any_motion_thres_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	}
+	return com_rslt;
+}
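+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): the threshold LSB weight follows the accel g range table above.
+ * Assuming a 2g range configured elsewhere, 20 * 3.91 mg is about 78 mg.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_any_motion_thres(20);
+ */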
+ /*!
+ *	@brief This API is used to read threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_slow_no_motion_thres(
+u8 *v_slow_no_motion_thres_u8)
+{
+SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* read slow no motion threshold*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		*v_slow_no_motion_thres_u8 =
+		SMI130_GET_BITSLICE(v_data_u8,
+		SMI130_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_slow_no_motion_thres(
+u8 v_slow_no_motion_thres_u8)
+{
+SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* write slow no motion threshold*/
+		com_rslt = p_smi130->SMI130_BUS_WRITE_FUNC(
+		p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__REG,
+		&v_slow_no_motion_thres_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to read
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_slow_no_motion_select(
+u8 *v_intr_slow_no_motion_select_u8)
+{
+SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* read slow no motion select*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+		p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		*v_intr_slow_no_motion_select_u8 =
+		SMI130_GET_BITSLICE(v_data_u8,
+		SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT);
+	}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_slow_no_motion_select(
+u8 v_intr_slow_no_motion_select_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+} else {
+if (v_intr_slow_no_motion_select_u8 <= SMI130_MAX_VALUE_NO_MOTION) {
+	/* write slow no motion select*/
+	com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+	(p_smi130->dev_addr,
+	SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG,
+	&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+		SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT,
+		v_intr_slow_no_motion_select_u8);
+		com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	}
+} else {
+com_rslt = E_SMI130_OUT_OF_RANGE;
+}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to select
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_significant_motion_select(
+u8 *v_intr_significant_motion_select_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the significant or any motion interrupt*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_intr_significant_motion_select_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write, select
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_significant_motion_select(
+u8 v_intr_significant_motion_select_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	if (v_intr_significant_motion_select_u8 <=
+	SMI130_MAX_VALUE_SIGNIFICANT_MOTION) {
+		/* write the significant or any motion interrupt*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT,
+			v_intr_significant_motion_select_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief This API is used to read
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_significant_motion_skip(
+u8 *v_int_sig_mot_skip_u8)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read significant skip time*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_int_sig_mot_skip_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_significant_motion_skip(
+u8 v_int_sig_mot_skip_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_int_sig_mot_skip_u8 <= SMI130_MAX_UNDER_SIG_MOTION) {
+			/* write significant skip time*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP,
+				v_int_sig_mot_skip_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to read
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 second
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_significant_motion_proof(
+u8 *v_significant_motion_proof_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read significant proof time */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_significant_motion_proof_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to write
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 second
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_significant_motion_proof(
+u8 v_significant_motion_proof_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_significant_motion_proof_u8
+		<= SMI130_MAX_UNDER_SIG_MOTION) {
+			/* write significant proof time */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF,
+				v_significant_motion_proof_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
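+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): enable significant-motion detection with a 3 s skip time and a
+ * 1 s proof time, using the value tables documented above. Each call is
+ * checked against SUCCESS before the next one.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_significant_motion_select(0x01);
+ *	if (rc == SUCCESS)
+ *		rc = smi130_set_intr_significant_motion_skip(0x01);
+ *	if (rc == SUCCESS)
+ *		rc = smi130_set_intr_significant_motion_proof(0x02);
+ */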
+/*!
+ *	@brief This API is used to get the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_DURN_50MS
+ *  0x01     | SMI130_TAP_DURN_100MS
+ *  0x02     | SMI130_TAP_DURN_150MS
+ *  0x03     | SMI130_TAP_DURN_200MS
+ *  0x04     | SMI130_TAP_DURN_250MS
+ *  0x05     | SMI130_TAP_DURN_375MS
+ *  0x06     | SMI130_TAP_DURN_500MS
+ *  0x07     | SMI130_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_durn(
+u8 *v_tap_durn_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap duration*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_DURN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_durn_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_DURN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to write the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_DURN_50MS
+ *  0x01     | SMI130_TAP_DURN_100MS
+ *  0x02     | SMI130_TAP_DURN_150MS
+ *  0x03     | SMI130_TAP_DURN_200MS
+ *  0x04     | SMI130_TAP_DURN_250MS
+ *  0x05     | SMI130_TAP_DURN_375MS
+ *  0x06     | SMI130_TAP_DURN_500MS
+ *  0x07     | SMI130_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_durn(
+u8 v_tap_durn_u8)
+{
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_tap_durn_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_tap_durn_u8 <= SMI130_MAX_TAP_TURN) {
+			switch (v_tap_durn_u8) {
+			case SMI130_TAP_DURN_50MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_50MS;
+				break;
+			case SMI130_TAP_DURN_100MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_100MS;
+				break;
+			case SMI130_TAP_DURN_150MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_150MS;
+				break;
+			case SMI130_TAP_DURN_200MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_200MS;
+				break;
+			case SMI130_TAP_DURN_250MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_250MS;
+				break;
+			case SMI130_TAP_DURN_375MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_375MS;
+				break;
+			case SMI130_TAP_DURN_500MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_500MS;
+				break;
+			case SMI130_TAP_DURN_700MS:
+				v_data_tap_durn_u8 = SMI130_TAP_DURN_700MS;
+				break;
+			default:
+				break;
+			}
+			/* write tap duration*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_DURN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_TAP_0_INTR_TAP_DURN,
+				v_data_tap_durn_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_TAP_0_INTR_TAP_DURN__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
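+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): the setter accepts the SMI130_TAP_DURN_* values listed above,
+ * so a 200 ms double-tap window is selected directly by its constant.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_tap_durn(SMI130_TAP_DURN_200MS);
+ */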
+ /*!
+ *	@brief This API read the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_SHOCK_50MS
+ *  0x01     | SMI130_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_shock(
+u8 *v_tap_shock_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap shock duration*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_shock_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_SHOCK_50MS
+ *  0x01     | SMI130_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_shock(u8 v_tap_shock_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_tap_shock_u8 <= SMI130_MAX_VALUE_TAP_SHOCK) {
+			/* write tap shock duration*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK,
+				v_tap_shock_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_QUIET_30MS
+ *  0x01     | SMI130_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_quiet(
+u8 *v_tap_quiet_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap quiet duration*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_quiet_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_QUIET_30MS
+ *  0x01     | SMI130_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_quiet(u8 v_tap_quiet_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_tap_quiet_u8 <= SMI130_MAX_VALUE_TAP_QUIET) {
+			/* write tap quiet duration*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET,
+				v_tap_quiet_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_thres(
+u8 *v_tap_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read tap threshold*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_1_INTR_TAP_THRES__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_tap_thres_u8 = SMI130_GET_BITSLICE
+			(v_data_u8,
+			SMI130_USER_INTR_TAP_1_INTR_TAP_THRES);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_thres(
+u8 v_tap_thres_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write tap threshold*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_TAP_1_INTR_TAP_THRES__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_TAP_1_INTR_TAP_THRES,
+				v_tap_thres_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_TAP_1_INTR_TAP_THRES__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
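+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): per the table above, at a 2g range (configured elsewhere) a
+ * register value of 7 gives a tap threshold of (7 + 1) * 62.5 mg = 500 mg.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_tap_thres(7);
+ */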
+ /*!
+ *	@brief This API read the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mbl_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_mode(
+u8 *v_orient_mbl_mode_u8)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read orientation threshold*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_mode_u8 = SMI130_GET_BITSLICE
+			(v_data_u8,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mbl_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_mode(
+u8 v_orient_mbl_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_orient_mbl_mode_u8 <= SMI130_MAX_ORIENT_MODE) {
+			/* write orientation threshold*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE,
+				v_orient_mbl_mode_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read the orient_mbl blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_mbl_blocking_u8 : The value of orient_mbl blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient_mbl is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_blocking(
+u8 *v_orient_mbl_blocking_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read orient_mbl blocking mode*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_blocking_u8 = SMI130_GET_BITSLICE
+			(v_data_u8,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write the orient_mbl blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_mbl_blocking_u8 : The value of orient_mbl blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient_mbl is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_blocking(
+u8 v_orient_mbl_blocking_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	if (v_orient_mbl_blocking_u8 <= SMI130_MAX_ORIENT_BLOCKING) {
+		/* write orient_mbl blocking mode*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING,
+			v_orient_mbl_blocking_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief This API read Orient interrupt
+ *	hysteresis, from the register 0x65 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_mbl_hyst_u8 : The value of orient_mbl hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_hyst(
+u8 *v_orient_mbl_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read orient_mbl hysteresis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_hyst_u8 = SMI130_GET_BITSLICE
+			(v_data_u8,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write Orient interrupt
+ *	hysteresis, from the register 0x65 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_mbl_hyst_u8 : The value of orient_mbl hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_hyst(
+u8 v_orient_mbl_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write orient_mbl hysteresis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST,
+				v_orient_mbl_hyst_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
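+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * driver): the orientation hysteresis LSB is 62.5 mg regardless of the
+ * accel range, so a register value of 2 corresponds to 125 mg.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_intr_orient_mbl_hyst(2);
+ */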
+ /*!
+ *	@brief This API read Orient
+ *	blocking angle (0 to 44.8 degrees) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_mbl_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_theta(
+u8 *v_orient_mbl_theta_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read Orient blocking angle*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_theta_u8 = SMI130_GET_BITSLICE
+			(v_data_u8,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write Orient
+ *	blocking angle (0 to 44.8 degrees) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_mbl_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_theta(
+u8 v_orient_mbl_theta_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	if (v_orient_mbl_theta_u8 <= SMI130_MAX_ORIENT_THETA) {
+		/* write Orient blocking angle*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA,
+			v_orient_mbl_theta_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+/*!
+ *	@brief This API read orient_mbl change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_mbl_ud_u8 : The value of orient_mbl change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_ud_enable(
+u8 *v_orient_mbl_ud_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read orient_mbl up/down enable*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_ud_u8 = SMI130_GET_BITSLICE
+			(v_data_u8,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write orient_mbl change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_mbl_ud_u8 : The value of orient_mbl change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_ud_enable(
+u8 v_orient_mbl_ud_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	if (v_orient_mbl_ud_u8 <= SMI130_MAX_VALUE_ORIENT_UD) {
+		/* write orient_mbl up/down enable */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE,
+			v_orient_mbl_ud_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
+ /*!
+ *	@brief This API read orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_mbl_axes_u8 : The value of orient_mbl axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_mbl_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_mbl_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_axes_enable(
+u8 *v_orient_mbl_axes_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read orientation axes changes */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_orient_mbl_axes_u8 = SMI130_GET_BITSLICE
+			(v_data_u8,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_mbl_axes_u8 : The value of orient_mbl axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_mbl_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_mbl_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_axes_enable(
+u8 v_orient_mbl_axes_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+	if (v_orient_mbl_axes_u8 <= SMI130_MAX_VALUE_ORIENT_AXES) {
+		/* write orientation axes changes */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX,
+			v_orient_mbl_axes_u8);
+			com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+return com_rslt;
+}
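+/*
+ * Usage sketch (illustrative only; not part of the original driver): the
+ * orientation interrupt is configured by combining the setters above. The
+ * register values are examples taken from the tables in the function
+ * headers; the calling context and error handling are assumptions.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += smi130_set_intr_orient_mbl_hyst(0x01);
+ *	rc += smi130_set_intr_orient_mbl_theta(0x08);
+ *	rc += smi130_set_intr_orient_mbl_ud_enable(0x01);
+ *	rc += smi130_set_intr_orient_mbl_axes_enable(0x00);
+ *
+ * A non-zero rc indicates a bus communication or out-of-range error.
+ */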
+ /*!
+ *	@brief This API read Flat angle (0 to 44.8) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat_theta(
+u8 *v_flat_theta_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read Flat angle*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_theta_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write Flat angle (0 to 44.8) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat_theta(
+u8 v_flat_theta_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_flat_theta_u8 <= SMI130_MAX_FLAT_THETA) {
+			/* write Flat angle */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA,
+				v_flat_theta_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read Flat interrupt hold time
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat_hold(
+u8 *v_flat_hold_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read flat hold time*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_hold_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write Flat interrupt hold time
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat_hold(
+u8 v_flat_hold_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_flat_hold_u8 <= SMI130_MAX_FLAT_HOLD) {
+			/* write flat hold time*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD,
+				v_flat_hold_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat_hyst(
+u8 *v_flat_hyst_u8)
+{
+	/* variable used to return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the flat hysteresis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_flat_hyst_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat_hyst(
+u8 v_flat_hyst_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_flat_hyst_u8 <= SMI130_MAX_FLAT_HYST) {
+			/* read the flat hysteresis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST,
+				v_flat_hyst_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
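+/*
+ * Usage sketch (illustrative only; not part of the original driver): the
+ * flat interrupt is tuned with the three setters above. The values are
+ * examples; per the hold-time table, 0x01 selects the 512ms hold.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += smi130_set_intr_flat_theta(0x08);
+ *	rc += smi130_set_intr_flat_hold(0x01);
+ *	rc += smi130_set_intr_flat_hyst(0x02);
+ *
+ * A non-zero rc indicates a bus communication or out-of-range error.
+ */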
+ /*!
+ *	@brief This API read accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_accel_z(u8 *v_foc_accel_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel offset compensation for z axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Z__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_accel_z_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FOC_ACCEL_Z);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_accel_z(
+u8 v_foc_accel_z_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write the accel offset compensation for z axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Z__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FOC_ACCEL_Z,
+				v_foc_accel_z_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_Z__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_accel_y(u8 *v_foc_accel_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the accel offset compensation for y axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_accel_y_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FOC_ACCEL_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_accel_y(u8 v_foc_accel_y_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_foc_accel_y_u8 <= SMI130_MAX_ACCEL_FOC) {
+			/* write the accel offset compensation for y axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FOC_ACCEL_Y,
+				v_foc_accel_y_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_Y__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for x-axis
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_accel_x(u8 *v_foc_accel_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* read the accel offset compensation for x axis*/
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+		p_smi130->dev_addr,
+		SMI130_USER_FOC_ACCEL_X__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		*v_foc_accel_x_u8 = SMI130_GET_BITSLICE(v_data_u8,
+		SMI130_USER_FOC_ACCEL_X);
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for x-axis
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_accel_x(u8 v_foc_accel_x_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_foc_accel_x_u8 <= SMI130_MAX_ACCEL_FOC) {
+			/* write the accel offset compensation for x axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_X__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FOC_ACCEL_X,
+				v_foc_accel_x_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_X__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API writes accel fast offset compensation
+ *	from the register 0x69 bit 0 to 5
+ *	Each axis is written individually:
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_u8: The value of accel offset compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_axis_u8: The value of accel offset axis selection
+  *	value    | axis
+ * ----------|-------------------
+ *  0        | FOC_X_AXIS
+ *  1        | FOC_Y_AXIS
+ *  2        | FOC_Z_AXIS
+ *
+ *	@param v_accel_offset_s8: The accel offset value
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_foc_trigger(u8 v_axis_u8,
+u8 v_foc_accel_u8, s8 *v_accel_offset_s8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+s8 v_status_s8 = SUCCESS;
+u8 v_timeout_u8 = SMI130_INIT_VALUE;
+s8 v_foc_accel_offset_x_s8  = SMI130_INIT_VALUE;
+s8 v_foc_accel_offset_y_s8 =  SMI130_INIT_VALUE;
+s8 v_foc_accel_offset_z_s8 =  SMI130_INIT_VALUE;
+u8 focstatus = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+} else {
+	v_status_s8 = smi130_set_accel_offset_enable(
+	ACCEL_OFFSET_ENABLE);
+	if (v_status_s8 == SUCCESS) {
+		switch (v_axis_u8) {
+		case FOC_X_AXIS:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_X__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FOC_ACCEL_X,
+				v_foc_accel_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_X__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the
+			FOC need to write
+			0x03 in the register 0x7e*/
+			com_rslt +=
+			smi130_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt +=
+			smi130_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != SMI130_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != SMI130_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				SMI130_MAXIMUM_TIMEOUT)) {
+					p_smi130->delay_msec(
+					SMI130_DELAY_SETTLING_TIME);
+					com_rslt = smi130_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+				(focstatus == SMI130_FOC_STAT_HIGH)) {
+				com_rslt +=
+				smi130_get_accel_offset_compensation_xaxis(
+				&v_foc_accel_offset_x_s8);
+				*v_accel_offset_s8 =
+				v_foc_accel_offset_x_s8;
+			}
+		break;
+		case FOC_Y_AXIS:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FOC_ACCEL_Y,
+				v_foc_accel_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_Y__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC
+			need to write 0x03
+			in the register 0x7e*/
+			com_rslt +=
+			smi130_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt +=
+			smi130_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != SMI130_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != SMI130_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				SMI130_MAXIMUM_TIMEOUT)) {
+					p_smi130->delay_msec(
+					SMI130_DELAY_SETTLING_TIME);
+					com_rslt = smi130_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == SMI130_FOC_STAT_HIGH)) {
+				com_rslt +=
+				smi130_get_accel_offset_compensation_yaxis(
+				&v_foc_accel_offset_y_s8);
+				*v_accel_offset_s8 =
+				v_foc_accel_offset_y_s8;
+			}
+		break;
+		case FOC_Z_AXIS:
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Z__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FOC_ACCEL_Z,
+				v_foc_accel_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_Z__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC need to write
+			0x03 in the register 0x7e*/
+			com_rslt +=
+			smi130_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt +=
+			smi130_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != SMI130_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != SMI130_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				SMI130_MAXIMUM_TIMEOUT)) {
+					p_smi130->delay_msec(
+					SMI130_DELAY_SETTLING_TIME);
+					com_rslt = smi130_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == SMI130_FOC_STAT_HIGH)) {
+				com_rslt +=
+				smi130_get_accel_offset_compensation_zaxis(
+				&v_foc_accel_offset_z_s8);
+				*v_accel_offset_s8 =
+				v_foc_accel_offset_z_s8;
+			}
+		break;
+		default:
+		break;
+		}
+	} else {
+	com_rslt =  ERROR;
+	}
+}
+return com_rslt;
+}
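+/*
+ * Usage sketch (illustrative only; not part of the original driver): fast
+ * offset compensation for a single accel axis. FOC_X_AXIS and the +1g
+ * target value (0x01) come from the tables above; the calling context is
+ * an assumption.
+ *
+ *	s8 x_offset = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_accel_foc_trigger(FOC_X_AXIS, 0x01, &x_offset);
+ *
+ * On success the compensated x-axis offset is returned in x_offset.
+ */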
+/*!
+ *	@brief This API write fast accel offset compensation
+ *	for all axes together to the register 0x69 bit 0 to 5
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_x_u8: The value of accel offset x compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_y_u8: The value of accel offset y compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_z_u8: The value of accel offset z compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_accel_off_x_s8: The value of accel offset x axis
+ *  @param  v_accel_off_y_s8: The value of accel offset y axis
+ *  @param  v_accel_off_z_s8: The value of accel offset z axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_accel_foc_trigger_xyz(u8 v_foc_accel_x_u8,
+u8 v_foc_accel_y_u8, u8 v_foc_accel_z_u8, s8 *v_accel_off_x_s8,
+s8 *v_accel_off_y_s8, s8 *v_accel_off_z_s8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 focx = SMI130_INIT_VALUE;
+u8 focy = SMI130_INIT_VALUE;
+u8 focz = SMI130_INIT_VALUE;
+s8 v_foc_accel_offset_x_s8 = SMI130_INIT_VALUE;
+s8 v_foc_accel_offset_y_s8 = SMI130_INIT_VALUE;
+s8 v_foc_accel_offset_z_s8 = SMI130_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+u8 v_timeout_u8 = SMI130_INIT_VALUE;
+u8 focstatus = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		v_status_s8 = smi130_set_accel_offset_enable(
+		ACCEL_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			/* foc x axis*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_X__REG,
+			&focx, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				focx = SMI130_SET_BITSLICE(focx,
+				SMI130_USER_FOC_ACCEL_X,
+				v_foc_accel_x_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_X__REG,
+				&focx, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* foc y axis*/
+			com_rslt +=
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Y__REG,
+			&focy, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				focy = SMI130_SET_BITSLICE(focy,
+				SMI130_USER_FOC_ACCEL_Y,
+				v_foc_accel_y_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_Y__REG,
+				&focy, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* foc z axis*/
+			com_rslt +=
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_ACCEL_Z__REG,
+			&focz, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				focz = SMI130_SET_BITSLICE(focz,
+				SMI130_USER_FOC_ACCEL_Z,
+				v_foc_accel_z_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_FOC_ACCEL_Z__REG,
+				&focz, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC need to
+			write 0x03 in the register 0x7e*/
+			com_rslt += smi130_set_command_register(
+			START_FOC_ACCEL_GYRO);
+
+			com_rslt += smi130_get_foc_rdy(
+			&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != SMI130_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != SMI130_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				SMI130_MAXIMUM_TIMEOUT)) {
+					p_smi130->delay_msec(
+					SMI130_DELAY_SETTLING_TIME);
+					com_rslt = smi130_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == SMI130_FOC_STAT_HIGH)) {
+				com_rslt +=
+				smi130_get_accel_offset_compensation_xaxis(
+				&v_foc_accel_offset_x_s8);
+				*v_accel_off_x_s8 =
+				v_foc_accel_offset_x_s8;
+				com_rslt +=
+				smi130_get_accel_offset_compensation_yaxis(
+				&v_foc_accel_offset_y_s8);
+				*v_accel_off_y_s8 =
+				v_foc_accel_offset_y_s8;
+				com_rslt +=
+				smi130_get_accel_offset_compensation_zaxis(
+				&v_foc_accel_offset_z_s8);
+				*v_accel_off_z_s8 =
+				v_foc_accel_offset_z_s8;
+			}
+		} else {
+		com_rslt =  ERROR;
+		}
+	}
+return com_rslt;
+}
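+/*
+ * Usage sketch (illustrative only; not part of the original driver):
+ * triggering fast offset compensation on all three accel axes in one
+ * call. The target values (0x03 = 0g for x and y, 0x01 = +1g for z) are
+ * examples from the tables above.
+ *
+ *	s8 off_x = SMI130_INIT_VALUE;
+ *	s8 off_y = SMI130_INIT_VALUE;
+ *	s8 off_z = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_accel_foc_trigger_xyz(0x03, 0x03, 0x01,
+ *		&off_x, &off_y, &off_z);
+ */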
+/*!
+ *	@brief This API read gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_gyro_enable(
+u8 *v_foc_gyro_u8)
+{
+	/* used for return the status of bus communication */
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the gyro fast offset enable*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_FOC_GYRO_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_foc_gyro_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_FOC_GYRO_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *	@param v_gyro_off_x_s16 : The value of gyro fast offset x axis data
+ *	@param v_gyro_off_y_s16 : The value of gyro fast offset y axis data
+ *	@param v_gyro_off_z_s16 : The value of gyro fast offset z axis data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_gyro_enable(
+u8 v_foc_gyro_u8, s16 *v_gyro_off_x_s16,
+s16 *v_gyro_off_y_s16, s16 *v_gyro_off_z_s16)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+u8 v_timeout_u8 = SMI130_INIT_VALUE;
+s16 offsetx = SMI130_INIT_VALUE;
+s16 offsety = SMI130_INIT_VALUE;
+s16 offsetz = SMI130_INIT_VALUE;
+u8 focstatus = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		v_status_s8 = smi130_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_FOC_GYRO_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_FOC_GYRO_ENABLE,
+				v_foc_gyro_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_FOC_GYRO_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			/* trigger the FOC need to write 0x03
+			in the register 0x7e*/
+			com_rslt += smi130_set_command_register
+			(START_FOC_ACCEL_GYRO);
+
+			com_rslt += smi130_get_foc_rdy(&focstatus);
+			if ((com_rslt != SUCCESS) ||
+			(focstatus != SMI130_FOC_STAT_HIGH)) {
+				while ((com_rslt != SUCCESS) ||
+				(focstatus != SMI130_FOC_STAT_HIGH
+				&& v_timeout_u8 <
+				SMI130_MAXIMUM_TIMEOUT)) {
+					p_smi130->delay_msec(
+					SMI130_DELAY_SETTLING_TIME);
+					com_rslt = smi130_get_foc_rdy(
+					&focstatus);
+					v_timeout_u8++;
+				}
+			}
+			if ((com_rslt == SUCCESS) &&
+			(focstatus == SMI130_FOC_STAT_HIGH)) {
+				com_rslt +=
+				smi130_get_gyro_offset_compensation_xaxis
+				(&offsetx);
+				*v_gyro_off_x_s16 = offsetx;
+
+				com_rslt +=
+				smi130_get_gyro_offset_compensation_yaxis
+				(&offsety);
+				*v_gyro_off_y_s16 = offsety;
+
+				com_rslt +=
+				smi130_get_gyro_offset_compensation_zaxis(
+				&offsetz);
+				*v_gyro_off_z_s16 = offsetz;
+			}
+		} else {
+		com_rslt = ERROR;
+		}
+	}
+return com_rslt;
+}
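+/*
+ * Usage sketch (illustrative only; not part of the original driver):
+ * enabling gyro fast offset compensation (value 1 per the table above)
+ * and collecting the resulting per-axis offsets. The calling context is
+ * an assumption.
+ *
+ *	s16 gyro_off_x = SMI130_INIT_VALUE;
+ *	s16 gyro_off_y = SMI130_INIT_VALUE;
+ *	s16 gyro_off_z = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rc;
+ *
+ *	rc = smi130_set_foc_gyro_enable(0x01, &gyro_off_x,
+ *		&gyro_off_y, &gyro_off_z);
+ */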
+ /*!
+ *	@brief This API read NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_nvm_prog_enable(
+u8 *v_nvm_prog_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read NVM program*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_CONFIG_NVM_PROG_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nvm_prog_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_CONFIG_NVM_PROG_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_nvm_prog_enable(
+u8 v_nvm_prog_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_nvm_prog_u8 <= SMI130_MAX_VALUE_NVM_PROG) {
+			/* write the NVM program*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_CONFIG_NVM_PROG_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_CONFIG_NVM_PROG_ENABLE,
+				v_nvm_prog_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_CONFIG_NVM_PROG_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ * @brief This API read to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_spi3(
+u8 *v_spi3_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read SPI mode*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_SPI3__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_spi3_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_IF_CONFIG_SPI3);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API write to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_spi3(
+u8 v_spi3_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_spi3_u8 <= SMI130_MAX_VALUE_SPI3) {
+			/* write SPI mode*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_SPI3__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_IF_CONFIG_SPI3,
+				v_spi3_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_IF_CONFIG_SPI3__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog timeout after 1 ms
+ *   1     |  I2C watchdog timeout after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_wdt_select(
+u8 *v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read I2C watch dog timer */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_I2C_WDT_SELECT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_wdt_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_IF_CONFIG_I2C_WDT_SELECT);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog timeout after 1 ms
+ *   1     |  I2C watchdog timeout after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_i2c_wdt_select(
+u8 v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_i2c_wdt_u8 <= SMI130_MAX_VALUE_I2C_WDT) {
+			/* write I2C watch dog timer */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_I2C_WDT_SELECT__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_IF_CONFIG_I2C_WDT_SELECT,
+				v_i2c_wdt_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_IF_CONFIG_I2C_WDT_SELECT__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_wdt_enable(
+u8 *v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read i2c watch dog enable */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_i2c_wdt_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_i2c_wdt_enable(
+u8 v_i2c_wdt_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_i2c_wdt_u8 <= SMI130_MAX_VALUE_I2C_WDT) {
+			/* write i2c watch dog enable */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE,
+				v_i2c_wdt_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
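+/*
+ * Usage sketch (illustrative only; not part of the original driver):
+ * enabling the I2C watchdog with the 50ms timeout. Per the tables above,
+ * select value 1 chooses the 50ms timeout and enable value 1 turns the
+ * watchdog on.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rc = SUCCESS;
+ *
+ *	rc += smi130_set_i2c_wdt_select(0x01);
+ *	rc += smi130_set_i2c_wdt_enable(0x01);
+ */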
+/*!
+ * @brief This API read I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_if_mode(
+u8 *v_if_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read if mode*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_IF_MODE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_if_mode_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_IF_CONFIG_IF_MODE);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API write I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_if_mode(
+u8 v_if_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_if_mode_u8 <= SMI130_MAX_IF_MODE) {
+			/* write if mode*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_IF_CONFIG_IF_MODE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_IF_CONFIG_IF_MODE,
+				v_if_mode_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_IF_CONFIG_IF_MODE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | anymotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | anymotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | anymotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_sleep_trigger(
+u8 *v_gyro_sleep_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro sleep trigger */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_SLEEP_TRIGGER__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_sleep_trigger_u8 =
+			SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_GYRO_SLEEP_TRIGGER);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | anymotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | anymotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | anymotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_sleep_trigger(
+u8 v_gyro_sleep_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_gyro_sleep_trigger_u8 <= SMI130_MAX_GYRO_SLEEP_TIGGER) {
+			/* write gyro sleep trigger */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_SLEEP_TRIGGER__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_SLEEP_TRIGGER,
+				v_gyro_sleep_trigger_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_GYRO_SLEEP_TRIGGER__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_wakeup_trigger(
+u8 *v_gyro_wakeup_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro wakeup trigger */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_WAKEUP_TRIGGER__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_wakeup_trigger_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_GYRO_WAKEUP_TRIGGER);
+	  }
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_wakeup_trigger(
+u8 v_gyro_wakeup_trigger_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_gyro_wakeup_trigger_u8
+		<= SMI130_MAX_GYRO_WAKEUP_TRIGGER) {
+			/* write gyro wakeup trigger */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_WAKEUP_TRIGGER__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_WAKEUP_TRIGGER,
+				v_gyro_wakeup_trigger_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_GYRO_WAKEUP_TRIGGER__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_sleep_state(
+u8 *v_gyro_sleep_state_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro sleep state*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_SLEEP_STATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_sleep_state_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_GYRO_SLEEP_STATE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_sleep_state(
+u8 v_gyro_sleep_state_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_gyro_sleep_state_u8 <= SMI130_MAX_VALUE_SLEEP_STATE) {
+			/* write gyro sleep state*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_SLEEP_STATE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_SLEEP_STATE,
+				v_gyro_sleep_state_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_GYRO_SLEEP_STATE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read gyro wakeup interrupt
+ *	from the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_wakeup_intr(
+u8 *v_gyro_wakeup_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro wakeup interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_WAKEUP_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_wakeup_intr_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_GYRO_WAKEUP_INTR);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro wakeup interrupt
+ *	to the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_wakeup_intr(
+u8 v_gyro_wakeup_intr_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_gyro_wakeup_intr_u8 <= SMI130_MAX_VALUE_WAKEUP_INTR) {
+			/* write gyro wakeup interrupt */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_WAKEUP_INTR__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_WAKEUP_INTR,
+				v_gyro_wakeup_intr_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_GYRO_WAKEUP_INTR__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ * @brief This API read the accel self-test axis selection
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_selftest_axis(
+u8 *v_accel_selftest_axis_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read accel self test axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_ACCEL_SELFTEST_AXIS__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_selftest_axis_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_ACCEL_SELFTEST_AXIS);
+		}
+	return com_rslt;
+}
+/*!
+ * @brief This API write the accel self-test axis selection
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_selftest_axis(
+u8 v_accel_selftest_axis_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_accel_selftest_axis_u8
+		<= SMI130_MAX_ACCEL_SELFTEST_AXIS) {
+			/* write accel self test axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_ACCEL_SELFTEST_AXIS__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_ACCEL_SELFTEST_AXIS,
+				v_accel_selftest_axis_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_ACCEL_SELFTEST_AXIS__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel self test axis sign
+ *	from the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_selftest_sign(
+u8 *v_accel_selftest_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read accel self test axis sign*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_ACCEL_SELFTEST_SIGN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_selftest_sign_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_ACCEL_SELFTEST_SIGN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel self test axis sign
+ *	to the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_selftest_sign(
+u8 v_accel_selftest_sign_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_accel_selftest_sign_u8 <=
+		SMI130_MAX_VALUE_SELFTEST_SIGN) {
+			/* write accel self test axis sign*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_ACCEL_SELFTEST_SIGN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_ACCEL_SELFTEST_SIGN,
+				v_accel_selftest_sign_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_ACCEL_SELFTEST_SIGN__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+			com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel self test amplitude
+ *	from the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_selftest_amp(
+u8 *v_accel_selftest_amp_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read  self test amplitude*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_SELFTEST_AMP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_selftest_amp_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_SELFTEST_AMP);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel self test amplitude
+ *	to the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_selftest_amp(
+u8 v_accel_selftest_amp_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_accel_selftest_amp_u8 <=
+		SMI130_MAX_VALUE_SELFTEST_AMP) {
+			/* write  self test amplitude*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_SELFTEST_AMP__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_SELFTEST_AMP,
+				v_accel_selftest_amp_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_SELFTEST_AMP__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
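+/*
+ * Usage sketch (illustrative only, not called by the driver):
+ * configuring an accel self-test deflection with the three setters
+ * above, assuming the bus read/write and delay callbacks in p_smi130
+ * have already been registered by the caller. The axis, sign and
+ * amplitude codes follow the tables documented above.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *	rslt += smi130_set_accel_selftest_axis(0x01);	// x-axis
+ *	rslt += smi130_set_accel_selftest_sign(0x01);	// positive deflection
+ *	rslt += smi130_set_accel_selftest_amp(0x01);	// high amplitude
+ *	// rslt is SUCCESS only if all three bus transactions succeeded
+*/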
+/*!
+ *	@brief This API read gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_selftest_start(
+u8 *v_gyro_selftest_start_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro self test start */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_SELFTEST_START__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_selftest_start_u8 = SMI130_GET_BITSLICE(
+			v_data_u8,
+			SMI130_USER_GYRO_SELFTEST_START);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_selftest_start(
+u8 v_gyro_selftest_start_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_gyro_selftest_start_u8 <=
+		SMI130_MAX_VALUE_SELFTEST_START) {
+			/* write gyro self test start */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_GYRO_SELFTEST_START__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_GYRO_SELFTEST_START,
+				v_gyro_selftest_start_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_GYRO_SELFTEST_START__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
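+/*
+ * Usage sketch (illustrative only): starting the gyro self-test via
+ * the setter above and reading the trigger bit back through the
+ * corresponding getter. Writing 0x01 is assumed to start the test,
+ * as suggested by the single-bit trigger field; evaluating the test
+ * result is outside the scope of these two helpers.
+ *
+ *	u8 v_started_u8 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *	rslt = smi130_set_gyro_selftest_start(0x01);
+ *	rslt += smi130_get_gyro_selftest_start(&v_started_u8);
+*/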
+ /*!
+ * @brief This API read primary interface selection I2C or SPI
+ *	from the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_spi_enable(u8 *v_spi_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read interface selection*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_NV_CONFIG_SPI_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_spi_enable_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_NV_CONFIG_SPI_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ * @brief This API write primary interface selection I2C or SPI
+ *	to the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_spi_enable(u8 v_spi_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write interface selection*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_NV_CONFIG_SPI_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_NV_CONFIG_SPI_ENABLE,
+				v_spi_enable_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_NV_CONFIG_SPI_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read the spare zero
+ *	from register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_spare0_trim(u8 *v_spare0_trim_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read spare zero*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_NV_CONFIG_SPARE0__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_spare0_trim_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_NV_CONFIG_SPARE0);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write the spare zero
+ *	to register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_spare0_trim(u8 v_spare0_trim_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write  spare zero*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_NV_CONFIG_SPARE0__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_NV_CONFIG_SPARE0,
+				v_spare0_trim_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_NV_CONFIG_SPARE0__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read the NVM counter
+ *	from register 0x70 bit 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_nvm_counter(u8 *v_nvm_counter_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read NVM counter*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_NV_CONFIG_NVM_COUNTER__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_nvm_counter_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_NV_CONFIG_NVM_COUNTER);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write the NVM counter
+ *	to register 0x70 bit 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_nvm_counter(
+u8 v_nvm_counter_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write NVM counter*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_NV_CONFIG_NVM_COUNTER__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_NV_CONFIG_NVM_COUNTER,
+				v_nvm_counter_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_NV_CONFIG_NVM_COUNTER__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel manual offset compensation of x axis
+ *	from the register 0x71 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_compensation_xaxis(
+s8 *v_accel_off_x_s8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read accel manual offset compensation of x axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_0_ACCEL_OFF_X__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_x_s8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_OFFSET_0_ACCEL_OFF_X);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel manual offset compensation of x axis
+ *	to the register 0x71 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_compensation_xaxis(
+s8 v_accel_off_x_s8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* enable accel offset */
+		v_status_s8 = smi130_set_accel_offset_enable(
+		ACCEL_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			/* write accel manual offset compensation of x axis*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_0_ACCEL_OFF_X__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(
+				v_data_u8,
+				SMI130_USER_OFFSET_0_ACCEL_OFF_X,
+				v_accel_off_x_s8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_OFFSET_0_ACCEL_OFF_X__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt =  ERROR;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel manual offset compensation of y axis
+ *	from the register 0x72 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_compensation_yaxis(
+s8 *v_accel_off_y_s8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read accel manual offset compensation of y axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_1_ACCEL_OFF_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_y_s8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_OFFSET_1_ACCEL_OFF_Y);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel manual offset compensation of y axis
+ *	to the register 0x72 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_compensation_yaxis(
+s8 v_accel_off_y_s8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* enable accel offset */
+		v_status_s8 = smi130_set_accel_offset_enable(
+		ACCEL_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			/* write accel manual offset compensation of y axis*/
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_1_ACCEL_OFF_Y__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(
+				v_data_u8,
+				SMI130_USER_OFFSET_1_ACCEL_OFF_Y,
+				v_accel_off_y_s8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_OFFSET_1_ACCEL_OFF_Y__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = ERROR;
+		}
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read accel manual offset compensation of z axis
+ *	from the register 0x73 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_compensation_zaxis(
+s8 *v_accel_off_z_s8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read accel manual offset compensation of z axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_2_ACCEL_OFF_Z__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_z_s8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_OFFSET_2_ACCEL_OFF_Z);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write accel manual offset compensation of z axis
+ *	to the register 0x73 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_compensation_zaxis(
+s8 v_accel_off_z_s8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	u8 v_status_s8 = SUCCESS;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* enable accel offset */
+			v_status_s8 = smi130_set_accel_offset_enable(
+			ACCEL_OFFSET_ENABLE);
+			if (v_status_s8 == SUCCESS) {
+				/* write accel manual offset
+				compensation of z axis*/
+				com_rslt =
+				p_smi130->SMI130_BUS_READ_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_OFFSET_2_ACCEL_OFF_Z__REG,
+				&v_data_u8,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+				if (com_rslt == SUCCESS) {
+					v_data_u8 =
+					SMI130_SET_BITSLICE(v_data_u8,
+					SMI130_USER_OFFSET_2_ACCEL_OFF_Z,
+					v_accel_off_z_s8);
+					com_rslt +=
+					p_smi130->SMI130_BUS_WRITE_FUNC(
+					p_smi130->dev_addr,
+					SMI130_USER_OFFSET_2_ACCEL_OFF_Z__REG,
+					&v_data_u8,
+					SMI130_GEN_READ_WRITE_DATA_LENGTH);
+				}
+			} else {
+			com_rslt = ERROR;
+			}
+		}
+	return com_rslt;
+}
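+/*
+ * Usage sketch (illustrative only): programming a manual accel offset
+ * on all three axes with the setters above. Each setter already calls
+ * smi130_set_accel_offset_enable(ACCEL_OFFSET_ENABLE) internally, so
+ * no separate enable call is needed. The offset values below are
+ * placeholders.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt = SUCCESS;
+ *	rslt += smi130_set_accel_offset_compensation_xaxis(2);
+ *	rslt += smi130_set_accel_offset_compensation_yaxis(-3);
+ *	rslt += smi130_set_accel_offset_compensation_zaxis(5);
+*/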
+/*!
+ *	@brief This API read gyro manual offset compensation of x axis
+ *	from the register 0x74 bit 0 to 7 and 0x77 bit 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_compensation_xaxis(
+s16 *v_gyro_off_x_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data1_u8r = SMI130_INIT_VALUE;
+	u8 v_data2_u8r = SMI130_INIT_VALUE;
+	s16 v_data3_u8r, v_data4_u8r = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro offset x*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_3_GYRO_OFF_X__REG,
+			&v_data1_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			v_data1_u8r = SMI130_GET_BITSLICE(v_data1_u8r,
+			SMI130_USER_OFFSET_3_GYRO_OFF_X);
+			com_rslt += p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_X__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			v_data2_u8r = SMI130_GET_BITSLICE(v_data2_u8r,
+			SMI130_USER_OFFSET_6_GYRO_OFF_X);
+			v_data3_u8r = v_data2_u8r
+			<< SMI130_SHIFT_BIT_POSITION_BY_14_BITS;
+			v_data4_u8r =  v_data1_u8r
+			<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS;
+			v_data3_u8r = v_data3_u8r | v_data4_u8r;
+			*v_gyro_off_x_s16 = v_data3_u8r
+			>> SMI130_SHIFT_BIT_POSITION_BY_06_BITS;
+		}
+	return com_rslt;
+}
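+/*
+ * Worked example of the 10-bit reconstruction above (illustrative
+ * register contents): with 0x74 = 0xFF and 0x77 bits 0..1 = 0x3, the
+ * code forms (0x3 << 14) | (0xFF << 6) = 0xFFC0 in a signed 16-bit
+ * variable and arithmetic-shifts it right by 6, yielding -1; the two
+ * MSBs read from 0x77 therefore supply the sign of the returned
+ * 10-bit offset.
+*/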
+/*!
+ *	@brief This API write gyro manual offset compensation of x axis
+ *	to the register 0x74 bit 0 to 7 and 0x77 bit 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_compensation_xaxis(
+s16 v_gyro_off_x_s16)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data1_u8r, v_data2_u8r = SMI130_INIT_VALUE;
+u16 v_data3_u8r = SMI130_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* write gyro offset x*/
+		v_status_s8 = smi130_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		if (v_status_s8 == SUCCESS) {
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_3_GYRO_OFF_X__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data1_u8r =
+				((s8) (v_gyro_off_x_s16 &
+				SMI130_GYRO_MANUAL_OFFSET_0_7));
+				v_data2_u8r = SMI130_SET_BITSLICE(
+				v_data2_u8r,
+				SMI130_USER_OFFSET_3_GYRO_OFF_X,
+				v_data1_u8r);
+				/* write 0x74 bit 0 to 7*/
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_OFFSET_3_GYRO_OFF_X__REG,
+				&v_data2_u8r,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			com_rslt += p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_X__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data3_u8r =
+				(u16) (v_gyro_off_x_s16 &
+				SMI130_GYRO_MANUAL_OFFSET_8_9);
+				v_data1_u8r = (u8)(v_data3_u8r
+				>> SMI130_SHIFT_BIT_POSITION_BY_08_BITS);
+				v_data2_u8r = SMI130_SET_BITSLICE(
+				v_data2_u8r,
+				SMI130_USER_OFFSET_6_GYRO_OFF_X,
+				v_data1_u8r);
+				/* write 0x77 bit 0 and 1*/
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_OFFSET_6_GYRO_OFF_X__REG,
+				&v_data2_u8r,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		return ERROR;
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API read gyro manual offset compensation of y axis
+ *	from the register 0x75 bit 0 to 7 and 0x77 bit 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_compensation_yaxis(
+s16 *v_gyro_off_y_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data1_u8r = SMI130_INIT_VALUE;
+	u8 v_data2_u8r = SMI130_INIT_VALUE;
+	s16 v_data3_u8r, v_data4_u8r = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro offset y*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_4_GYRO_OFF_Y__REG,
+			&v_data1_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			v_data1_u8r = SMI130_GET_BITSLICE(v_data1_u8r,
+			SMI130_USER_OFFSET_4_GYRO_OFF_Y);
+			com_rslt += p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_Y__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			v_data2_u8r = SMI130_GET_BITSLICE(v_data2_u8r,
+			SMI130_USER_OFFSET_6_GYRO_OFF_Y);
+			v_data3_u8r = v_data2_u8r
+			<< SMI130_SHIFT_BIT_POSITION_BY_14_BITS;
+			v_data4_u8r =  v_data1_u8r
+			<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS;
+			v_data3_u8r = v_data3_u8r | v_data4_u8r;
+			*v_gyro_off_y_s16 = v_data3_u8r
+			>> SMI130_SHIFT_BIT_POSITION_BY_06_BITS;
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro manual offset compensation of y axis
+ *	to the register 0x75 bit 0 to 7 and 0x77 bit 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_compensation_yaxis(
+s16 v_gyro_off_y_s16)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data1_u8r, v_data2_u8r = SMI130_INIT_VALUE;
+u16 v_data3_u8r = SMI130_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* enable gyro offset bit */
+		v_status_s8 = smi130_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		/* write gyro offset y*/
+		if (v_status_s8 == SUCCESS) {
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_4_GYRO_OFF_Y__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data1_u8r =
+				((s8) (v_gyro_off_y_s16 &
+				SMI130_GYRO_MANUAL_OFFSET_0_7));
+				v_data2_u8r = SMI130_SET_BITSLICE(
+				v_data2_u8r,
+				SMI130_USER_OFFSET_4_GYRO_OFF_Y,
+				v_data1_u8r);
+				/* write 0x75 bit 0 to 7*/
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_OFFSET_4_GYRO_OFF_Y__REG,
+				&v_data2_u8r,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			com_rslt += p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_Y__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data3_u8r =
+				(u16) (v_gyro_off_y_s16 &
+				SMI130_GYRO_MANUAL_OFFSET_8_9);
+				v_data1_u8r = (u8)(v_data3_u8r
+				>> SMI130_SHIFT_BIT_POSITION_BY_08_BITS);
+				v_data2_u8r = SMI130_SET_BITSLICE(
+				v_data2_u8r,
+				SMI130_USER_OFFSET_6_GYRO_OFF_Y,
+				v_data1_u8r);
+				/* write 0x77 bit 2 and 3*/
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_OFFSET_6_GYRO_OFF_Y__REG,
+				&v_data2_u8r,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		return ERROR;
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API read gyro manual offset compensation of z axis
+ *	from the register 0x76 bit 0 to 7 and 0x77 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_compensation_zaxis(
+s16 *v_gyro_off_z_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data1_u8r = SMI130_INIT_VALUE;
+	u8 v_data2_u8r = SMI130_INIT_VALUE;
+	s16 v_data3_u8r, v_data4_u8r = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro manual offset z axis*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_5_GYRO_OFF_Z__REG,
+			&v_data1_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			v_data1_u8r = SMI130_GET_BITSLICE
+			(v_data1_u8r,
+			SMI130_USER_OFFSET_5_GYRO_OFF_Z);
+			com_rslt +=
+			p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_Z__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			v_data2_u8r = SMI130_GET_BITSLICE(
+			v_data2_u8r,
+			SMI130_USER_OFFSET_6_GYRO_OFF_Z);
+			v_data3_u8r = v_data2_u8r
+			<< SMI130_SHIFT_BIT_POSITION_BY_14_BITS;
+			v_data4_u8r =  v_data1_u8r
+			<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS;
+			v_data3_u8r = v_data3_u8r | v_data4_u8r;
+			*v_gyro_off_z_s16 = v_data3_u8r
+			>> SMI130_SHIFT_BIT_POSITION_BY_06_BITS;
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write gyro manual offset compensation of z axis
+ *	to the register 0x76 bit 0 to 7 and 0x77 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_compensation_zaxis(
+s16 v_gyro_off_z_s16)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data1_u8r, v_data2_u8r = SMI130_INIT_VALUE;
+u16 v_data3_u8r = SMI130_INIT_VALUE;
+u8 v_status_s8 = SUCCESS;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+	} else {
+		/* enable gyro offset*/
+		v_status_s8 = smi130_set_gyro_offset_enable(
+		GYRO_OFFSET_ENABLE);
+		/* write gyro manual offset z axis*/
+		if (v_status_s8 == SUCCESS) {
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_5_GYRO_OFF_Z__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data1_u8r =
+				((u8) (v_gyro_off_z_s16 &
+				SMI130_GYRO_MANUAL_OFFSET_0_7));
+				v_data2_u8r = SMI130_SET_BITSLICE(
+				v_data2_u8r,
+				SMI130_USER_OFFSET_5_GYRO_OFF_Z,
+				v_data1_u8r);
+				/* write 0x76 bit 0 to 7*/
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_OFFSET_5_GYRO_OFF_Z__REG,
+				&v_data2_u8r,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+
+			com_rslt += p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_Z__REG,
+			&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data3_u8r =
+				(u16) (v_gyro_off_z_s16 &
+				SMI130_GYRO_MANUAL_OFFSET_8_9);
+				v_data1_u8r = (u8)(v_data3_u8r
+				>> SMI130_SHIFT_BIT_POSITION_BY_08_BITS);
+				v_data2_u8r = SMI130_SET_BITSLICE(
+				v_data2_u8r,
+				SMI130_USER_OFFSET_6_GYRO_OFF_Z,
+				v_data1_u8r);
+				/* write 0x77 bit 4 and 5*/
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_USER_OFFSET_6_GYRO_OFF_Z__REG,
+				&v_data2_u8r,
+				SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		return ERROR;
+		}
+	}
+return com_rslt;
+}
+/*!
+ *	@brief This API read the accel offset enable bit
+ *	from the register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_enable(
+u8 *v_accel_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read accel offset enable */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_accel_off_enable_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write the accel offset enable bit
+ *	to the register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_enable(
+u8 v_accel_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+			} else {
+			/* write accel offset enable */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE,
+				v_accel_off_enable_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API read the gyro offset enable bit
+ *	from the register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_enable(
+u8 *v_gyro_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read gyro offset*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_EN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_gyro_off_enable_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_OFFSET_6_GYRO_OFF_EN);
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API write the gyro offset enable bit
+ *	to the register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_enable(
+u8 v_gyro_off_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write gyro offset*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_OFFSET_6_GYRO_OFF_EN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_USER_OFFSET_6_GYRO_OFF_EN,
+				v_gyro_off_enable_u8);
+				com_rslt += p_smi130->SMI130_BUS_WRITE_FUNC(
+				p_smi130->dev_addr,
+				SMI130_USER_OFFSET_6_GYRO_OFF_EN__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
+/*!
+ *	@brief This API reads step counter value
+ *	from the register 0x78 and 0x79
+ *
+ *
+ *
+ *
+ *  @param v_step_cnt_s16 : The value of step counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_step_count(u16 *v_step_cnt_s16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* array having the step counter LSB and MSB data
+	a_data_u8r[0] - LSB
+	a_data_u8r[1] - MSB*/
+	u8 a_data_u8r[SMI130_STEP_COUNT_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read step counter */
+			com_rslt =
+			p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+			SMI130_USER_STEP_COUNT_LSB__REG,
+			a_data_u8r, SMI130_STEP_COUNTER_LENGTH);
+
+			*v_step_cnt_s16 = (s16)
+			((((s32)((s8)a_data_u8r[SMI130_STEP_COUNT_MSB_BYTE]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+			| (a_data_u8r[SMI130_STEP_COUNT_LSB_BYTE]));
+		}
+	return com_rslt;
+}
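+/*
+ * Usage sketch (illustrative only): reading the current step count,
+ * assuming the step counter has previously been enabled through
+ * smi130_set_step_counter_enable() defined below.
+ *
+ *	u16 v_step_cnt_u16 = SMI130_INIT_VALUE;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *	rslt = smi130_read_step_count(&v_step_cnt_u16);
+ *	// on SUCCESS, v_step_cnt_u16 holds the combined MSB/LSB count
+*/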
+ /*!
+ *	@brief This API Reads
+ *	step counter configuration
+ *	from the register 0x7A bit 0 to 7
+ *	and from the register 0x7B bit 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16 : The value of step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_step_config(
+u16 *v_step_config_u16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data1_u8r = SMI130_INIT_VALUE;
+	u8 v_data2_u8r = SMI130_INIT_VALUE;
+	u16 v_data3_u8r = SMI130_INIT_VALUE;
+	/* Read the 0 to 7 bit*/
+	com_rslt =
+	p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+	SMI130_USER_STEP_CONFIG_ZERO__REG,
+	&v_data1_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the 8 to 10 bit*/
+	com_rslt +=
+	p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+	SMI130_USER_STEP_CONFIG_ONE_CNF1__REG,
+	&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	v_data2_u8r = SMI130_GET_BITSLICE(v_data2_u8r,
+	SMI130_USER_STEP_CONFIG_ONE_CNF1);
+	v_data3_u8r = ((u16)((((u32)
+	((u8)v_data2_u8r))
+	<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (v_data1_u8r)));
+	/* Read the 11 to 14 bit*/
+	com_rslt +=
+	p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+	SMI130_USER_STEP_CONFIG_ONE_CNF2__REG,
+	&v_data1_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	v_data1_u8r = SMI130_GET_BITSLICE(v_data1_u8r,
+	SMI130_USER_STEP_CONFIG_ONE_CNF2);
+	*v_step_config_u16 = ((u16)((((u32)
+	((u8)v_data1_u8r))
+	<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (v_data3_u8r)));
+
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write
+ *	step counter configuration
+ *	to the register 0x7A bit 0 to 7
+ *	and to the register 0x7B bit 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16   :
+ *	the value of  Enable step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_config(
+u16 v_step_config_u16)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data1_u8r = SMI130_INIT_VALUE;
+	u8 v_data2_u8r = SMI130_INIT_VALUE;
+	u16 v_data3_u16 = SMI130_INIT_VALUE;
+
+	/* write the 0 to 7 bit*/
+	v_data1_u8r = (u8)(v_step_config_u16 &
+	SMI130_STEP_CONFIG_0_7);
+	p_smi130->SMI130_BUS_WRITE_FUNC
+	(p_smi130->dev_addr,
+	SMI130_USER_STEP_CONFIG_ZERO__REG,
+	&v_data1_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* write the 8 to 10 bit*/
+	com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+	(p_smi130->dev_addr,
+	SMI130_USER_STEP_CONFIG_ONE_CNF1__REG,
+	&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data3_u16 = (u16) (v_step_config_u16 &
+		SMI130_STEP_CONFIG_8_10);
+		v_data1_u8r = (u8)(v_data3_u16
+		>> SMI130_SHIFT_BIT_POSITION_BY_08_BITS);
+		v_data2_u8r = SMI130_SET_BITSLICE(v_data2_u8r,
+		SMI130_USER_STEP_CONFIG_ONE_CNF1, v_data1_u8r);
+		p_smi130->SMI130_BUS_WRITE_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_STEP_CONFIG_ONE_CNF1__REG,
+		&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	}
+	/* write the 11 to 14 bit*/
+	com_rslt += p_smi130->SMI130_BUS_READ_FUNC
+	(p_smi130->dev_addr,
+	SMI130_USER_STEP_CONFIG_ONE_CNF2__REG,
+	&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	if (com_rslt == SUCCESS) {
+		v_data3_u16 = (u16) (v_step_config_u16 &
+		SMI130_STEP_CONFIG_11_14);
+		v_data1_u8r = (u8)(v_data3_u16
+		>> SMI130_SHIFT_BIT_POSITION_BY_12_BITS);
+		v_data2_u8r = SMI130_SET_BITSLICE(v_data2_u8r,
+		SMI130_USER_STEP_CONFIG_ONE_CNF2, v_data1_u8r);
+		p_smi130->SMI130_BUS_WRITE_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_STEP_CONFIG_ONE_CNF2__REG,
+		&v_data2_u8r, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	}
+
+	return com_rslt;
+}
+ /*!
+ *	@brief This API read enable step counter
+ *	from the register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_step_counter_enable(
+u8 *v_step_counter_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the step counter */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_step_counter_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API write enable step counter
+ *	to the register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_counter_enable(u8 v_step_counter_u8)
+{
+/* variable used for return the status of communication result*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+/* check the p_smi130 structure as NULL*/
+if (p_smi130 == SMI130_NULL) {
+	return E_SMI130_NULL_PTR;
+} else {
+	if (v_step_counter_u8 <= SMI130_MAX_GYRO_STEP_COUNTER) {
+		/* write the step counter */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+		(p_smi130->dev_addr,
+		SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		if (com_rslt == SUCCESS) {
+			v_data_u8 =
+			SMI130_SET_BITSLICE(v_data_u8,
+			SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE,
+			v_step_counter_u8);
+			com_rslt +=
+			p_smi130->SMI130_BUS_WRITE_FUNC
+			(p_smi130->dev_addr,
+			SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	} else {
+	com_rslt = E_SMI130_OUT_OF_RANGE;
+	}
+}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API set Step counter modes
+ *
+ *
+ *  @param  v_step_mode_u8 : The value of step counter mode
+ *  value    |   mode
+ * ----------|-----------
+ *   0       | SMI130_STEP_NORMAL_MODE
+ *   1       | SMI130_STEP_SENSITIVE_MODE
+ *   2       | SMI130_STEP_ROBUST_MODE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_mode(u8 v_step_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	switch (v_step_mode_u8) {
+	case SMI130_STEP_NORMAL_MODE:
+		com_rslt = smi130_set_step_config(
+		STEP_CONFIG_NORMAL);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_STEP_SENSITIVE_MODE:
+		com_rslt = smi130_set_step_config(
+		STEP_CONFIG_SENSITIVE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_STEP_ROBUST_MODE:
+		com_rslt = smi130_set_step_config(
+		STEP_CONFIG_ROBUST);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+
+	return com_rslt;
+}
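+/*
+ * Usage sketch (illustrative only): a typical bring-up sequence picks
+ * one of the predefined step configurations and then sets the step
+ * counter enable bit. Passing 0x01 to the enable helper is assumed to
+ * switch the counter on, matching the ENABLE convention used by the
+ * other single-bit fields in this file.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *	rslt = smi130_set_step_mode(SMI130_STEP_NORMAL_MODE);
+ *	rslt += smi130_set_step_counter_enable(0x01);
+*/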
+/*!
+ *	@brief This API used to map the significant motion
+ *	interrupt
+ *
+ *
+ *  @param  v_significant_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  SMI130_MAP_INTR1
+ *   1       |  SMI130_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_map_significant_motion_intr(
+u8 v_significant_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_sig_motion_u8 = SMI130_INIT_VALUE;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	u8 v_any_motion_intr1_stat_u8 = SMI130_ENABLE_ANY_MOTION_INTR1;
+	u8 v_any_motion_intr2_stat_u8 = SMI130_ENABLE_ANY_MOTION_INTR2;
+	u8 v_any_motion_axis_stat_u8 = SMI130_ENABLE_ANY_MOTION_AXIS;
+	/* enable the significant motion interrupt */
+	com_rslt = smi130_get_intr_significant_motion_select(&v_sig_motion_u8);
+	if (v_sig_motion_u8 != SMI130_SIG_MOTION_STAT_HIGH)
+		com_rslt += smi130_set_intr_significant_motion_select(
+		SMI130_SIG_MOTION_INTR_ENABLE);
+	switch (v_significant_u8) {
+	case SMI130_MAP_INTR1:
+		/* interrupt */
+		com_rslt += smi130_read_reg(
+		SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_intr1_stat_u8;
+		/* map the significant motion interrupt to any-motion interrupt1*/
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* axis*/
+		com_rslt = smi130_read_reg(SMI130_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_axis_stat_u8;
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+
+	case SMI130_MAP_INTR2:
+		/* map the significant motion interrupt to any-motion interrupt2*/
+		com_rslt += smi130_read_reg(
+		SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_intr2_stat_u8;
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* axis*/
+		com_rslt = smi130_read_reg(SMI130_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_any_motion_axis_stat_u8;
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_ENABLE_0_ADDR,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+
+	}
+	return com_rslt;
+}
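+/*
+ * Usage sketch (illustrative only): routing the significant motion
+ * interrupt to the INT1 pin with the mapping helper above. The helper
+ * enables the significant motion feature and the any-motion axes
+ * itself, so a single call is sufficient.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *	rslt = smi130_map_significant_motion_intr(SMI130_MAP_INTR1);
+*/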
+/*!
+ *	@brief This API used to map the step detector
+ *	interrupt
+ *
+ *
+ *  @param  v_step_detector_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  SMI130_MAP_INTR1
+ *   1       |  SMI130_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_map_step_detector_intr(
+u8 v_step_detector_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_step_det_u8 = SMI130_INIT_VALUE;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	u8 v_low_g_intr_u81_stat_u8 = SMI130_LOW_G_INTR_STAT;
+	u8 v_low_g_intr_u82_stat_u8 = SMI130_LOW_G_INTR_STAT;
+	u8 v_low_g_enable_u8 = SMI130_ENABLE_LOW_G;
+	/* read the v_status_s8 of step detector interrupt*/
+	com_rslt = smi130_get_step_detector_enable(&v_step_det_u8);
+	if (v_step_det_u8 != SMI130_STEP_DET_STAT_HIGH)
+		com_rslt += smi130_set_step_detector_enable(
+		SMI130_STEP_DETECT_INTR_ENABLE);
+	switch (v_step_detector_u8) {
+	case SMI130_MAP_INTR1:
+		com_rslt += smi130_read_reg(
+		SMI130_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_intr_u81_stat_u8;
+		/* map the step detector interrupt
+		to Low-g interrupt 1*/
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_MAP_0_INTR1_LOW_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* Enable the Low-g interrupt*/
+		com_rslt = smi130_read_reg(
+		SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_enable_u8;
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_MAP_INTR2:
+		/* map the step detector interrupt
+		to Low-g interrupt 2*/
+		com_rslt += smi130_read_reg(
+		SMI130_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_intr_u82_stat_u8;
+
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_MAP_2_INTR2_LOW_G__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* Enable the Low-g interrupt*/
+		com_rslt = smi130_read_reg(
+		SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_data_u8 |= v_low_g_enable_u8;
+		com_rslt += smi130_write_reg(
+		SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API is used to clear the step counter
+ *
+ *
+ *  @param  : None
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_clear_step_counter(void)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* clear the step counter*/
+	com_rslt = smi130_set_command_register(RESET_STEP_COUNTER);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+
+}
+ /*!
+ *	@brief This API writes a value to the command register 0x7E, bits 0 to 7
+ *
+ *
+ *  @param  v_command_reg_u8 : The value to write command register
+ *  value   |  Description
+ * ---------|--------------------------------------------------------
+ *	0x00	|	Reserved
+ *  0x03	|	Starts fast offset calibration for the accel and gyro
+ *	0x10	|	Sets the PMU mode for the Accelerometer to suspend
+ *	0x11	|	Sets the PMU mode for the Accelerometer to normal
+ *	0x12	|	Sets the PMU mode for the Accelerometer Lowpower
+ *  0x14	|	Sets the PMU mode for the Gyroscope to suspend
+ *	0x15	|	Sets the PMU mode for the Gyroscope to normal
+ *	0x16	|	Reserved
+ *	0x17	|	Sets the PMU mode for the Gyroscope to fast start-up
+ *  0x18	|	Sets the PMU mode for the Magnetometer to suspend
+ *	0x19	|	Sets the PMU mode for the Magnetometer to normal
+ *	0x1A	|	Sets the PMU mode for the Magnetometer to Lowpower
+ *	0xB0	|	Clears all data in the FIFO
+ *  0xB1	|	Resets the interrupt engine
+ *	0xB2	|	step_cnt_clr Clears the step counter
+ *	0xB6	|	Triggers a reset
+ *	0x37	|	See extmode_en_last
+ *	0x9A	|	See extmode_en_last
+ *	0xC0	|	Enable the extended mode
+ *  0xC4	|	Erase NVM cell
+ *	0xC8	|	Load NVM cell
+ *	0xF0	|	Reset acceleration data path
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_command_register(u8 v_command_reg_u8)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write command register */
+			com_rslt = p_smi130->SMI130_BUS_WRITE_FUNC(
+			p_smi130->dev_addr,
+			SMI130_CMD_COMMANDS__REG,
+			&v_command_reg_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		}
+	return com_rslt;
+}
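+/*
+ * Illustrative usage sketch (not part of the driver flow): issuing the
+ * soft-reset command (0xB6 from the table above) through this API, followed
+ * by the generic read/write delay used elsewhere in this file. A real caller
+ * may need a longer post-reset settling delay.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_command_register(0xB6);
+ *	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+ */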
+ /*!
+ *	@brief This API reads the target page from register 0x7F, bits 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_target_page(u8 *v_target_page_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* read the page*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+			p_smi130->dev_addr,
+			SMI130_CMD_TARGET_PAGE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			*v_target_page_u8 = SMI130_GET_BITSLICE(v_data_u8,
+			SMI130_CMD_TARGET_PAGE);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the target page to register 0x7F, bits 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_target_page(u8 v_target_page_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_target_page_u8 <= SMI130_MAX_TARGET_PAGE) {
+			/* write the page*/
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_CMD_TARGET_PAGE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_CMD_TARGET_PAGE,
+				v_target_page_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_CMD_TARGET_PAGE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
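+/*
+ * Illustrative sketch: switching to the chip-level trim/test page and back,
+ * mirroring the sequence used later in smi130_bmm150_mag_interface_init()
+ * (the page macros are the ones used there).
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_target_page(SMI130_WRITE_TARGET_PAGE1);
+ *	rslt += smi130_set_paging_enable(SMI130_WRITE_ENABLE_PAGE1);
+ *	... access page-1 registers here ...
+ *	rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE0);
+ */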
+ /*!
+ *	@brief This API reads the paging enable bit from register 0x7F, bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_paging_enable(u8 *v_page_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* read the page enable */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+		p_smi130->dev_addr,
+		SMI130_CMD_PAGING_EN__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		*v_page_enable_u8 = SMI130_GET_BITSLICE(v_data_u8,
+		SMI130_CMD_PAGING_EN);
+		}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API writes the paging enable bit to register 0x7F, bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_paging_enable(
+u8 v_page_enable_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		if (v_page_enable_u8 <= SMI130_MAX_VALUE_PAGE) {
+			/* write the page enable */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_CMD_PAGING_EN__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_CMD_PAGING_EN,
+				v_page_enable_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_CMD_PAGING_EN__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		} else {
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+		}
+	}
+	return com_rslt;
+}
+ /*!
+ *	@brief This API reads the
+ *	pull-up configuration from register 0x85, bits 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_pullup_configuration(
+u8 *v_control_pullup_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt  = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+		/* read pull up value */
+		com_rslt = p_smi130->SMI130_BUS_READ_FUNC(
+		p_smi130->dev_addr,
+		SMI130_COM_C_TRIM_FIVE__REG,
+		&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		*v_control_pullup_u8 = SMI130_GET_BITSLICE(v_data_u8,
+		SMI130_COM_C_TRIM_FIVE);
+		}
+	return com_rslt;
+
+}
+ /*!
+ *	@brief This API writes the
+ *	pull-up configuration to register 0x85, bits 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_pullup_configuration(
+u8 v_control_pullup_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+		} else {
+			/* write  pull up value */
+			com_rslt = p_smi130->SMI130_BUS_READ_FUNC
+			(p_smi130->dev_addr,
+			SMI130_COM_C_TRIM_FIVE__REG,
+			&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			if (com_rslt == SUCCESS) {
+				v_data_u8 =
+				SMI130_SET_BITSLICE(v_data_u8,
+				SMI130_COM_C_TRIM_FIVE,
+				v_control_pullup_u8);
+				com_rslt +=
+				p_smi130->SMI130_BUS_WRITE_FUNC
+				(p_smi130->dev_addr,
+				SMI130_COM_C_TRIM_FIVE__REG,
+				&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+			}
+		}
+	return com_rslt;
+}
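+/*
+ * Illustrative sketch: the pull-up bits are normally updated read-modify-
+ * write with target page 1 selected first, as done in
+ * smi130_bmm150_mag_interface_init() below (SMI130_PULL_UP_DATA is the value
+ * OR-ed in there).
+ *
+ *	u8 pullup = 0;
+ *
+ *	smi130_get_pullup_configuration(&pullup);
+ *	pullup |= SMI130_PULL_UP_DATA;
+ *	smi130_set_pullup_configuration(pullup);
+ */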
+
+/*!
+ *	@brief This function is used to read the compensated magnetometer values.
+ *	Before reading the compensated mag data,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_compensate_xyz(
+struct smi130_mag_xyz_s32_t *mag_comp_xyz)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	struct smi130_mag_xyzr_t mag_xyzr;
+	com_rslt = smi130_read_mag_xyzr(&mag_xyzr);
+	if (com_rslt)
+		return com_rslt;
+	/* Compensation for X axis */
+	mag_comp_xyz->x = smi130_bmm150_mag_compensate_X(
+	mag_xyzr.x, mag_xyzr.r);
+
+	/* Compensation for Y axis */
+	mag_comp_xyz->y = smi130_bmm150_mag_compensate_Y(
+	mag_xyzr.y, mag_xyzr.r);
+
+	/* Compensation for Z axis */
+	mag_comp_xyz->z = smi130_bmm150_mag_compensate_Z(
+	mag_xyzr.z, mag_xyzr.r);
+
+	return com_rslt;
+}
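+/*
+ * Illustrative sketch: reading one compensated BMM150 sample, assuming the
+ * mag interface has already been brought up (see
+ * smi130_bmm150_mag_interface_init() below).
+ *
+ *	struct smi130_mag_xyz_s32_t mag;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_bmm150_mag_compensate_xyz(&mag);
+ *	if (rslt == SUCCESS)
+ *		printk(KERN_INFO "mag x:%d y:%d z:%d\n", mag.x, mag.y, mag.z);
+ */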
+
+/*!
+ *	@brief This function is used to compute the compensated magnetometer
+ *	values from already-read raw data.
+ *	Before reading the compensated mag data,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_compensate_xyz_raw(
+struct smi130_mag_xyz_s32_t *mag_comp_xyz, struct smi130_mag_xyzr_t mag_xyzr)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	/* Compensation for X axis */
+	mag_comp_xyz->x = smi130_bmm150_mag_compensate_X(
+	mag_xyzr.x, mag_xyzr.r);
+
+	/* Compensation for Y axis */
+	mag_comp_xyz->y = smi130_bmm150_mag_compensate_Y(
+	mag_xyzr.y, mag_xyzr.r);
+
+	/* Compensation for Z axis */
+	mag_comp_xyz->z = smi130_bmm150_mag_compensate_Z(
+	mag_xyzr.z, mag_xyzr.r);
+
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to get the compensated BMM150 X data;
+ *	the output of X is returned as s32.
+ *	Before reading the compensated mag X data,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *
+ *
+ *  @param  v_mag_data_x_s16 : The value of mag raw X data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 smi130_bmm150_mag_compensate_X(s16 v_mag_data_x_s16, u16 v_data_r_u16)
+{
+s32 inter_retval = SMI130_INIT_VALUE;
+/* no overflow */
+if (v_mag_data_x_s16 != SMI130_MAG_FLIP_OVERFLOW_ADCVAL) {
+	if ((v_data_r_u16 != 0)
+	&& (mag_trim_mbl.dig_xyz1 != 0)) {
+		inter_retval = ((s32)(((u16)
+		((((s32)mag_trim_mbl.dig_xyz1)
+		<< SMI130_SHIFT_BIT_POSITION_BY_14_BITS)/
+		 (v_data_r_u16 != 0 ?
+		 v_data_r_u16 : mag_trim_mbl.dig_xyz1))) -
+		((u16)0x4000)));
+	} else {
+		inter_retval = SMI130_MAG_OVERFLOW_OUTPUT;
+		return inter_retval;
+	}
+	inter_retval = ((s32)((((s32)v_mag_data_x_s16) *
+			((((((((s32)mag_trim_mbl.dig_xy2) *
+			((((s32)inter_retval) *
+			((s32)inter_retval))
+			>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS)) +
+			 (((s32)inter_retval) *
+			  ((s32)(((s16)mag_trim_mbl.dig_xy1)
+			  << SMI130_SHIFT_BIT_POSITION_BY_07_BITS))))
+			  >> SMI130_SHIFT_BIT_POSITION_BY_09_BITS) +
+		   ((s32)0x100000)) *
+		  ((s32)(((s16)mag_trim_mbl.dig_x2) +
+		  ((s16)0xA0))))
+		  >> SMI130_SHIFT_BIT_POSITION_BY_12_BITS))
+		  >> SMI130_SHIFT_BIT_POSITION_BY_13_BITS)) +
+		(((s16)mag_trim_mbl.dig_x1)
+		<< SMI130_SHIFT_BIT_POSITION_BY_03_BITS);
+	/* check the overflow output */
+	if (inter_retval == (s32)SMI130_MAG_OVERFLOW_OUTPUT)
+		inter_retval = SMI130_MAG_OVERFLOW_OUTPUT_S32;
+} else {
+	/* overflow */
+	inter_retval = SMI130_MAG_OVERFLOW_OUTPUT;
+}
+return inter_retval;
+}
+/*!
+ *	@brief This API is used to get the compensated BMM150 Y data;
+ *	the output of Y is returned as s32.
+ *	Before reading the compensated mag Y data,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *
+ *
+ *  @param  v_mag_data_y_s16 : The value of mag raw Y data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Y data value output as s32
+ */
+s32 smi130_bmm150_mag_compensate_Y(s16 v_mag_data_y_s16, u16 v_data_r_u16)
+{
+s32 inter_retval = SMI130_INIT_VALUE;
+/* no overflow */
+if (v_mag_data_y_s16 != SMI130_MAG_FLIP_OVERFLOW_ADCVAL) {
+	if ((v_data_r_u16 != 0)
+	&& (mag_trim_mbl.dig_xyz1 != 0)) {
+		inter_retval = ((s32)(((u16)(((
+		(s32)mag_trim_mbl.dig_xyz1)
+		<< SMI130_SHIFT_BIT_POSITION_BY_14_BITS) /
+		(v_data_r_u16 != 0 ?
+		 v_data_r_u16 : mag_trim_mbl.dig_xyz1))) -
+		((u16)0x4000)));
+		} else {
+			inter_retval = SMI130_MAG_OVERFLOW_OUTPUT;
+			return inter_retval;
+		}
+	inter_retval = ((s32)((((s32)v_mag_data_y_s16) * ((((((((s32)
+		mag_trim_mbl.dig_xy2) * ((((s32) inter_retval) *
+		((s32)inter_retval)) >> SMI130_SHIFT_BIT_POSITION_BY_07_BITS))
+		+ (((s32)inter_retval) *
+		((s32)(((s16)mag_trim_mbl.dig_xy1)
+		<< SMI130_SHIFT_BIT_POSITION_BY_07_BITS))))
+		>> SMI130_SHIFT_BIT_POSITION_BY_09_BITS) +
+		((s32)0x100000))
+		* ((s32)(((s16)mag_trim_mbl.dig_y2)
+		+ ((s16)0xA0))))
+		>> SMI130_SHIFT_BIT_POSITION_BY_12_BITS))
+		>> SMI130_SHIFT_BIT_POSITION_BY_13_BITS)) +
+		(((s16)mag_trim_mbl.dig_y1)
+		<< SMI130_SHIFT_BIT_POSITION_BY_03_BITS);
+	/* check the overflow output */
+	if (inter_retval == (s32)SMI130_MAG_OVERFLOW_OUTPUT)
+		inter_retval = SMI130_MAG_OVERFLOW_OUTPUT_S32;
+} else {
+	/* overflow */
+	inter_retval = SMI130_MAG_OVERFLOW_OUTPUT;
+}
+return inter_retval;
+}
+/*!
+ *	@brief This API is used to get the compensated BMM150 Z data;
+ *	the output of Z is returned as s32.
+ *	Before reading the compensated mag Z data,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *
+ *
+ *  @param  v_mag_data_z_s16 : The value of mag raw Z data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Z data value output as s32
+ */
+s32 smi130_bmm150_mag_compensate_Z(s16 v_mag_data_z_s16, u16 v_data_r_u16)
+{
+	s32 retval = SMI130_INIT_VALUE;
+
+	if (v_mag_data_z_s16 != SMI130_MAG_HALL_OVERFLOW_ADCVAL) {
+		if ((v_data_r_u16 != 0)
+		   && (mag_trim_mbl.dig_z2 != 0)
+		/*   && (mag_trim_mbl.dig_z3 != 0)*/
+		   && (mag_trim_mbl.dig_z1 != 0)
+		   && (mag_trim_mbl.dig_xyz1 != 0)) {
+			retval = (((((s32)(v_mag_data_z_s16 - mag_trim_mbl.dig_z4))
+			<< SMI130_SHIFT_BIT_POSITION_BY_15_BITS) -
+			((((s32)mag_trim_mbl.dig_z3) *
+			((s32)(((s16)v_data_r_u16) -
+			((s16)mag_trim_mbl.dig_xyz1))))
+			>> SMI130_SHIFT_BIT_POSITION_BY_02_BITS))/
+			(mag_trim_mbl.dig_z2 +
+			((s16)(((((s32)mag_trim_mbl.dig_z1) *
+			((((s16)v_data_r_u16)
+			<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT))) +
+			(1 << SMI130_SHIFT_BIT_POSITION_BY_15_BITS))
+			>> SMI130_SHIFT_BIT_POSITION_BY_16_BITS))));
+		}
+	} else {
+		retval = SMI130_MAG_OVERFLOW_OUTPUT;
+	}
+	return retval;
+}
+ /*!
+ *	@brief This function is used to initialize the BMM150 sensor
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_interface_init(void)
+{
+	/* variable used to return the status of the communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	u8 v_pull_value_u8 = SMI130_INIT_VALUE;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	/* accel operation mode to normal*/
+	com_rslt = smi130_set_command_register(ACCEL_MODE_NORMAL);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* write the mag power mode as NORMAL*/
+	com_rslt += smi130_set_mag_interface_normal();
+
+	/* write 0x37, 0x9A and 0x30 to the command register 0x7E*/
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_ONE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_TWO);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_THREE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/*switch the page1*/
+	com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_target_page(&v_data_u8);
+	com_rslt += smi130_set_paging_enable(SMI130_WRITE_ENABLE_PAGE1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_paging_enable(&v_data_u8);
+	/* enable the pullup configuration from
+	the register 0x05 bit 4 and 5 as 10*/
+	smi130_get_pullup_configuration(&v_pull_value_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	v_pull_value_u8 = v_pull_value_u8 | SMI130_PULL_UP_DATA;
+	com_rslt += smi130_set_pullup_configuration(v_pull_value_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/*switch the page0*/
+	com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE0);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_target_page(&v_data_u8);
+	/* Write the BMM150 i2c address*/
+	com_rslt += smi130_set_i2c_device_addr(SMI130_AUX_BMM150_I2C_ADDRESS);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* enable the mag interface to manual mode*/
+	com_rslt += smi130_set_mag_manual_enable(SMI130_MANUAL_ENABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_mag_manual_enable(&v_data_u8);
+	/*Enable the MAG interface */
+	com_rslt += smi130_set_if_mode(SMI130_ENABLE_MAG_IF_MODE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_if_mode(&v_data_u8);
+	/* Mag normal mode*/
+	com_rslt += smi130_bmm150_mag_wakeup();
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* Read the BMM150 device id is 0x32*/
+	/*com_rslt += smi130_set_mag_read_addr(SMI130_BMM150_CHIP_ID);*/
+	/*p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);*/
+	/*com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);*/
+	/**v_chip_id_u8 = v_data_u8;*/
+	/*p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);*/
+	/* write the power mode register*/
+	com_rslt += smi130_set_mag_write_data(SMI130_BMM_POWER_MODE_REG);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* write to register 0x4C to set the power mode to normal*/
+	com_rslt += smi130_set_mag_write_addr(
+	SMI130_BMM150_POWE_MODE_REG);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* read the mag trim values*/
+	com_rslt += smi130_read_bmm150_mag_trim_mbl();
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	/* To avoid the auto mode enable when manual mode operation running*/
+	V_bmm150_maual_auto_condition_u8_mbl = SMI130_MANUAL_ENABLE;
+	/* write the XY and Z repetitions*/
+	com_rslt += smi130_set_bmm150_mag_presetmode(
+	SMI130_MAG_PRESETMODE_REGULAR);
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	/* To avoid the auto mode enable when manual mode operation running*/
+	V_bmm150_maual_auto_condition_u8_mbl = SMI130_MANUAL_DISABLE;
+	/* Set the power mode of mag as force mode*/
+	/* The data has to be written for the register;
+	it writes the value into register 0x4F */
+	com_rslt += smi130_set_mag_write_data(SMI130_BMM150_FORCE_MODE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	printk(KERN_INFO "com_rslt:%d, <%s><%d>\n",
+		com_rslt, __func__, __LINE__);
+	/* write into power mode register*/
+	com_rslt += smi130_set_mag_write_addr(
+	SMI130_BMM150_POWE_MODE_REG);
+	/* set the mag output data rate to 25Hz*/
+	com_rslt += smi130_set_mag_output_data_rate(
+	SMI130_MAG_OUTPUT_DATA_RATE_25HZ);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	/* When the mag interface is in auto mode, the mag read address
+	starts at register 0x42*/
+	com_rslt += smi130_set_mag_read_addr(
+	SMI130_BMM150_DATA_REG);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* enable mag interface to auto mode*/
+	com_rslt += smi130_set_mag_manual_enable(SMI130_MANUAL_DISABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_mag_manual_enable(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+}
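+/*
+ * Illustrative bring-up sketch: a typical caller initializes the BMM150
+ * interface once and then reads compensated data; error handling is reduced
+ * to a single return-code check.
+ *
+ *	struct smi130_mag_xyz_s32_t mag;
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_bmm150_mag_interface_init();
+ *	if (rslt == SUCCESS)
+ *		rslt = smi130_bmm150_mag_compensate_xyz(&mag);
+ */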
+ /*!
+ *	@brief This function is used to enable the mag power control
+ *	bit
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_wakeup(void)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	u8 v_try_times_u8 = SMI130_BMM150_MAX_RETRY_WAKEUP;
+	u8 v_power_control_bit_u8 = SMI130_INIT_VALUE;
+	u8 i = SMI130_INIT_VALUE;
+
+	for (i = SMI130_INIT_VALUE; i < v_try_times_u8; i++) {
+		com_rslt = smi130_set_mag_write_data(SMI130_BMM150_POWER_ON);
+		p_smi130->delay_msec(SMI130_BMM150_WAKEUP_DELAY1);
+		/*write 0x4B register to enable power control bit*/
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_POWE_CONTROL_REG);
+		p_smi130->delay_msec(SMI130_BMM150_WAKEUP_DELAY2);
+		com_rslt += smi130_set_mag_read_addr(
+		SMI130_BMM150_POWE_CONTROL_REG);
+		/* 0x04 is secondary read mag x lsb register */
+		p_smi130->delay_msec(SMI130_BMM150_WAKEUP_DELAY3);
+		com_rslt += smi130_read_reg(SMI130_USER_DATA_0_ADDR,
+		&v_power_control_bit_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+		v_power_control_bit_u8 = SMI130_BMM150_SET_POWER_CONTROL
+		& v_power_control_bit_u8;
+		if (v_power_control_bit_u8 == SMI130_BMM150_POWER_ON)
+			break;
+	}
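+	/* from here on only the final wake-up status is reported;
+	the com_rslt accumulated from the bus accesses above is overwritten */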
+	com_rslt = (i >= v_try_times_u8) ?
+	SMI130_BMM150_POWER_ON_FAIL : SMI130_BMM150_POWER_ON_SUCCESS;
+	return com_rslt;
+}
+ /*!
+ *	@brief This function is used to set the magnetometer
+ *	power mode.
+ *	@note
+ *	Before setting the mag power mode,
+ *	make sure the following point is addressed:
+ *		Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | SMI130_MAG_FORCE_MODE
+ *   1       | SMI130_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_bmm150_mag_and_secondary_if_power_mode(
+u8 v_mag_sec_if_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	/* set the accel power mode to NORMAL*/
+	com_rslt = smi130_set_command_register(ACCEL_MODE_NORMAL);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+	/* set mag interface manual mode*/
+	if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE)	{
+		com_rslt += smi130_set_mag_manual_enable(
+		SMI130_MANUAL_ENABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+	com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+
+	switch (v_mag_sec_if_pow_mode_u8) {
+	case SMI130_MAG_FORCE_MODE:
+		/* set the secondary mag power mode as NORMAL*/
+		com_rslt += smi130_set_mag_interface_normal();
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+		/* set the mag power mode as FORCE mode*/
+		com_rslt += smi130_bmm150_mag_set_power_mode(FORCE_MODE);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_MAG_SUSPEND_MODE:
+		/* set the mag power mode as SUSPEND mode*/
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+		com_rslt += smi130_bmm150_mag_set_power_mode(SUSPEND_MODE);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* set the secondary mag power mode as SUSPEND*/
+		com_rslt += smi130_set_command_register(MAG_MODE_SUSPEND);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+	if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE) {
+		/* set mag interface auto mode*/
+		com_rslt += smi130_set_mag_manual_enable(
+		SMI130_MANUAL_DISABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+	com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+	return com_rslt;
+}
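+/*
+ * Illustrative sketch: putting the BMM150 and the secondary interface into
+ * SUSPEND for a low-power state and restoring FORCE mode afterwards.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_bmm150_mag_and_secondary_if_power_mode(
+ *	SMI130_MAG_SUSPEND_MODE);
+ *	... system stays in low power ...
+ *	rslt += smi130_set_bmm150_mag_and_secondary_if_power_mode(
+ *	SMI130_MAG_FORCE_MODE);
+ */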
+/*!
+ *	@brief This function is used to set the magnetometer
+ *	power mode.
+ *	@note
+ *	Before setting the mag power mode,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *	@param v_mag_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | FORCE_MODE
+ *   1       | SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_set_power_mode(
+u8 v_mag_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	u8 manual_enable_status = 0;
+	/* set mag interface manual mode*/
+	if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE) {
+		com_rslt = smi130_set_mag_manual_enable(
+		SMI130_MANUAL_ENABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_get_mag_manual_enable(&manual_enable_status);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		printk(KERN_INFO "1com_rslt:%d, manual:%d, manual_read:%d\n",
+		com_rslt, p_smi130->mag_manual_enable, manual_enable_status);
+	}
+	printk(KERN_INFO "2com_rslt:%d, manual:%d, manual_read:%d\n",
+	com_rslt, p_smi130->mag_manual_enable, manual_enable_status);
+
+	switch (v_mag_pow_mode_u8) {
+	case FORCE_MODE:
+		/* Set the power control bit enabled */
+		com_rslt = smi130_bmm150_mag_wakeup();
+		/* write the mag power mode as FORCE mode*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_BMM150_FORCE_MODE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_POWE_MODE_REG);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* To avoid the auto mode enable when manual
+		mode operation running*/
+		V_bmm150_maual_auto_condition_u8_mbl = SMI130_MANUAL_ENABLE;
+		/* set the preset mode */
+		com_rslt += smi130_set_bmm150_mag_presetmode(
+		SMI130_MAG_PRESETMODE_REGULAR);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* To avoid the auto mode enable when manual
+		mode operation running*/
+		V_bmm150_maual_auto_condition_u8_mbl = SMI130_MANUAL_DISABLE;
+		/* set the mag read address to data registers*/
+		com_rslt += smi130_set_mag_read_addr(
+		SMI130_BMM150_DATA_REG);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SUSPEND_MODE:
+		printk(KERN_INFO "3com_rslt:%d, manual:%d, read_manual:%d\n",
+		com_rslt, p_smi130->mag_manual_enable, manual_enable_status);
+		/* Set the power mode of mag as suspend mode*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_BMM150_POWER_OFF);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_POWE_CONTROL_REG);
+		printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+		com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+	printk(KERN_INFO "4com_rslt:%d, manual:%d, manual_read:%d\n",
+	com_rslt, p_smi130->mag_manual_enable, manual_enable_status);
+	/* set mag interface auto mode*/
+	if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE) {
+		com_rslt += smi130_set_mag_manual_enable(
+		SMI130_MANUAL_DISABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_get_mag_manual_enable(&manual_enable_status);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "5com_rslt:%d, manual:%d, manual_read:%d\n",
+	com_rslt, p_smi130->mag_manual_enable, manual_enable_status);
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to set the preset modes of the BMM150.
+ *	The preset mode setting depends on the data rate and the XY and Z
+ *	repetitions.
+ *
+ *	@note
+ *	Before setting the mag preset mode,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *
+ *  @param  v_mode_u8: The value of pre-set mode selection value
+ *  value    |  pre_set mode
+ * ----------|------------
+ *   1       | SMI130_MAG_PRESETMODE_LOWPOWER
+ *   2       | SMI130_MAG_PRESETMODE_REGULAR
+ *   3       | SMI130_MAG_PRESETMODE_HIGHACCURACY
+ *   4       | SMI130_MAG_PRESETMODE_ENHANCED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_bmm150_mag_presetmode(u8 v_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	switch (v_mode_u8) {
+	case SMI130_MAG_PRESETMODE_LOWPOWER:
+		/* write the XY and Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt = smi130_set_mag_write_data(
+		SMI130_MAG_LOWPOWER_REPXY);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_XY_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_LOWPOWER_REPZ);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_Z_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* set the mag v_data_u8 rate as 10 to the register 0x4C*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_LOWPOWER_DR);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_POWE_MODE_REG);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_MAG_PRESETMODE_REGULAR:
+		/* write the XY and Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt = smi130_set_mag_write_data(
+		SMI130_MAG_REGULAR_REPXY);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_XY_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_REGULAR_REPZ);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_Z_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* set the mag v_data_u8 rate as 10 to the register 0x4C*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_REGULAR_DR);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_POWE_MODE_REG);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_MAG_PRESETMODE_HIGHACCURACY:
+		/* write the XY and Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt = smi130_set_mag_write_data(
+		SMI130_MAG_HIGHACCURACY_REPXY);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_XY_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_HIGHACCURACY_REPZ);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_Z_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* set the mag v_data_u8 rate as 20 to the register 0x4C*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_HIGHACCURACY_DR);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_POWE_MODE_REG);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_MAG_PRESETMODE_ENHANCED:
+		/* write the XY and Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt = smi130_set_mag_write_data(
+		SMI130_MAG_ENHANCED_REPXY);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_XY_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* write the Z repetitions*/
+		/* The v_data_u8 have to write for the register
+		It write the value in the register 0x4F*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_ENHANCED_REPZ);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_Z_REP);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* set the mag v_data_u8 rate as 10 to the register 0x4C*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_MAG_ENHANCED_DR);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_BMM150_POWE_MODE_REG);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+
+	return com_rslt;
+}
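+/*
+ * Illustrative sketch: selecting the high-accuracy preset. The call assumes
+ * the mag interface is enabled and the secondary interface is not suspended,
+ * as described in the note above; it writes the repetitions and data rate
+ * over the secondary interface.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_set_bmm150_mag_presetmode(
+ *	SMI130_MAG_PRESETMODE_HIGHACCURACY);
+ */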
+ /*!
+ *	@brief This function is used to read the trim values of the magnetometer.
+ *
+ *	@note
+ *	Before reading the mag trim values,
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note
+ *	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_bmm150_mag_trim_mbl(void)
+{
+	/* variable used to return the status of the communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array holding the bmm150 trim data
+	*/
+	u8 v_data_u8[SMI130_MAG_TRIM_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE};
+	/* read dig_x1 value */
+	com_rslt = smi130_set_mag_read_addr(
+	SMI130_MAG_DIG_X1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_X1],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_x1 = v_data_u8[SMI130_BMM150_DIG_X1];
+	/* read dig_y1 value */
+	com_rslt += smi130_set_mag_read_addr(
+	SMI130_MAG_DIG_Y1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_Y1],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_y1 = v_data_u8[SMI130_BMM150_DIG_Y1];
+
+	/* read dig_x2 value */
+	com_rslt += smi130_set_mag_read_addr(
+	SMI130_MAG_DIG_X2);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_X2],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_x2 = v_data_u8[SMI130_BMM150_DIG_X2];
+	/* read dig_y2 value */
+	com_rslt += smi130_set_mag_read_addr(
+	SMI130_MAG_DIG_Y2);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_Y3],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_y2 = v_data_u8[SMI130_BMM150_DIG_Y3];
+
+	/* read dig_xy1 value */
+	com_rslt += smi130_set_mag_read_addr(
+	SMI130_MAG_DIG_XY1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_XY1],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_xy1 = v_data_u8[SMI130_BMM150_DIG_XY1];
+	/* read dig_xy2 value */
+	com_rslt += smi130_set_mag_read_addr(
+	SMI130_MAG_DIG_XY2);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 ls register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_XY2],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_xy2 = v_data_u8[SMI130_BMM150_DIG_XY2];
+
+	/* read dig_z1 lsb value */
+	com_rslt += smi130_set_mag_read_addr(
+	SMI130_MAG_DIG_Z1_LSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_Z1_LSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* read dig_z1 msb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_Z1_MSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_Z1_MSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_z1 =
+	(u16)((((u32)((u8)v_data_u8[SMI130_BMM150_DIG_Z1_MSB]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_BMM150_DIG_Z1_LSB]));
+
+	/* read dig_z2 lsb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_Z2_LSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_Z2_LSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* read dig_z2 msb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_Z2_MSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_Z2_MSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_z2 =
+	(s16)((((s32)((s8)v_data_u8[SMI130_BMM150_DIG_Z2_MSB]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_BMM150_DIG_Z2_LSB]));
+
+	/* read dig_z3 lsb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_Z3_LSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_DIG_Z3_LSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* read dig_z3 msb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_Z3_MSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_DIG_Z3_MSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_z3 =
+	(s16)((((s32)((s8)v_data_u8[SMI130_BMM150_DIG_DIG_Z3_MSB]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_BMM150_DIG_DIG_Z3_LSB]));
+
+	/* read dig_z4 lsb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_Z4_LSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_DIG_Z4_LSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* read dig_z4 msb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_Z4_MSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_DIG_Z4_MSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_z4 =
+	(s16)((((s32)((s8)v_data_u8[SMI130_BMM150_DIG_DIG_Z4_MSB]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_BMM150_DIG_DIG_Z4_LSB]));
+
+	/* read dig_xyz1 lsb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_XYZ1_LSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_DIG_XYZ1_LSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* read dig_xyz1 msb value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_MAG_DIG_XYZ1_MSB);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is v_mag_x_s16 msb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[SMI130_BMM150_DIG_DIG_XYZ1_MSB],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	mag_trim_mbl.dig_xyz1 =
+	(u16)((((u32)((u8)v_data_u8[SMI130_BMM150_DIG_DIG_XYZ1_MSB]))
+			<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+			(v_data_u8[SMI130_BMM150_DIG_DIG_XYZ1_LSB]));
+
+	return com_rslt;
+}
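+/*
+ * Note on the 16-bit trim words above: each is assembled from two secondary-
+ * interface byte reads as (msb << 8) | lsb. For example, with dig_z1 bytes
+ * lsb = 0x34 and msb = 0x12 the stored value is (0x12 << 8) | 0x34 = 0x1234
+ * (4660). The example byte values are illustrative only.
+ */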
+ /*!
+ *	@brief This function is used to initialize
+ *	the AKM09911 and AKM09912 sensors
+ *
+ *
+ *	@param v_akm_i2c_address_u8: The value of device address
+ *	AKM sensor   |  Slave address
+ * --------------|---------------------
+ *  AKM09911     |  AKM09911_I2C_ADDR_1
+ *     -         |  and AKM09911_I2C_ADDR_2
+ *  AKM09912     |  AKM09912_I2C_ADDR_1
+ *     -         |  AKM09912_I2C_ADDR_2
+ *     -         |  AKM09912_I2C_ADDR_3
+ *     -         |  AKM09912_I2C_ADDR_4
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm_mag_interface_init(
+u8 v_akm_i2c_address_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_pull_value_u8 = SMI130_INIT_VALUE;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	u8 v_akm_chip_id_u8 = SMI130_INIT_VALUE;
+	/* accel operation mode to normal*/
+	com_rslt = smi130_set_command_register(ACCEL_MODE_NORMAL);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_command_register(MAG_MODE_NORMAL);
+	p_smi130->delay_msec(SMI130_AKM_INIT_DELAY);
+	smi130_get_mag_power_mode_stat(&v_data_u8);
+	/* write 0x37, 0x9A and 0x30 to the command register 0x7E*/
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_ONE);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_TWO);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_THREE);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/*switch the page1*/
+	com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_target_page(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_paging_enable(SMI130_WRITE_ENABLE_PAGE1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_paging_enable(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* enable the pullup configuration from
+	the register 0x05 bit 4 and 5  to 10*/
+	smi130_get_pullup_configuration(&v_pull_value_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	v_pull_value_u8 = v_pull_value_u8 | SMI130_PULL_UP_DATA;
+	com_rslt += smi130_set_pullup_configuration(v_pull_value_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	/*switch the page0*/
+	com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE0);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_target_page(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* Write the AKM09911 or AKM09912 i2c address*/
+	com_rslt += smi130_set_i2c_device_addr(v_akm_i2c_address_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* enable the mag interface to manual mode*/
+	com_rslt += smi130_set_mag_manual_enable(SMI130_MANUAL_ENABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_mag_manual_enable(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/*Enable the MAG interface */
+	com_rslt += smi130_set_if_mode(SMI130_ENABLE_MAG_IF_MODE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_if_mode(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	/* Set the AKM Fuse ROM mode */
+	/* Set value for fuse ROM mode*/
+	com_rslt += smi130_set_mag_write_data(AKM_FUSE_ROM_MODE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* AKM mode address is 0x31*/
+	com_rslt += smi130_set_mag_write_addr(AKM_POWER_MODE_REG);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Read the Fuse ROM v_data_u8 from registers
+	0x60,0x61 and 0x62*/
+	/* ASAX v_data_u8 */
+	com_rslt += smi130_read_bosch_akm_sensitivity_data();
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* read the device id of the AKM sensor
+	if device id is 0x05 - AKM09911
+	if device id is 0x04 - AKM09912*/
+	com_rslt += smi130_set_mag_read_addr(AKM09912_CHIP_ID_REG);
+	/* 0x04 is mag_x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_akm_chip_id_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	printk(KERN_INFO "smi130,addr:0x%x, akm_chip_id:0x%x",
+	v_akm_i2c_address_u8, v_akm_chip_id_u8);
+	/* Set the value for power-down mode*/
+	com_rslt += smi130_set_mag_write_data(AKM_POWER_DOWN_MODE_DATA);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* AKM mode address is 0x31*/
+	com_rslt += smi130_set_mag_write_addr(AKM_POWER_MODE_REG);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Set AKM Force mode*/
+	com_rslt += smi130_set_mag_write_data(
+	AKM_SINGLE_MEASUREMENT_MODE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* AKM mode address is 0x31*/
+	com_rslt += smi130_set_mag_write_addr(AKM_POWER_MODE_REG);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Set the AKM read xyz v_data_u8 address*/
+	com_rslt += smi130_set_mag_read_addr(AKM_DATA_REGISTER);
+	/* set the mag output data rate to 25Hz*/
+	com_rslt += smi130_set_mag_output_data_rate(
+	SMI130_MAG_OUTPUT_DATA_RATE_25HZ);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* Enable mag interface to auto mode*/
+	com_rslt += smi130_set_mag_manual_enable(SMI130_MANUAL_DISABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_mag_manual_enable(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+}
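+/*
+ * Illustrative sketch: initializing an AKM09911 behind the secondary
+ * interface, using one of the slave addresses listed in the comment above
+ * (AKM09911_I2C_ADDR_1 is assumed to be defined in the matching header).
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_bosch_akm_mag_interface_init(AKM09911_I2C_ADDR_1);
+ */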
+ /*!
+ *	@brief This function is used to read the sensitivity data of the
+ *	AKM09911 and AKM09912.
+ *
+ *	@note Before reading the mag sensitivity values,
+ *	make sure the following two points are addressed
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note	2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_bosch_akm_sensitivity_data(void)
+{
+	/* variable used to return the status of the communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array holding the sensitivity ax,ay and az data*/
+	u8 v_data_u8[SMI130_AKM_SENSITIVITY_DATA_SIZE] = {
+	SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* read asax value */
+	com_rslt = smi130_set_mag_read_addr(SMI130_BST_AKM_ASAX);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[AKM_ASAX],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	akm_asa_data_mbl.asax = v_data_u8[AKM_ASAX];
+	/* read asay value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_BST_AKM_ASAY);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[AKM_ASAY],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	akm_asa_data_mbl.asay = v_data_u8[AKM_ASAY];
+	/* read asaz value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_BST_AKM_ASAZ);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[AKM_ASAZ],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	akm_asa_data_mbl.asaz = v_data_u8[AKM_ASAZ];
+
+	return com_rslt;
+}
+/*!
+ *	@brief This API is used to get the compensated X data
+ *	of the AKM09911; the output of X is returned as s32.
+ *	@note	Before reading the compensated mag X data,
+ *			make sure the following two points are addressed
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it
+ *		by calling smi130_set_if_mode(0x02).
+ *	@note 2.	Also confirm that the secondary-interface power mode
+ *		is not SUSPEND, by using the function
+ *		smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is SUSPEND,
+ *		switch it to NORMAL mode by calling
+ *		smi130_set_command_register(0x19).
+ *
+ *
+ *  @param v_bosch_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09911_compensate_X(s16 v_bosch_akm_x_s16)
+{
+	/*Return value of AKM x compensated v_data_u8*/
+	s32 retval = SMI130_INIT_VALUE;
+	/* Convert raw v_data_u8 into compensated v_data_u8*/
+	retval = (v_bosch_akm_x_s16 *
+	((akm_asa_data_mbl.asax/AKM09911_SENSITIVITY_DIV) +
+	SMI130_GEN_READ_WRITE_DATA_LENGTH));
+	return retval;
+}
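+/*
+ * Illustrative note (not part of the original driver code): the AKM09911
+ * compensation above follows the usual sensitivity adjustment
+ *
+ *	H_adj = H_raw * (ASA / AKM09911_SENSITIVITY_DIV + 1)
+ *
+ * where SMI130_GEN_READ_WRITE_DATA_LENGTH is reused as the constant 1.
+ * Assuming AKM09911_SENSITIVITY_DIV is 128 (the divisor in the AK09911
+ * data sheet formula), a raw X count of 100 with asax = 128 compensates
+ * to 100 * (128 / 128 + 1) = 200; the division on the u8 ASA value is
+ * integer division, so smaller ASA values truncate to a factor of 1.
+ */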
+/*!
+ *	@brief This API is used to get the compensated Y data
+ *	of the AKM09911; the output of Y is returned as s32.
+ *	@note	Before reading the mag compensated Y data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09911_compensate_Y(s16 v_bosch_akm_y_s16)
+{
+	/*Return value of AKM y compensated v_data_u8*/
+	s32 retval = SMI130_INIT_VALUE;
+	/* Convert raw v_data_u8 into compensated v_data_u8*/
+	retval = (v_bosch_akm_y_s16 *
+	((akm_asa_data_mbl.asay/AKM09911_SENSITIVITY_DIV) +
+	SMI130_GEN_READ_WRITE_DATA_LENGTH));
+	return retval;
+}
+/*!
+ *	@brief This API is used to get the compensated Z data
+ *	of the AKM09911; the output of Z is returned as s32.
+ *	@note	Before reading the mag compensated Z data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09911_compensate_Z(s16 v_bosch_akm_z_s16)
+{
+	/*Return value of AKM z compensated v_data_u8*/
+	s32 retval = SMI130_INIT_VALUE;
+	/* Convert raw v_data_u8 into compensated v_data_u8*/
+	retval = (v_bosch_akm_z_s16 *
+	((akm_asa_data_mbl.asaz/AKM09911_SENSITIVITY_DIV) +
+	SMI130_GEN_READ_WRITE_DATA_LENGTH));
+	return retval;
+}
+/*!
+ *	@brief This API is used to get the compensated X data
+ *	of the AKM09912; the output of X is returned as s32.
+ *	@note	Before reading the mag compensated X data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09912_compensate_X(s16 v_bosch_akm_x_s16)
+{
+	/*Return value of AKM x compensated data*/
+	s32 retval = SMI130_INIT_VALUE;
+	/* Convert raw data into compensated data*/
+	retval = v_bosch_akm_x_s16 *
+	(akm_asa_data_mbl.asax + AKM09912_SENSITIVITY)
+	/ AKM09912_SENSITIVITY_DIV;
+	return retval;
+}
+/*!
+ *	@brief This API is used to get the compensated Y data
+ *	of the AKM09912; the output of Y is returned as s32.
+ *	@note	Before reading the mag compensated Y data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09912_compensate_Y(s16 v_bosch_akm_y_s16)
+{
+	/*Return value of AKM y compensated data*/
+	s32 retval = SMI130_INIT_VALUE;
+	/* Convert raw data into compensated data*/
+	retval = v_bosch_akm_y_s16 *
+	(akm_asa_data_mbl.asay + AKM09912_SENSITIVITY)
+	/ AKM09912_SENSITIVITY_DIV;
+	return retval;
+}
+/*!
+ *	@brief This API is used to get the compensated Z data
+ *	of the AKM09912; the output of Z is returned as s32.
+ *	@note	Before reading the mag compensated Z data,
+ *			make sure the following two points are addressed:
+ *	@note 1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09912_compensate_Z(s16 v_bosch_akm_z_s16)
+{
+	/*Return value of AKM z compensated data*/
+	s32 retval = SMI130_INIT_VALUE;
+	/* Convert raw data into compensated data*/
+	retval = v_bosch_akm_z_s16 *
+	(akm_asa_data_mbl.asaz + AKM09912_SENSITIVITY)
+	/ AKM09912_SENSITIVITY_DIV;
+	return retval;
+}
+ /*!
+ *	@brief This function is used to read the compensated value of
+ *	the AKM09911.
+ *	@note Before reading the mag compensated data,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm09911_compensate_xyz(
+struct smi130_mag_xyz_s32_t *bosch_akm_xyz)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	struct smi130_mag_t mag_xyz;
+
+	com_rslt = smi130_read_mag_xyz(&mag_xyz, BST_AKM);
+	/* Compensation for X axis */
+	bosch_akm_xyz->x = smi130_bosch_akm09911_compensate_X(mag_xyz.x);
+
+	/* Compensation for Y axis */
+	bosch_akm_xyz->y = smi130_bosch_akm09911_compensate_Y(mag_xyz.y);
+
+	/* Compensation for Z axis */
+	bosch_akm_xyz->z = smi130_bosch_akm09911_compensate_Z(mag_xyz.z);
+
+	return com_rslt;
+}
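+/*
+ * Usage sketch (illustrative only, not part of the original driver): a
+ * caller that wants compensated AKM09911 data would typically latch the
+ * sensitivity values once via the FUSE ROM mode and then trigger single
+ * measurements, e.g.
+ *
+ *	struct smi130_mag_xyz_s32_t mag;
+ *
+ *	smi130_bosch_akm_set_powermode(FUSE_ROM_MODE);
+ *	smi130_bosch_akm_set_powermode(AKM_SINGLE_MEAS_MODE);
+ *	if (smi130_bosch_akm09911_compensate_xyz(&mag) == SUCCESS)
+ *		printk(KERN_INFO "mag %d %d %d\n", mag.x, mag.y, mag.z);
+ *
+ * This assumes the AKM mag interface init in this file has already been
+ * run and that SUCCESS is the zero return code used elsewhere in the
+ * driver; error handling and delays are omitted for brevity.
+ */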
+ /*!
+ *	@brief This function is used to read the compensated value of
+ *	the AKM09912.
+ *	@note Before reading the mag compensated data,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm09912_compensate_xyz(
+struct smi130_mag_xyz_s32_t *bosch_akm_xyz)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	struct smi130_mag_t mag_xyz;
+
+	com_rslt = smi130_read_mag_xyz(&mag_xyz, BST_AKM);
+	printk(KERN_INFO "akm09912_raw_x:%d, %d, %d, <%s>,<%d>",
+	mag_xyz.x, mag_xyz.y, mag_xyz.z, __func__, __LINE__);
+	/* Compensation for X axis */
+	bosch_akm_xyz->x = smi130_bosch_akm09912_compensate_X(mag_xyz.x);
+
+	/* Compensation for Y axis */
+	bosch_akm_xyz->y = smi130_bosch_akm09912_compensate_Y(mag_xyz.y);
+
+	/* Compensation for Z axis */
+	bosch_akm_xyz->z = smi130_bosch_akm09912_compensate_Z(mag_xyz.z);
+	return com_rslt;
+}
+ /*!
+ *	@brief This function is used to compensate AKM09912 raw values
+ *	that are already stored in the given structure, in place.
+ *	@note Before using the mag compensated data,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm09912_compensate_xyz_raw(
+struct smi130_mag_xyz_s32_t *bosch_akm_xyz)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Compensation for X axis */
+	bosch_akm_xyz->x = smi130_bosch_akm09912_compensate_X(bosch_akm_xyz->x);
+
+	/* Compensation for Y axis */
+	bosch_akm_xyz->y = smi130_bosch_akm09912_compensate_Y(bosch_akm_xyz->y);
+
+	/* Compensation for Z axis */
+	bosch_akm_xyz->z = smi130_bosch_akm09912_compensate_Z(bosch_akm_xyz->z);
+
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used to set the AKM09911 and AKM09912
+ *	power mode.
+ *	@note Before setting the AKM power mode,
+ *	make sure the following two points are addressed:
+ *	@note	1.	Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *	@param v_akm_pow_mode_u8 : The value of akm power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  AKM_POWER_DOWN_MODE
+ *    1     |  AKM_SINGLE_MEAS_MODE
+ *    2     |  FUSE_ROM_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm_set_powermode(
+u8 v_akm_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	/* set mag interface manual mode*/
+	if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE) {
+		com_rslt = smi130_set_mag_manual_enable(
+		SMI130_MANUAL_ENABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s>\n",
+	com_rslt, p_smi130->mag_manual_enable, __func__);
+	switch (v_akm_pow_mode_u8) {
+	case AKM_POWER_DOWN_MODE:
+		/* Set the power mode of AKM as power down mode*/
+		com_rslt += smi130_set_mag_write_data(AKM_POWER_DOWN_MODE_DATA);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	case AKM_SINGLE_MEAS_MODE:
+		/* Set the power mode of AKM as
+		single measurement mode*/
+		com_rslt += smi130_set_mag_write_data
+		(AKM_SINGLE_MEASUREMENT_MODE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_read_addr(AKM_DATA_REGISTER);
+	break;
+	case FUSE_ROM_MODE:
+		/* Set the power mode of AKM as
+		Fuse ROM mode*/
+		com_rslt += smi130_set_mag_write_data(AKM_FUSE_ROM_MODE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* Sensitivity v_data_u8 */
+		com_rslt += smi130_read_bosch_akm_sensitivity_data();
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* power down mode*/
+		com_rslt += smi130_set_mag_write_data(AKM_POWER_DOWN_MODE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(AKM_POWER_MODE_REG);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+	/* set mag interface auto mode*/
+	if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE) {
+		com_rslt += smi130_set_mag_manual_enable(
+		SMI130_MANUAL_DISABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_INFO "com_rslt:%d, manual:%d, <%s><%d>\n",
+	com_rslt, p_smi130->mag_manual_enable, __func__, __LINE__);
+	return com_rslt;
+}
+ /*!
+ *	@brief This function is used to set the magnetometer
+ *	power mode of the AKM09911 and AKM09912.
+ *	@note Before setting the mag power mode,
+ *	make sure the following point is addressed:
+ *		Check whether the mag interface is enabled
+ *		by using the smi130_get_if_mode() function.
+ *		If the mag interface is not enabled, enable it by
+ *		calling smi130_set_if_mode(0x02).
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of secondary if power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  SMI130_MAG_FORCE_MODE
+ *    1     |  SMI130_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_bosch_akm_and_secondary_if_powermode(
+u8 v_mag_sec_if_pow_mode_u8)
+{
+	/* variable used for return the status of communication result*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* accel operation mode to normal*/
+	com_rslt = smi130_set_command_register(ACCEL_MODE_NORMAL);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* set mag interface manual mode*/
+	if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE) {
+		com_rslt = smi130_set_mag_manual_enable(
+		SMI130_MANUAL_ENABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	}
+	printk(KERN_ERR "com_rslt:%d, manual:%d,after setacc normal mode\n",
+	com_rslt, p_smi130->mag_manual_enable);
+	switch (v_mag_sec_if_pow_mode_u8) {
+	case SMI130_MAG_FORCE_MODE:
+		/* set the secondary mag power mode as NORMAL*/
+		com_rslt += smi130_set_mag_interface_normal();
+		/* set the akm power mode as single measurement mode*/
+		com_rslt += smi130_bosch_akm_set_powermode(AKM_SINGLE_MEAS_MODE);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_read_addr(AKM_DATA_REGISTER);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	break;
+	case SMI130_MAG_SUSPEND_MODE:
+		/* set the akm power mode as power down mode*/
+		com_rslt += smi130_bosch_akm_set_powermode(AKM_POWER_DOWN_MODE);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		/* set the secondary mag power mode as SUSPEND*/
+		com_rslt += smi130_set_command_register(MAG_MODE_SUSPEND);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	break;
+	default:
+		com_rslt = E_SMI130_OUT_OF_RANGE;
+	break;
+	}
+	/* set mag interface auto mode*/
+	if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE) {
+		com_rslt += smi130_set_mag_manual_enable(
+		SMI130_MANUAL_DISABLE);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	}
+	return com_rslt;
+}
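+/*
+ * Usage sketch (illustrative only, not part of the original driver):
+ * toggling the AKM and the secondary interface between measurement and
+ * suspend typically looks like
+ *
+ *	smi130_set_bosch_akm_and_secondary_if_powermode(
+ *	SMI130_MAG_FORCE_MODE);
+ *	... read and compensate the mag data here ...
+ *	smi130_set_bosch_akm_and_secondary_if_powermode(
+ *	SMI130_MAG_SUSPEND_MODE);
+ *
+ * FORCE mode switches the secondary interface to NORMAL and arms a single
+ * AKM measurement; SUSPEND powers the AKM down first and then suspends
+ * the secondary interface, matching the switch cases above.
+ */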
+/*!
+ *	@brief This function is used to initialize the YAMAHA YAS532 mag interface
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas532_mag_interface_init(
+void)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	u8 v_pull_value_u8 = SMI130_INIT_VALUE;
+	u8 v_data_u8 = SMI130_INIT_VALUE;
+	u8 i = SMI130_INIT_VALUE;
+	/* accel operation mode to normal*/
+	com_rslt = smi130_set_command_register(ACCEL_MODE_NORMAL);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* write mag power mode as NORMAL*/
+	com_rslt += smi130_set_mag_interface_normal();
+	/* register 0x7E write the 0x37, 0x9A and 0x30*/
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_ONE);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_TWO);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_THREE);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/*switch the page1*/
+	com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_target_page(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_paging_enable(SMI130_WRITE_ENABLE_PAGE1);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_paging_enable(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* enable the pullup configuration from
+	the register 0x05 bit 4 and 5 as 10*/
+	smi130_get_pullup_configuration(&v_pull_value_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	v_pull_value_u8 = v_pull_value_u8 | SMI130_PULL_UP_DATA;
+	com_rslt += smi130_set_pullup_configuration(v_pull_value_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/*switch the page0*/
+	com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE0);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_target_page(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* Write the YAS532 i2c address*/
+	com_rslt += smi130_set_i2c_device_addr(SMI130_AUX_YAS532_I2C_ADDRESS);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* enable the mag interface to manual mode*/
+	com_rslt += smi130_set_mag_manual_enable(SMI130_MANUAL_ENABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_mag_manual_enable(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/*Enable the MAG interface */
+	com_rslt += smi130_set_if_mode(SMI130_ENABLE_MAG_IF_MODE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_if_mode(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	v_data_u8 = SMI130_MANUAL_DISABLE;
+	/* Read the YAS532 device id is 0x02*/
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS_DEVICE_ID_REG);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* Read the YAS532 calibration data*/
+	com_rslt += smi130_bosch_yamaha_yas532_calib_values();
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* Assign the data acquisition mode*/
+	yas532_data_mbl.measure_state = YAS532_MAG_STATE_INIT_COIL;
+	/* Set the default offset as invalid offset*/
+	set_vector(yas532_data_mbl.v_hard_offset_s8, INVALID_OFFSET);
+	/* set the transform to zero */
+	yas532_data_mbl.transform = SMI130_NULL;
+	/* Assign overflow as zero*/
+	yas532_data_mbl.overflow = 0;
+	#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+		yas532_data_mbl.temp_data.num =
+		yas532_data_mbl.temp_data.idx = 0;
+	#endif
+	/* Assign the coef value*/
+	for (i = 0; i < 3; i++) {
+		yas532_data_mbl.coef[i] = yas532_version_ac_coef[i];
+		yas532_data_mbl.last_raw[i] = 0;
+	}
+	yas532_data_mbl.last_raw[3] = 0;
+	/* Set the initial values of yas532*/
+	com_rslt += smi130_bosch_yas532_set_initial_values();
+	/* write the mag output data rate as 25Hz*/
+	com_rslt += smi130_set_mag_output_data_rate(
+	SMI130_MAG_OUTPUT_DATA_RATE_25HZ);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* Enable mag interface to auto mode*/
+	com_rslt += smi130_set_mag_manual_enable(
+	SMI130_MANUAL_DISABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	smi130_get_mag_manual_enable(&v_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+}
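+/*
+ * Illustrative bring-up order (not part of the original driver): a YAS532
+ * user would typically run the interface init once and then poll
+ * measurements, e.g.
+ *
+ *	struct yas532_vector xyz;
+ *	u8 overflow = 0;
+ *
+ *	smi130_bosch_yamaha_yas532_mag_interface_init();
+ *	smi130_bosch_yas532_measurement_xyz_data(&xyz, &overflow, 1,
+ *	SMI130_YAS532_ACQ_START);
+ *
+ * The third argument enables the temperature correction and
+ * SMI130_YAS532_ACQ_START is the acquisition command also used by the
+ * offset-measurement routine later in this file.  This assumes the core
+ * smi130 init (p_smi130) has already been done.
+ */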
+/*!
+ *	@brief This function is used to set the YAS532 initial values
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_set_initial_values(void)
+{
+/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* write testr1 as 0x00*/
+	com_rslt = smi130_set_mag_write_data(
+	SMI130_YAS532_WRITE_TESTR1);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(SMI130_YAS532_TESTR1);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write testr2 as 0x00*/
+	com_rslt += smi130_set_mag_write_data(
+	SMI130_YAS532_WRITE_TESTR2);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(SMI130_YAS532_TESTR2);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write Rcoil as 0x00*/
+	com_rslt += smi130_set_mag_write_data(
+	SMI130_YAS532_WRITE_RCOIL);
+	p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(SMI130_YAS532_RCOIL);
+	p_smi130->delay_msec(SMI130_YAS532_SET_INITIAL_VALUE_DELAY);
+	/* check the valid offset*/
+	if (is_valid_offset(yas532_data_mbl.v_hard_offset_s8)) {
+		com_rslt += smi130_bosch_yas532_set_offset(
+		yas532_data_mbl.v_hard_offset_s8);
+		yas532_data_mbl.measure_state = YAS532_MAG_STATE_NORMAL;
+	} else {
+		/* set the default offset as invalid offset*/
+		set_vector(yas532_data_mbl.v_hard_offset_s8, INVALID_OFFSET);
+		/*Set the default measure state for offset correction*/
+		yas532_data_mbl.measure_state = YAS532_MAG_STATE_MEASURE_OFFSET;
+	}
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used for YAS532 offset correction
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_magnetic_measure_set_offset(
+void)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* used for offset value set to the offset register*/
+	s8 v_hard_offset_s8[SMI130_HARD_OFFSET_DATA_SIZE] = {
+	SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* offset correction factors*/
+	static const u8 v_correct_u8[SMI130_YAS_CORRECT_DATA_SIZE] = {
+	16, 8, 4, 2, 1};
+	/* used for the temperature */
+	u16 v_temp_u16 = SMI130_INIT_VALUE;
+	/* used for the xy1y2 read*/
+	u16 v_xy1y2_u16[SMI130_YAS_XY1Y2_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* local flag for assign the values*/
+	s32 v_flag_s32[SMI130_YAS_FLAG_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	u8 i, j, v_busy_u8, v_overflow_u8 = SMI130_INIT_VALUE;
+
+	for (i = 0; i < 5; i++) {
+		/* set the offset values*/
+		com_rslt = smi130_bosch_yas532_set_offset(v_hard_offset_s8);
+		/* read the sensor data*/
+		com_rslt += smi130_bosch_yas532_normal_measurement_data(
+		SMI130_YAS532_ACQ_START, &v_busy_u8, &v_temp_u16,
+		v_xy1y2_u16, &v_overflow_u8);
+		/* check the sensor busy status*/
+		if (v_busy_u8)
+			return E_SMI130_BUSY;
+		/* calculate the magnetic correction with
+		offset and assign the values
+		to the offset register */
+		for (j = 0; j < 3; j++) {
+			if (YAS532_DATA_CENTER == v_xy1y2_u16[j])
+				v_flag_s32[j] = 0;
+			if (YAS532_DATA_CENTER < v_xy1y2_u16[j])
+				v_flag_s32[j] = 1;
+			if (v_xy1y2_u16[j] < YAS532_DATA_CENTER)
+				v_flag_s32[j] = -1;
+		}
+		for (j = 0; j < 3; j++) {
+			if (v_flag_s32[j])
+				v_hard_offset_s8[j] = (s8)(v_hard_offset_s8[j]
+				+ v_flag_s32[j] * v_correct_u8[i]);
+		}
+	}
+	/* set the offset */
+	com_rslt += smi130_bosch_yas532_set_offset(v_hard_offset_s8);
+	return com_rslt;
+}
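+/*
+ * Illustrative note (not part of the original driver): the loop above is
+ * a five-step successive approximation.  Each pass measures with the
+ * current hard offsets, compares every channel against YAS532_DATA_CENTER
+ * and then nudges the offset by +/-16, 8, 4, 2 and finally 1 LSB.  A
+ * channel that keeps reading above the centre value therefore ends up
+ * with a hard offset of 16 + 8 + 4 + 2 + 1 = 31 after the five passes.
+ */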
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS532 calibration data
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas532_calib_values(void)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array holding the YAS532 calibration values */
+	u8 v_data_u8[SMI130_YAS532_CALIB_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* Read the DX value */
+	com_rslt = smi130_set_mag_read_addr(SMI130_YAS532_CALIB_CX);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[0], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data_mbl.calib_yas532.cx = (s32)((v_data_u8[0]
+	* 10) - 1280);
+	/* Read the DY1 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB_CY1);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[1], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data_mbl.calib_yas532.cy1 =
+	(s32)((v_data_u8[1] * 10) - 1280);
+	/* Read the DY2 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB_CY2);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[2], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data_mbl.calib_yas532.cy2 =
+	(s32)((v_data_u8[2] * 10) - 1280);
+	/* Read the D2 and D3 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB1);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[3], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	yas532_data_mbl.calib_yas532.a2 =
+	(s32)(((v_data_u8[3] >>
+	SMI130_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x03F) - 32);
+	/* Read the D3 and D4 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB2);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[4], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a3*/
+	yas532_data_mbl.calib_yas532.a3 = (s32)((((v_data_u8[3] <<
+	SMI130_SHIFT_BIT_POSITION_BY_02_BITS) & 0x0C) |
+	((v_data_u8[4]
+	>> SMI130_SHIFT_BIT_POSITION_BY_06_BITS)
+	& 0x03)) - 8);
+	/* calculate a4*/
+	yas532_data_mbl.calib_yas532.a4 = (s32)((v_data_u8[4]
+	& 0x3F) - 32);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+    /* Read the D5 and D6 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB3);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[5], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a5*/
+	yas532_data_mbl.calib_yas532.a5 =
+	(s32)(((v_data_u8[5]
+	>> SMI130_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x3F) + 38);
+	/* Read the D6 and D7 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB4);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[6], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a6*/
+	yas532_data_mbl.calib_yas532.a6 =
+	(s32)((((v_data_u8[5]
+	<< SMI130_SHIFT_BIT_POSITION_BY_04_BITS)
+	& 0x30) | ((v_data_u8[6] >>
+	 SMI130_SHIFT_BIT_POSITION_BY_04_BITS)
+	 & 0x0F)) - 32);
+	 /* Read the D7 and D8 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB5);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[7], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a7*/
+	yas532_data_mbl.calib_yas532.a7 = (s32)((((v_data_u8[6]
+	<< SMI130_SHIFT_BIT_POSITION_BY_03_BITS)
+	& 0x78) |
+	((v_data_u8[7]
+	>> SMI130_SHIFT_BIT_POSITION_BY_05_BITS) &
+	0x07)) - 64);
+	/* Read the D8 and D9 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CLAIB6);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[8], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a8*/
+	yas532_data_mbl.calib_yas532.a8 = (s32)((((v_data_u8[7] <<
+	SMI130_GEN_READ_WRITE_DATA_LENGTH) & 0x3E) |
+	((v_data_u8[8] >>
+	SMI130_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01)) -
+	32);
+
+	/* Read the D8 and D9 value */
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB7);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[9], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* calculate a9*/
+	yas532_data_mbl.calib_yas532.a9 = (s32)(((v_data_u8[8] <<
+	SMI130_GEN_READ_WRITE_DATA_LENGTH) & 0xFE) |
+	 ((v_data_u8[9] >>
+	 SMI130_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	/* calculate k*/
+	yas532_data_mbl.calib_yas532.k = (s32)((v_data_u8[9] >>
+	SMI130_SHIFT_BIT_POSITION_BY_02_BITS) & 0x1F);
+	/* Read the  value from register 0x9A*/
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB8);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[10],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the  value from register 0x9B*/
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIIB9);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[11],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the  value from register 0x9C*/
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB10);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[12],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* Read the  value from register 0x9D*/
+	com_rslt += smi130_set_mag_read_addr(SMI130_YAS532_CALIB11);
+	/* 0x04 is secondary read mag x lsb register */
+	com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+	&v_data_u8[13],
+	SMI130_GEN_READ_WRITE_DATA_LENGTH);
+	/* Calculate the fxy1y2 and rxy1y1*/
+	yas532_data_mbl.calib_yas532.fxy1y2[0] =
+	(u8)(((v_data_u8[10]
+	& 0x01)
+	<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT)
+	| ((v_data_u8[11] >>
+	SMI130_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	yas532_data_mbl.calib_yas532.rxy1y2[0] =
+	((s8)(((v_data_u8[10]
+	>> SMI130_SHIFT_BIT_POSITION_BY_01_BIT) & 0x3F)
+	<< SMI130_SHIFT_BIT_POSITION_BY_02_BITS))
+	>> SMI130_SHIFT_BIT_POSITION_BY_02_BITS;
+	yas532_data_mbl.calib_yas532.fxy1y2[1] =
+	(u8)(((v_data_u8[11] & 0x01)
+	<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT)
+	 | ((v_data_u8[12] >>
+	 SMI130_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	yas532_data_mbl.calib_yas532.rxy1y2[1] =
+	((s8)(((v_data_u8[11]
+	>> SMI130_SHIFT_BIT_POSITION_BY_01_BIT) & 0x3F)
+	<< SMI130_SHIFT_BIT_POSITION_BY_02_BITS))
+	>> SMI130_SHIFT_BIT_POSITION_BY_02_BITS;
+	yas532_data_mbl.calib_yas532.fxy1y2[2] =
+	(u8)(((v_data_u8[12] & 0x01)
+	<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT)
+	| ((v_data_u8[13]
+	>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01));
+	yas532_data_mbl.calib_yas532.rxy1y2[2] =
+	((s8)(((v_data_u8[12]
+	>> SMI130_SHIFT_BIT_POSITION_BY_01_BIT) & 0x3F)
+	 << SMI130_SHIFT_BIT_POSITION_BY_02_BITS))
+	 >> SMI130_SHIFT_BIT_POSITION_BY_02_BITS;
+
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used to convert the raw
+ *	YAS532 xy1y2 data into linear data
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_xy1y2_to_linear(
+u16 *v_xy1y2_u16, s32 *xy1y2_linear)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SUCCESS;
+	static const u16 v_calib_data[] = {
+	3721, 3971, 4221, 4471};
+	u8 i = SMI130_INIT_VALUE;
+
+	for (i = 0; i < 3; i++)
+		xy1y2_linear[i] = v_xy1y2_u16[i] -
+		 v_calib_data[yas532_data_mbl.calib_yas532.fxy1y2[i]]
+			+ (yas532_data_mbl.v_hard_offset_s8[i] -
+			yas532_data_mbl.calib_yas532.rxy1y2[i])
+			* yas532_data_mbl.coef[i];
+	return com_rslt;
+}
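+/*
+ * Illustrative note (not part of the original driver): for each channel i
+ * the conversion above computes
+ *
+ *	linear[i] = raw[i] - calib_base[fxy1y2[i]]
+ *		+ (hard_offset[i] - rxy1y2[i]) * coef[i]
+ *
+ * where calib_base[] is the local {3721, 3971, 4221, 4471} table indexed
+ * by the per-channel fxy1y2 calibration field read in the calib routine.
+ */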
+/*!
+ *	@brief This function is used to read the YAS532 sensor data
+ *	@param	v_acquisition_command_u8: used to set the data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *	@param	v_busy_u8 : used to get the busy flag for the sensor data read
+ *	@param	v_temp_u16 : used to get the temperature data
+ *	@param	v_xy1y2_u16 : used to get the sensor xy1y2 data
+ *	@param	v_overflow_u8 : used to get the overflow data
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_normal_measurement_data(
+u8 v_acquisition_command_u8, u8 *v_busy_u8,
+u16 *v_temp_u16, u16 *v_xy1y2_u16, u8 *v_overflow_u8)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	/* Array holding the YAS532 xyy1 data*/
+	u8 v_data_u8[SMI130_YAS_XY1Y2T_DATA_SIZE] = {
+	SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	u8 i = SMI130_INIT_VALUE;
+	/* check the p_smi130 structure as NULL*/
+	if (p_smi130 == SMI130_NULL) {
+		return E_SMI130_NULL_PTR;
+	} else {
+		/* read the sensor data */
+		com_rslt = smi130_bosch_yas532_acquisition_command_register(
+		v_acquisition_command_u8);
+		com_rslt +=
+		p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+		SMI130_USER_DATA_MAG_X_LSB__REG,
+		v_data_u8, SMI130_MAG_YAS_DATA_LENGTH);
+		/* read the xyy1 data*/
+		*v_busy_u8 =
+		((v_data_u8[0]
+		>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS) & 0x01);
+		*v_temp_u16 =
+		(u16)((((s32)v_data_u8[0]
+		<< SMI130_SHIFT_BIT_POSITION_BY_03_BITS)
+		& 0x3F8) | ((v_data_u8[1]
+		>> SMI130_SHIFT_BIT_POSITION_BY_05_BITS) & 0x07));
+		v_xy1y2_u16[0] =
+		(u16)((((s32)v_data_u8[2]
+		<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS) & 0x1FC0)
+		| ((v_data_u8[3] >>
+		SMI130_SHIFT_BIT_POSITION_BY_02_BITS) & 0x3F));
+		v_xy1y2_u16[1] =
+		(u16)((((s32)v_data_u8[4]
+		<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS)
+		& 0x1FC0)
+		| ((v_data_u8[5]
+		>> SMI130_SHIFT_BIT_POSITION_BY_02_BITS) & 0x3F));
+		v_xy1y2_u16[2] =
+		(u16)((((s32)v_data_u8[6]
+		<< SMI130_SHIFT_BIT_POSITION_BY_06_BITS)
+		& 0x1FC0)
+		| ((v_data_u8[7]
+		>> SMI130_SHIFT_BIT_POSITION_BY_02_BITS) & 0x3F));
+		*v_overflow_u8 = 0;
+		for (i = 0; i < 3; i++) {
+			if (v_xy1y2_u16[i] == YAS532_DATA_OVERFLOW)
+				*v_overflow_u8 |= (1 << (i * 2));
+			if (v_xy1y2_u16[i] == YAS532_DATA_UNDERFLOW)
+				*v_overflow_u8 |= (1 << (i * 2 + 1));
+		}
+	}
+	return com_rslt;
+}
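+/*
+ * Illustrative note (not part of the original driver): the eight raw
+ * bytes read above are unpacked as
+ *
+ *	busy = bit 7 of byte 0
+ *	temp = low 7 bits of byte 0 and top 3 bits of byte 1 (10 bits)
+ *	x/y1/y2 = low 7 bits of the even byte as MSBs plus the top 6 bits
+ *		of the following odd byte as LSBs (13 bits each)
+ *
+ * and the overflow mask carries two bits per channel: bit (2 * i) for
+ * overflow and bit (2 * i + 1) for underflow.
+ */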
+/*!
+ *	@brief This function is used to read and compensate the YAS532 sensor data
+ *	@param	v_acquisition_command_u8	:	the value of CMDR
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ * @param xyz_data : the vector xyz output
+ * @param v_overflow_s8 : the value of overflow
+ * @param v_temp_correction_u8 : the value of the temperature correction enable
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_measurement_xyz_data(
+struct yas532_vector *xyz_data, u8 *v_overflow_s8, u8 v_temp_correction_u8,
+u8 v_acquisition_command_u8)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = SMI130_INIT_VALUE;
+	/* Array holding the linear calculation output*/
+	s32 v_xy1y2_linear_s32[SMI130_YAS_XY1Y2_DATA_SIZE] = {
+	SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* Array holding the temperature data */
+	s32 v_xyz_tmp_s32[SMI130_YAS_TEMP_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	s32 tmp = SMI130_INIT_VALUE;
+	s32 sx, sy1, sy2, sy, sz = SMI130_INIT_VALUE;
+	u8 i, v_busy_u8 = SMI130_INIT_VALUE;
+	u16 v_temp_u16 = SMI130_INIT_VALUE;
+	/* Array holding the xyy1 sensor raw data*/
+	u16 v_xy1y2_u16[SMI130_YAS_XY1Y2_DATA_SIZE] = {SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+	s32 sum = SMI130_INIT_VALUE;
+	#endif
+	*v_overflow_s8 = SMI130_INIT_VALUE;
+	switch (yas532_data_mbl.measure_state) {
+	case YAS532_MAG_STATE_INIT_COIL:
+		if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE)
+			com_rslt = smi130_set_mag_manual_enable(
+			SMI130_MANUAL_ENABLE);
+		/* write Rcoil*/
+		com_rslt += smi130_set_mag_write_data(
+		SMI130_YAS_DISABLE_RCOIL);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(SMI130_YAS532_RCOIL);
+		p_smi130->delay_msec(SMI130_YAS532_MEASUREMENT_DELAY);
+		if (!yas532_data_mbl.overflow && is_valid_offset(
+		yas532_data_mbl.v_hard_offset_s8))
+			yas532_data_mbl.measure_state = 0;
+	break;
+	case YAS532_MAG_STATE_MEASURE_OFFSET:
+		com_rslt = smi130_bosch_yas532_magnetic_measure_set_offset();
+		yas532_data_mbl.measure_state = 0;
+	break;
+	default:
+	break;
+	}
+	/* Read sensor data*/
+	com_rslt += smi130_bosch_yas532_normal_measurement_data(
+	v_acquisition_command_u8, &v_busy_u8, &v_temp_u16,
+	v_xy1y2_u16, v_overflow_s8);
+	/* Calculate the linear data*/
+	com_rslt += smi130_bosch_yas532_xy1y2_to_linear(v_xy1y2_u16,
+	v_xy1y2_linear_s32);
+	/* Calculate temperature correction */
+	#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+	/* store the current temperature sample in the rolling log */
+	yas532_data_mbl.temp_data.log[yas532_data_mbl.temp_data.idx++] =
+	v_temp_u16;
+	if (YAS532_MAG_TEMPERATURE_LOG <= yas532_data_mbl.temp_data.idx)
+		yas532_data_mbl.temp_data.idx = 0;
+	yas532_data_mbl.temp_data.num++;
+	if (YAS532_MAG_TEMPERATURE_LOG <= yas532_data_mbl.temp_data.num)
+		yas532_data_mbl.temp_data.num = YAS532_MAG_TEMPERATURE_LOG;
+	/* average the logged temperatures relative to the 20 degree typical */
+	for (i = 0; i < yas532_data_mbl.temp_data.num; i++)
+		sum += yas532_data_mbl.temp_data.log[i];
+	tmp = sum * 10 / yas532_data_mbl.temp_data.num
+	- YAS532_TEMP20DEGREE_TYPICAL * 10;
+	#else
+		tmp = (v_temp_u16 - YAS532_TEMP20DEGREE_TYPICAL)
+		* 10;
+	#endif
+	sx  = v_xy1y2_linear_s32[0];
+	sy1 = v_xy1y2_linear_s32[1];
+	sy2 = v_xy1y2_linear_s32[2];
+	/* Temperature correction */
+	if (v_temp_correction_u8) {
+		sx  -= (yas532_data_mbl.calib_yas532.cx  * tmp)
+		/ 1000;
+		sy1 -= (yas532_data_mbl.calib_yas532.cy1 * tmp)
+		/ 1000;
+		sy2 -= (yas532_data_mbl.calib_yas532.cy2 * tmp)
+		/ 1000;
+	}
+	sy = sy1 - sy2;
+	sz = -sy1 - sy2;
+
+	xyz_data->yas532_vector_xyz[0] = yas532_data_mbl.calib_yas532.k *
+	((100 * sx + yas532_data_mbl.calib_yas532.a2 * sy +
+	yas532_data_mbl.calib_yas532.a3 * sz) / 10);
+	xyz_data->yas532_vector_xyz[1] = yas532_data_mbl.calib_yas532.k *
+	((yas532_data_mbl.calib_yas532.a4 * sx + yas532_data_mbl.calib_yas532.a5 * sy +
+	yas532_data_mbl.calib_yas532.a6 * sz) / 10);
+	xyz_data->yas532_vector_xyz[2] = yas532_data_mbl.calib_yas532.k *
+	((yas532_data_mbl.calib_yas532.a7 * sx + yas532_data_mbl.calib_yas532.a8 * sy +
+	yas532_data_mbl.calib_yas532.a9 * sz) / 10);
+	if (yas532_data_mbl.transform != SMI130_NULL) {
+		for (i = 0; i < 3; i++) {
+				v_xyz_tmp_s32[i] = yas532_data_mbl.transform[i
+				* 3] *
+				xyz_data->yas532_vector_xyz[0]
+				+ yas532_data_mbl.transform[i * 3 + 1] *
+				xyz_data->yas532_vector_xyz[1]
+				+ yas532_data_mbl.transform[i * 3 + 2] *
+				xyz_data->yas532_vector_xyz[2];
+		}
+		set_vector(xyz_data->yas532_vector_xyz, v_xyz_tmp_s32);
+	}
+	for (i = 0; i < 3; i++) {
+		xyz_data->yas532_vector_xyz[i] -=
+		xyz_data->yas532_vector_xyz[i] % 10;
+		if (*v_overflow_s8 & (1
+		<< (i * 2)))
+			xyz_data->yas532_vector_xyz[i] +=
+			1; /* set overflow */
+		if (*v_overflow_s8 & (1 <<
+		(i * 2 + 1)))
+			xyz_data->yas532_vector_xyz[i] += 2; /* set underflow */
+	}
+
+
+	if (v_busy_u8)
+		return com_rslt;
+	if (0 < *v_overflow_s8) {
+		if (!yas532_data_mbl.overflow)
+			yas532_data_mbl.overflow = 1;
+		yas532_data_mbl.measure_state = YAS532_MAG_STATE_INIT_COIL;
+	} else
+		yas532_data_mbl.overflow = 0;
+	for (i = 0; i < 3; i++)
+		yas532_data_mbl.last_raw[i] = v_xy1y2_u16[i];
+	yas532_data_mbl.last_raw[3] = v_temp_u16;
+	return com_rslt;
+}
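+/*
+ * Illustrative note (not part of the original driver): the final loop
+ * above truncates every axis to a multiple of 10 and then reuses the
+ * lowest decimal digit as a status digit: +1 flags an overflow and +2 an
+ * underflow on that channel.  Callers that only need the field value can
+ * simply ignore (value % 10).
+ */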
+/*!
+ *	@brief This function is used to write the YAS532 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_acquisition_command_register(
+u8 v_command_reg_data_u8)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE)
+			com_rslt = smi130_set_mag_manual_enable(
+			SMI130_MANUAL_ENABLE);
+
+		com_rslt = smi130_set_mag_write_data(v_command_reg_data_u8);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* YAMAHA YAS532-0x82*/
+		com_rslt += smi130_set_mag_write_addr(
+		SMI130_YAS532_COMMAND_REGISTER);
+		p_smi130->delay_msec(SMI130_YAS_ACQ_COMMAND_DELAY);
+		com_rslt += smi130_set_mag_read_addr(
+		SMI130_YAS532_DATA_REGISTER);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE)
+		com_rslt += smi130_set_mag_manual_enable(SMI130_MANUAL_DISABLE);
+
+	return com_rslt;
+
+}
+/*!
+ *	@brief This function is used to write the offset of the YAS532
+ *
+ *	@param	p_offset_s8	: The value of offset to write
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_set_offset(
+const s8 *p_offset_s8)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE) {
+		com_rslt = smi130_set_mag_manual_enable(SMI130_MANUAL_ENABLE);
+		p_smi130->delay_msec(SMI130_YAS532_OFFSET_DELAY);
+	}
+
+	    /* Write offset X data*/
+		com_rslt = smi130_set_mag_write_data(p_offset_s8[0]);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* YAS532 offset x write*/
+		com_rslt += smi130_set_mag_write_addr(SMI130_YAS532_OFFSET_X);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+		/* Write offset Y data*/
+		com_rslt += smi130_set_mag_write_data(p_offset_s8[1]);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* YAS532 offset y write*/
+		com_rslt += smi130_set_mag_write_addr(SMI130_YAS532_OFFSET_Y);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+		/* Write offset Z data*/
+		com_rslt += smi130_set_mag_write_data(p_offset_s8[2]);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		/* YAS532 offset z write*/
+		com_rslt += smi130_set_mag_write_addr(SMI130_YAS532_OFFSET_Z);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		set_vector(yas532_data_mbl.v_hard_offset_s8, p_offset_s8);
+
+	if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE)
+		com_rslt = smi130_set_mag_manual_enable(SMI130_MANUAL_DISABLE);
+	return com_rslt;
+}
+/*!
+ *	@brief This function is used to initialize the YAMAHA YAS537 mag interface
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_mag_interface_init(
+void)
+{
+/* This variable used for provide the communication
+results*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+u8 v_pull_value_u8 = SMI130_INIT_VALUE;
+u8 v_data_u8 = SMI130_INIT_VALUE;
+u8 i = SMI130_INIT_VALUE;
+/* accel operation mode to normal*/
+com_rslt = smi130_set_command_register(ACCEL_MODE_NORMAL);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* write mag power mode as NORMAL*/
+com_rslt += smi130_set_mag_interface_normal();
+/* register 0x7E write the 0x37, 0x9A and 0x30*/
+com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_ONE);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_TWO);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_command_register(SMI130_COMMAND_REG_THREE);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/*switch the page1*/
+com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE1);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+smi130_get_target_page(&v_data_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_paging_enable(SMI130_WRITE_ENABLE_PAGE1);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+smi130_get_paging_enable(&v_data_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* enable the pullup configuration from
+the register 0x05 bit 4 and 5 as 10*/
+smi130_get_pullup_configuration(&v_pull_value_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+v_pull_value_u8 = v_pull_value_u8 | SMI130_PULL_UP_DATA;
+com_rslt += smi130_set_pullup_configuration(v_pull_value_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/*switch the page0*/
+com_rslt += smi130_set_target_page(SMI130_WRITE_TARGET_PAGE0);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+smi130_get_target_page(&v_data_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* Write the YAS537 i2c address*/
+com_rslt += smi130_set_i2c_device_addr(SMI130_YAS537_I2C_ADDRESS);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* enable the mag interface to manual mode*/
+com_rslt += smi130_set_mag_manual_enable(SMI130_MANUAL_ENABLE);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+smi130_get_mag_manual_enable(&v_data_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/*Enable the MAG interface */
+com_rslt += smi130_set_if_mode(SMI130_ENABLE_MAG_IF_MODE);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+smi130_get_if_mode(&v_data_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+v_data_u8 = SMI130_MANUAL_DISABLE;
+/* Read the YAS537 device id*/
+com_rslt += smi130_set_mag_read_addr(SMI130_YAS_DEVICE_ID_REG);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&v_data_u8, SMI130_GEN_READ_WRITE_DATA_LENGTH);
+yas537_data_mbl.dev_id = v_data_u8;
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* Read the YAS537 calibration data*/
+com_rslt +=
+smi130_bosch_yamaha_yas537_calib_values(
+SMI130_GEN_READ_WRITE_DATA_LENGTH);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* set the mode to NORMAL*/
+yas537_data_mbl.measure_state = YAS537_MAG_STATE_NORMAL;
+/* set the transform to zero */
+yas537_data_mbl.transform = SMI130_NULL;
+yas537_data_mbl.average = 32;
+for (i = 0; i < 3; i++) {
+	yas537_data_mbl.hard_offset[i] = -128;
+	yas537_data_mbl.last_after_rcoil[i] = 0;
+}
+for (i = 0; i < 4; i++)
+	yas537_data_mbl.last_raw[i] = 0;
+/* write the mag bandwidth as 25Hz*/
+com_rslt += smi130_set_mag_output_data_rate(
+SMI130_MAG_OUTPUT_DATA_RATE_25HZ);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* Enable mag interface to auto mode*/
+com_rslt += smi130_set_mag_manual_enable(
+SMI130_MANUAL_DISABLE);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+smi130_get_mag_manual_enable(&v_data_u8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+return com_rslt;
+}
+/*!
+*	@brief This function is used to read the
+*	YAMAHA YAS537 calibration data
+*
+*
+*	@param v_rcoil_u8 : The value of r coil
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_calib_values(
+u8 v_rcoil_u8)
+{
+/* This variable used for provide the communication
+results*/
+SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+/* Array holding the YAS532 calibration values */
+u8 a_data_u8[SMI130_YAS537_CALIB_DATA_SIZE] = {
+SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+};
+static const u8 v_avrr_u8[] = {0x50, 0x60, 0x70};
+u8 v_cal_valid_u8 = SMI130_INIT_VALUE, i;
+/* write soft reset as 0x02*/
+com_rslt = smi130_set_mag_write_data(
+YAS537_SRSTR_DATA);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_mag_write_addr(YAS537_REG_SRSTR);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* Read the DX value */
+com_rslt = smi130_set_mag_read_addr(YAS537_REG_CALR_C0);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[0], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the DY1 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C1);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[1], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the DY2 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C2);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[2], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D2 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C3);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[3], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D3 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C4);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[4], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D4 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C5);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[5], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D5 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C6);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[6], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D6 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C7);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[7], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D7 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C8);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[8], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D8 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_C9);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[9], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the D9 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_CA);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[10], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RX value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_CB);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[11], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RY1 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_CC);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[12], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RY2 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_CD);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[13], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the RY2 value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_CE);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[14], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the CHF value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_CF);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[15], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* Read the VER value */
+com_rslt += smi130_set_mag_read_addr(YAS537_REG_CALR_DO);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+/* 0x04 is secondary read mag x lsb register */
+com_rslt += smi130_read_reg(SMI130_MAG_DATA_READ_REG,
+&a_data_u8[16], SMI130_GEN_READ_WRITE_DATA_LENGTH);
+/* get the calib ver*/
+yas537_data_mbl.calib_yas537.ver =
+(a_data_u8[16] >> SMI130_SHIFT_BIT_POSITION_BY_06_BITS);
+for (i = 0; i < 17; i++) {
+	/* any non-zero byte in C0..CF, or any of the low six bits
+	of the last byte, marks the calibration data as valid */
+	if (i < 16 && a_data_u8[i] != 0)
+		v_cal_valid_u8 = 1;
+	if (i == 16 &&
+	(a_data_u8[i] & 0x3F) != 0)
+		v_cal_valid_u8 = 1;
+}
+if (!v_cal_valid_u8)
+	return ERROR;
+if (yas537_data_mbl.calib_yas537.ver == 0) {
+	for (i = 0; i < 17; i++) {
+		if (i < 12) {
+			/* write offset*/
+			com_rslt += smi130_set_mag_write_data(
+			a_data_u8[i]);
+			p_smi130->delay_msec(
+			SMI130_GEN_READ_WRITE_DELAY);
+			com_rslt += smi130_set_mag_write_addr(
+			YAS537_REG_MTCR + i);
+			p_smi130->delay_msec(
+			SMI130_GEN_READ_WRITE_DELAY);
+		} else if (i < 15) {
+			/* write offset correction*/
+			com_rslt += smi130_set_mag_write_data(
+			a_data_u8[i]);
+			p_smi130->delay_msec(
+			SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			com_rslt += smi130_set_mag_write_addr((
+			(YAS537_REG_OXR + i) - 12));
+			p_smi130->delay_msec(
+			SMI130_GEN_READ_WRITE_DELAY);
+			yas537_data_mbl.hard_offset[i - 12]
+			= a_data_u8[i];
+		} else {
+			/* write offset correction*/
+			com_rslt += smi130_set_mag_write_data(
+			a_data_u8[i]);
+			p_smi130->delay_msec(
+			SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			com_rslt += smi130_set_mag_write_addr((
+			(YAS537_REG_OXR + i) - 11));
+			p_smi130->delay_msec(
+			SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		}
+
+}
+} else if (yas537_data_mbl.calib_yas537.ver == 1) {
+	for (i = 0; i < 3; i++) {
+		/* write offset*/
+		com_rslt += smi130_set_mag_write_data(
+		a_data_u8[i]);
+		p_smi130->delay_msec(
+		SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(
+		YAS537_REG_MTCR + i);
+		p_smi130->delay_msec(
+		SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		if (com_rslt == SUCCESS) {
+			/* write offset*/
+			com_rslt += smi130_set_mag_write_data(
+			a_data_u8[i + 12]);
+			p_smi130->delay_msec(
+			SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			com_rslt += smi130_set_mag_write_addr(
+			YAS537_REG_OXR + i);
+			p_smi130->delay_msec(
+			SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+			yas537_data_mbl.hard_offset[i] =
+			a_data_u8[i + 12];
+		} else {
+			com_rslt = ERROR;
+		}
+	}
+	/* write offset*/
+	com_rslt += smi130_set_mag_write_data(
+	((a_data_u8[i] & 0xE0) | 0x10));
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(
+	YAS537_REG_MTCR + i);
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write offset*/
+	com_rslt += smi130_set_mag_write_data(
+	((a_data_u8[15]
+	>> SMI130_SHIFT_BIT_POSITION_BY_03_BITS)
+	& 0x1E));
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(YAS537_REG_HCKR);
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write offset*/
+	com_rslt += smi130_set_mag_write_data(
+	((a_data_u8[15] << 1) & 0x1E));
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(YAS537_REG_LCKR);
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	/* write offset*/
+	com_rslt += smi130_set_mag_write_data(
+	(a_data_u8[16] & 0x3F));
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(YAS537_REG_OCR);
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+
+	/* Assign the calibration values*/
+	/* a2 */
+	yas537_data_mbl.calib_yas537.a2 =
+	((((a_data_u8[3]
+	<< SMI130_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x7C)
+	| (a_data_u8[4]
+	>> SMI130_SHIFT_BIT_POSITION_BY_06_BITS)) - 64);
+	/* a3 */
+	yas537_data_mbl.calib_yas537.a3 =
+	((((a_data_u8[4] << SMI130_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0x7E)
+	| (a_data_u8[5]
+	>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS)) - 64);
+	/* a4 */
+	yas537_data_mbl.calib_yas537.a4 =
+	((((a_data_u8[5]
+	<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0xFE)
+	| (a_data_u8[6]
+	>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS))
+	- 128);
+	/* a5 */
+	yas537_data_mbl.calib_yas537.a5 =
+	((((a_data_u8[6]
+	<< SMI130_SHIFT_BIT_POSITION_BY_02_BITS)
+	& 0x1FC)
+	| (a_data_u8[7]
+	>> SMI130_SHIFT_BIT_POSITION_BY_06_BITS))
+	- 112);
+	/* a6 */
+	yas537_data_mbl.calib_yas537.a6 =
+	((((a_data_u8[7]
+	<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0x7E)
+	| (a_data_u8[8]
+	>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS)) - 64);
+	/* a7 */
+	yas537_data_mbl.calib_yas537.a7 =
+	((((a_data_u8[8]
+	<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT)
+	& 0xFE)
+	| (a_data_u8[9]
+	>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS))
+	- 128);
+	/* a8 */
+	yas537_data_mbl.calib_yas537.a8 = ((a_data_u8[9] &
+	0x7F) - 64);
+	/* a9 */
+	yas537_data_mbl.calib_yas537.a9 = ((((a_data_u8[10]
+	<< SMI130_SHIFT_BIT_POSITION_BY_01_BIT) & 0x1FE)
+	| (a_data_u8[11]
+	>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS))
+	- 112);
+	/* k */
+	yas537_data_mbl.calib_yas537.k = (
+	a_data_u8[11] & 0x7F);
+	} else {
+		return ERROR;
+	}
+/* write A/D converter*/
+com_rslt += smi130_set_mag_write_data(
+YAS537_WRITE_A_D_CONVERTER);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_mag_write_addr(YAS537_REG_ADCCALR);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* write A/D converter second register*/
+com_rslt += smi130_set_mag_write_data(
+YAS537_WRITE_A_D_CONVERTER2);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_mag_write_addr(YAS537_REG_ADCCALR_ONE);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* write temperature calibration register*/
+com_rslt += smi130_set_mag_write_data(YAS537_WRITE_TEMP_CALIB);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_mag_write_addr(YAS537_REG_TRMR);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+/* write average filter register*/
+com_rslt += smi130_set_mag_write_data(
+v_avrr_u8[yas537_data_mbl.average]);
+p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+com_rslt += smi130_set_mag_write_addr(YAS537_REG_AVRR);
+p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+if (v_rcoil_u8) {
+	/* write average; filter register*/
+	com_rslt += smi130_set_mag_write_data(
+	YAS537_WRITE_FILTER);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	com_rslt += smi130_set_mag_write_addr(YAS537_REG_CONFR);
+	p_smi130->delay_msec(
+	SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+}
+
+return com_rslt;
+
+}
+/*!
+ *	@brief This function is used to write the YAS537 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil,
+ *                       | set direction of the coil
+ *                       | (x and y as minus(-)),
+ *                       | deferred acquisition mode
+ *         0x07          | turn on the acquisition coil,
+ *                       | set direction of the coil
+ *                       | (x and y as minus(-)),
+ *                       | normal acquisition mode
+ *         0x11          | turn OFF the acquisition coil,
+ *                       | set direction of the coil
+ *                       | (x and y as plus(+)),
+ *                       | deferred acquisition mode
+ *         0x01          | turn OFF the acquisition coil,
+ *                       | set direction of the coil
+ *                       | (x and y as plus(+)),
+ *                       | normal acquisition mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas537_acquisition_command_register(
+u8 v_command_reg_data_u8)
+{
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+
+	if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE)
+		com_rslt = smi130_set_mag_manual_enable(
+		SMI130_MANUAL_ENABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	com_rslt = smi130_set_mag_write_data(v_command_reg_data_u8);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+	/* write the YAS537 command register address */
+	com_rslt += smi130_set_mag_write_addr(
+	SMI130_REG_YAS537_CMDR);
+	/* set the mode to RECORD*/
+	yas537_data_mbl.measure_state = YAS537_MAG_STATE_RECORD_DATA;
+	p_smi130->delay_msec(SMI130_YAS_ACQ_COMMAND_DELAY);
+	com_rslt += smi130_set_mag_read_addr(
+	YAS537_REG_TEMPERATURE_0);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE)
+		com_rslt += smi130_set_mag_manual_enable(
+		SMI130_MANUAL_DISABLE);
+	p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+
+	return com_rslt;
+}
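+/*
+ * Usage sketch (illustrative only, not part of the driver): starting a
+ * normal acquisition with the coil switched off corresponds to the 0x01
+ * row of the table above, which is the value encoded by
+ * YAS537_SET_COMMAND_REGISTER:
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE rslt;
+ *
+ *	rslt = smi130_bosch_yas537_acquisition_command_register(
+ *	YAS537_SET_COMMAND_REGISTER);
+ *	if (rslt != SUCCESS)
+ *		handle the bus error here
+ *
+ * Passing 0x17 instead would start a deferred acquisition with the coil
+ * turned on, as listed in the same table.
+ */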
+/*!
+ *	@brief This function converts the raw
+ *	YAMAHA YAS537 xy1y2 data into xyz data
+ *
+ *	@param xy1y2: The value of raw xy1y2 data
+ *	@param xyz: The value of xyz data
+ *
+ *	@return None
+ *
+ */
+static void xy1y2_to_xyz(u16 *xy1y2, s32 *xyz)
+{
+	xyz[0] = ((xy1y2[0] - 8192)
+	* 300);
+	xyz[1] = (((xy1y2[1] - xy1y2[2])
+	* 1732) / 10);
+	xyz[2] = (((-xy1y2[1] - xy1y2[2])
+	+ 16384) * 300);
+}
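+/*
+ * Worked example (illustrative only): with all three raw channels at the
+ * ADC centre value of 8192, the conversion above yields a zero field
+ * vector, e.g. x = (8192 - 8192) * 300 = 0, and likewise y and z.
+ * Readings above or below 8192 therefore produce signed components
+ * scaled by the fixed factors 300 and 1732/10 used in the formulas.
+ */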
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS537 xy1y2 data
+ *
+ *	@param v_coil_stat_u8: The value of R coil status
+ *	@param v_busy_u8: The value of busy status
+ *	@param v_temperature_u16: The value of temperature
+ *	@param xy1y2: The value of raw xy1y2 data
+ *	@param v_ouflow_u8: The value of overflow
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_read_xy1y2_data(
+u8 *v_coil_stat_u8, u8 *v_busy_u8,
+u16 *v_temperature_u16, u16 *xy1y2, u8 *v_ouflow_u8)
+{
+	/* This variable used for provide the communication
+	results*/
+	SMI130_RETURN_FUNCTION_TYPE com_rslt = E_SMI130_COMM_RES;
+	/* Array holding the YAS537 xy1y2 and temperature data */
+	u8 a_data_u8[SMI130_YAS_XY1Y2T_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE,
+	};
+	u8 i = SMI130_INIT_VALUE;
+	s32 a_h_s32[SMI130_YAS_H_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	s32 a_s_s32[SMI130_YAS_S_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	/* set command register*/
+	com_rslt = smi130_bosch_yas537_acquisition_command_register(
+	YAS537_SET_COMMAND_REGISTER);
+	/* read the yas537 sensor data of xy1y2*/
+	com_rslt +=
+	p_smi130->SMI130_BUS_READ_FUNC(p_smi130->dev_addr,
+	SMI130_USER_DATA_MAG_X_LSB__REG,
+	a_data_u8, SMI130_MAG_YAS_DATA_LENGTH);
+	/* read the busy flag*/
+	*v_busy_u8 = a_data_u8[2]
+	>> SMI130_SHIFT_BIT_POSITION_BY_07_BITS;
+	/* read the coil status*/
+	*v_coil_stat_u8 =
+	((a_data_u8[2] >>
+	SMI130_SHIFT_BIT_POSITION_BY_06_BITS) & 0X01);
+	/* read temperature data*/
+	*v_temperature_u16 = (u16)((a_data_u8[0]
+	<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | a_data_u8[1]);
+	/* read x data*/
+	xy1y2[0] = (u16)(((a_data_u8[2] &
+	0x3F)
+	<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+	| (a_data_u8[3]));
+	/* read y1 data*/
+	xy1y2[1] = (u16)((a_data_u8[4]
+	<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+	| a_data_u8[5]);
+	/* read y2 data*/
+	xy1y2[2] = (u16)((a_data_u8[6]
+	<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS)
+	| a_data_u8[7]);
+	for (i = 0; i < 3; i++)
+		yas537_data_mbl.last_raw[i] = xy1y2[i];
+	yas537_data_mbl.last_raw[i] = *v_temperature_u16;
+	if (yas537_data_mbl.calib_yas537.ver == 1) {
+		for (i = 0; i < 3; i++)
+			a_s_s32[i] = xy1y2[i] - 8192;
+		/* read hx*/
+		a_h_s32[0] = ((yas537_data_mbl.calib_yas537.k * (
+		(128 * a_s_s32[0]) +
+		(yas537_data_mbl.calib_yas537.a2 * a_s_s32[1]) +
+		(yas537_data_mbl.calib_yas537.a3 * a_s_s32[2])))
+		/ (8192));
+		/* read hy1*/
+		a_h_s32[1] = ((yas537_data_mbl.calib_yas537.k * (
+		(yas537_data_mbl.calib_yas537.a4 * a_s_s32[0]) +
+		(yas537_data_mbl.calib_yas537.a5 * a_s_s32[1]) +
+		(yas537_data_mbl.calib_yas537.a6 * a_s_s32[2])))
+		/ (8192));
+		/* read hy2*/
+		a_h_s32[2] = ((yas537_data_mbl.calib_yas537.k * (
+		(yas537_data_mbl.calib_yas537.a7 * a_s_s32[0]) +
+		(yas537_data_mbl.calib_yas537.a8 * a_s_s32[1]) +
+		(yas537_data_mbl.calib_yas537.a9 * a_s_s32[2])))
+		/ (8192));
+
+		for (i = 0; i < 3; i++) {
+			if (a_h_s32[i] < -8192)
+				a_h_s32[i] = -8192;
+
+			if (8192 < a_h_s32[i])
+				a_h_s32[i] = 8192;
+
+			xy1y2[i] = a_h_s32[i] + 8192;
+
+		}
+	}
+	*v_ouflow_u8 = 0;
+	for (i = 0; i < 3; i++) {
+		if (YAS537_DATA_OVERFLOW <= xy1y2[i])
+			*v_ouflow_u8 |= (1 << (i * 2));
+		if (xy1y2[i] == YAS537_DATA_UNDERFLOW)
+			*v_ouflow_u8 |= (1 << (i * 2 + 1));
+	}
+
+	return com_rslt;
+
+}
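+/*
+ * Reading aid (derived from the unpacking above, shown here only as a
+ * summary of the 8 byte frame read from SMI130_USER_DATA_MAG_X_LSB__REG):
+ *
+ *	byte 0..1 : temperature, MSB first
+ *	byte 2    : bit 7 busy flag, bit 6 coil status,
+ *	            bits 5..0 upper bits of x
+ *	byte 3    : lower 8 bits of x
+ *	byte 4..5 : y1, MSB first
+ *	byte 6..7 : y2, MSB first
+ */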
+/*!
+ *	@brief This function checks whether the current YAS537
+ *	xy1y2 data deviates too far from the last recorded data
+ *
+ *	@param v_cur_u16: The current xy1y2 data
+ *	@param v_last_u16: The last recorded xy1y2 data
+ *
+ *	@return invalid field status
+ *	@retval 1 -> field is invalid
+ *	@retval 0 -> field is valid
+ *
+ */
+static SMI130_RETURN_FUNCTION_TYPE invalid_magnetic_field(
+u16 *v_cur_u16, u16 *v_last_u16)
+{
+	s16 invalid_thresh[] = {1500, 1500, 1500};
+	u8 i = SMI130_INIT_VALUE;
+
+	for (i = 0; i < 3; i++)
+		if (invalid_thresh[i] < ABS(v_cur_u16[i] - v_last_u16[i]))
+			return 1;
+	return 0;
+}
+/*!
+ *	@brief This function is used to measure the
+ *	YAMAHA YAS537 xyz data
+ *
+ *	@param v_ouflow_u8: The value of overflow
+ *	@param vector_xyz: The measured xyz data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_measure_xyz_data(
+u8 *v_ouflow_u8, struct yas_vector *vector_xyz)
+{
+	s32 a_xyz_tmp_s32[SMI130_YAS_TEMP_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	u8 i = SMI130_INIT_VALUE;
+	s8 com_rslt = SMI130_INIT_VALUE;
+	u8 v_busy_u8 = SMI130_INIT_VALUE;
+	u8 v_rcoil_u8 = SMI130_INIT_VALUE;
+	u16 v_temperature_u16 = SMI130_INIT_VALUE;
+	u16 a_xy1y2_u16[SMI130_YAS_XY1Y2_DATA_SIZE] = {
+	SMI130_INIT_VALUE, SMI130_INIT_VALUE, SMI130_INIT_VALUE};
+	*v_ouflow_u8 = 0;
+	/* read the yas537 xy1y2 data*/
+	com_rslt = smi130_bosch_yamaha_yas537_read_xy1y2_data(
+	&v_rcoil_u8, &v_busy_u8,
+	&v_temperature_u16, a_xy1y2_u16, v_ouflow_u8);
+	/* linear calculation*/
+	xy1y2_to_xyz(a_xy1y2_u16, vector_xyz->yas537_vector_xyz);
+	if (yas537_data_mbl.transform != SMI130_NULL) {
+		for (i = 0; i < 3; i++) {
+			a_xyz_tmp_s32[i] = ((
+			yas537_data_mbl.transform[i * 3]
+			* vector_xyz->yas537_vector_xyz[0])
+			+ (yas537_data_mbl.transform[
+			i * 3 + 1]
+			* vector_xyz->yas537_vector_xyz[1])
+			+ (yas537_data_mbl.transform[
+			i * 3 + 2]
+			* vector_xyz->yas537_vector_xyz[2]));
+		}
+		yas537_set_vector(
+		vector_xyz->yas537_vector_xyz, a_xyz_tmp_s32);
+	}
+	for (i = 0; i < 3; i++) {
+		vector_xyz->yas537_vector_xyz[i] -=
+		vector_xyz->yas537_vector_xyz[i] % 10;
+		if (*v_ouflow_u8 & (1 <<
+		(i * 2)))
+			vector_xyz->yas537_vector_xyz[i] +=
+			1; /* set overflow */
+		if (*v_ouflow_u8 & (1 << (i * 2 + 1)))
+			/* set underflow */
+			vector_xyz->yas537_vector_xyz[i] += 2;
+	}
+	if (v_busy_u8)
+		return ERROR;
+	switch (yas537_data_mbl.measure_state) {
+	case YAS537_MAG_STATE_INIT_COIL:
+		if (p_smi130->mag_manual_enable != SMI130_MANUAL_ENABLE)
+			com_rslt = smi130_set_mag_manual_enable(
+			SMI130_MANUAL_ENABLE);
+		com_rslt += smi130_set_mag_write_data(YAS537_WRITE_CONFR);
+		p_smi130->delay_msec(SMI130_GEN_READ_WRITE_DELAY);
+		com_rslt += smi130_set_mag_write_addr(YAS537_REG_CONFR);
+		p_smi130->delay_msec(SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY);
+		yas537_data_mbl.measure_state = YAS537_MAG_STATE_RECORD_DATA;
+		if (p_smi130->mag_manual_enable == SMI130_MANUAL_ENABLE)
+			com_rslt = smi130_set_mag_manual_enable(
+			SMI130_MANUAL_DISABLE);
+	break;
+	case YAS537_MAG_STATE_RECORD_DATA:
+		if (v_rcoil_u8)
+			break;
+		yas537_set_vector(yas537_data_mbl.last_after_rcoil, a_xy1y2_u16);
+		yas537_data_mbl.measure_state = YAS537_MAG_STATE_NORMAL;
+	break;
+	case YAS537_MAG_STATE_NORMAL:
+		if (SMI130_INIT_VALUE < *v_ouflow_u8
+		|| invalid_magnetic_field(a_xy1y2_u16,
+		yas537_data_mbl.last_after_rcoil)) {
+			yas537_data_mbl.measure_state = YAS537_MAG_STATE_INIT_COIL;
+			for (i = 0; i < 3; i++) {
+				if (!*v_ouflow_u8)
+					vector_xyz->yas537_vector_xyz[i] += 3;
+			}
+		}
+	break;
+	}
+
+	return com_rslt;
+}
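+/*
+ * Usage sketch (illustrative only, not part of the driver): a caller
+ * polling the magnetometer would pass a yas_vector and then check the
+ * per-axis flags returned in v_ouflow_u8, where bit (2 * axis) reports
+ * overflow and bit (2 * axis + 1) reports underflow, as encoded in
+ * smi130_bosch_yamaha_yas537_read_xy1y2_data():
+ *
+ *	struct yas_vector mag;
+ *	u8 ouflow = 0;
+ *	s8 rslt;
+ *
+ *	rslt = smi130_bosch_yamaha_yas537_measure_xyz_data(&ouflow, &mag);
+ *	if (rslt == SUCCESS && !ouflow)
+ *		mag.yas537_vector_xyz[] now holds the xyz data
+ */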
+/*!
+ *	@brief This function used for reading
+ *	smi130_t structure
+ *
+ *  @return the reference and values of smi130_t
+ *
+ *
+*/
+struct smi130_t *smi130_get_ptr(void)
+{
+	return  p_smi130;
+}
diff --git a/drivers/input/sensors/smi130/smi130.h b/drivers/input/sensors/smi130/smi130.h
new file mode 100644
index 0000000..c62f65c
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130.h
@@ -0,0 +1,11851 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL), Version 2, June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+ *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+*
+* smi130.h
+* Date : 2015/04/02
+* @id       836294d
+* Revision : 2.0.9 $
+* @brief
+* The head file of SMI130API
+*
+
+**************************************************************************/
+/*! \file smi130.h
+    \brief SMI130 Sensor Driver Support Header File */
+/* user defined code to be added here ... */
+#ifndef __SMI130_H__
+#define __SMI130_H__
+
+/*!
+* @brief The following definitions are used to define the data types
+*
+* @note While porting the API please consider the following
+* @note Check the version of the C standard in use
+* @note Check whether you are building for a Linux platform
+*/
+
+/*!
+* @brief For Linux platform support,
+* please use types.h for your data type definitions
+*/
+#ifdef	__KERNEL__
+
+#include <linux/types.h>
+
+#else /* ! __KERNEL__ */
+/**********************************************************
+* These definitions are used to define the data types
+* according to the C standard version in use
+***********************************************************/
+# if defined(__STDC_VERSION__)
+
+/************************************************
+ * compiler is C11 C standard
+************************************************/
+#if (__STDC_VERSION__ == 201112L)
+
+/************************************************/
+#include <stdint.h>
+/************************************************/
+
+/*unsigned integer types*/
+#define	u8	uint8_t
+#define	u16	uint16_t
+#define	u32	uint32_t
+#define	u64	uint64_t
+
+/*signed integer types*/
+#define	s8	int8_t
+#define	s16	int16_t
+#define	s32	int32_t
+#define	s64	int64_t
+/************************************************
+ * compiler is C99 C standard
+************************************************/
+
+#elif (__STDC_VERSION__ == 199901L)
+
+/* stdint.h is a C99 supported c library.
+which is used to fixed the integer size*/
+/************************************************/
+#include <stdint.h>
+/************************************************/
+
+/*unsigned integer types*/
+#define	u8	uint8_t
+#define	u16	uint16_t
+#define	u32	uint32_t
+#define	u64	uint64_t
+
+/*signed integer types*/
+#define s8	int8_t
+#define	s16	int16_t
+#define	s32	int32_t
+#define	s64	int64_t
+/************************************************
+ * compiler is C89 or other C standard
+************************************************/
+#else /* C89 or other older C standard */
+/*	By default it is defined as 32 bit machine configuration*/
+/*	define the definition based on your machine configuration*/
+/*	define the data types based on your
+	machine/compiler/controller configuration*/
+#define  MACHINE_32_BIT
+
+/* If your machine support 16 bit
+define the MACHINE_16_BIT*/
+#ifdef MACHINE_16_BIT
+#include <limits.h>
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed long int
+
+#if defined(LONG_MAX) && LONG_MAX == 0x7fffffffffffffffL
+#define s64 long int
+#define u64 unsigned long int
+#elif defined(LLONG_MAX) && (LLONG_MAX == 0x7fffffffffffffffLL)
+#define s64 long long int
+#define u64 unsigned long long int
+#else
+#warning Either the correct data type for signed 64 bit integer \
+could not be found, or 64 bit integers are not supported in your environment.
+#warning If 64 bit integers are supported on your platform, \
+please set s64 manually.
+#endif
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned long int
+
+/* If your machine support 32 bit
+define the MACHINE_32_BIT*/
+#elif defined MACHINE_32_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long long int
+
+/* If your machine support 64 bit
+define the MACHINE_64_BIT*/
+#elif defined MACHINE_64_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long int
+
+#else
+#warning The data types defined above are not supported, \
+please define the data types manually
+#endif
+#endif
+
+/*** This else will execute for compilers
+ *	that do not report a supported C standard
+ *	version (C89/C99/C11) ***/
+#else
+/*	By default it is defined as 32 bit machine configuration*/
+/*	define the definition based on your machine configuration*/
+/*	define the data types based on your
+	machine/compiler/controller configuration*/
+#define  MACHINE_32_BIT
+
+/* If your machine support 16 bit
+define the MACHINE_16_BIT*/
+#ifdef MACHINE_16_BIT
+#include <limits.h>
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed long int
+
+#if defined(LONG_MAX) && LONG_MAX == 0x7fffffffffffffffL
+#define s64 long int
+#define u64 unsigned long int
+#elif defined(LLONG_MAX) && (LLONG_MAX == 0x7fffffffffffffffLL)
+#define s64 long long int
+#define u64 unsigned long long int
+#else
+#warning Either the correct data type for signed 64 bit integer \
+could not be found, or 64 bit integers are not supported in your environment.
+#warning If 64 bit integers are supported on your platform, \
+please set s64 manually.
+#endif
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned long int
+
+/* If your machine support 32 bit
+define the MACHINE_32_BIT*/
+#elif defined MACHINE_32_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long long int
+
+/* If your machine support 64 bit
+define the MACHINE_64_BIT*/
+#elif defined  MACHINE_64_BIT
+/*signed integer types*/
+#define	s8	signed char
+#define	s16	signed short int
+#define	s32	signed int
+#define	s64	signed long int
+
+/*unsigned integer types*/
+#define	u8	unsigned char
+#define	u16	unsigned short int
+#define	u32	unsigned int
+#define	u64	unsigned long int
+
+#else
+#warning The data types defined above are not supported, \
+please define the data types manually
+#endif
+#endif
+#endif
+/***************************************************************/
+/**\name	BUS READ AND WRITE FUNCTION POINTERS        */
+/***************************************************************/
+/*!
+	@brief Define the calling convention of YOUR bus communication routine.
+	@note This includes types of parameters. This example shows the
+	configuration for an SPI bus link.
+
+    If your communication function looks like this:
+
+    write_my_bus_xy(u8 device_addr, u8 register_addr,
+    u8 * data, u8 length);
+
+    The SMI130_WR_FUNC_PTR would equal:
+
+    SMI130_WR_FUNC_PTR s8 (* bus_write)(u8,
+    u8, u8 *, u8)
+
+    Parameters can be mixed as needed; refer to the
+    @ref SMI130_BUS_WRITE_FUNC macro.
+
+
+*/
+#define SMI130_WR_FUNC_PTR s8 (*bus_write)(u8, u8,\
+u8 *, u8)
+/**< link macro between API function calls and bus write function
+	@note The bus write function can change since this is a
+	system dependent issue.
+
+    If the bus_write parameter calling order is like: reg_addr,
+    reg_data, wr_len it would be as it is here.
+
+    If the parameters are differently ordered or your communication
+    function like I2C need to know the device address,
+    you can change this macro accordingly.
+
+
+    SMI130_BUS_WRITE_FUNC(dev_addr, reg_addr, reg_data, wr_len)\
+    bus_write(dev_addr, reg_addr, reg_data, wr_len)
+
+    This macro lets all API functions call YOUR communication routine in a
+    way that equals your definition in the
+    @ref SMI130_WR_FUNC_PTR definition.
+
+*/
+#define SMI130_BUS_WRITE_FUNC(dev_addr, reg_addr, reg_data, wr_len)\
+				bus_write(dev_addr, reg_addr, reg_data, wr_len)
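+/*
+ * Example adaptation (an illustrative sketch only): if the host bus_write
+ * routine expects the payload pointer before the register address, the
+ * macro could be remapped as
+ *
+ * #define SMI130_BUS_WRITE_FUNC(dev_addr, reg_addr, reg_data, wr_len)\
+ *				bus_write(dev_addr, reg_data, reg_addr, wr_len)
+ *
+ * in which case the parameter list of SMI130_WR_FUNC_PTR above must be
+ * reordered to match the routine's real signature.
+ */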
+
+/**< Define the calling convention of YOUR bus communication routine.
+	@note This includes types of parameters. This example shows the
+	configuration for an SPI bus link.
+
+    If your communication function looks like this:
+
+    read_my_bus_xy(u8 device_addr, u8 register_addr,
+    u8 * data, u8 length);
+
+    The SMI130_RD_FUNC_PTR would equal:
+
+    SMI130_RD_FUNC_PTR s8 (* bus_read)(u8,
+    u8, u8 *, u8)
+
+    Parameters can be mixed as needed; refer to the
+    SMI130_BUS_READ_FUNC macro.
+
+*/
+#define SMI130_SPI_RD_MASK (0x80)   /* for spi read transactions on SPI the
+			MSB has to be set */
+#define SMI130_RD_FUNC_PTR s8 (*bus_read)(u8,\
+			u8, u8 *, u8)
+
+#define SMI130_BRD_FUNC_PTR s8 \
+(*burst_read)(u8, u8, u8 *, u32)
+
+/**< link macro between API function calls and bus read function
+	@note The bus read function can change since this is a
+	system dependent issue.
+
+    If the bus_read parameter calling order is like: reg_addr,
+    reg_data, wr_len it would be as it is here.
+
+    If the parameters are differently ordered or your communication
+    function like I2C need to know the device address,
+    you can change this macro accordingly.
+
+
+    SMI130_BUS_READ_FUNC(dev_addr, reg_addr, reg_data, wr_len)\
+    bus_read(dev_addr, reg_addr, reg_data, wr_len)
+
+    This macro lets all API functions call YOUR communication routine in a
+    way that equals your definition in the
+    SMI130_RD_FUNC_PTR definition.
+
+    @note This macro also includes the MSB = '1'
+    required for reading SMI130 addresses over SPI.
+
+*/
+#define SMI130_BUS_READ_FUNC(dev_addr, reg_addr, reg_data, r_len)\
+				bus_read(dev_addr, reg_addr, reg_data, r_len)
+
+#define SMI130_BURST_READ_FUNC(device_addr, \
+register_addr, register_data, rd_len)\
+burst_read(device_addr, register_addr, register_data, rd_len)
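+/*
+ * Example adaptation (an illustrative sketch only): a host whose SPI read
+ * routine does not set the read bit itself could OR in SMI130_SPI_RD_MASK
+ * at this single point, e.g.
+ *
+ * #define SMI130_BUS_READ_FUNC(dev_addr, reg_addr, reg_data, r_len)\
+ *				bus_read(dev_addr, (reg_addr) | SMI130_SPI_RD_MASK,\
+ *				reg_data, r_len)
+ *
+ * and treat SMI130_BURST_READ_FUNC the same way if burst reads are used.
+ */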
+
+
+#define SMI130_MDELAY_DATA_TYPE                 u32
+
+/***************************************************************/
+/**\name	I2C ADDRESS DEFINITIONS        */
+/***************************************************************/
+#define SMI130_I2C_ADDR1	0x68 /**< I2C Address needs to be changed */
+#define SMI130_I2C_ADDR2    0x69 /**< I2C Address needs to be changed */
+#define SMI130_AUX_BMM150_I2C_ADDRESS       (0x10)
+#define SMI130_AUX_YAS532_I2C_ADDRESS       (0x2E)
+/**< I2C address of YAS532*/
+#define SMI130_AKM09911_I2C_ADDRESS   0x0C/**< I2C address of AKM09911*/
+#define	SMI130_AUX_AKM09911_I2C_ADDR_2		(0x0D)
+/**< I2C address of AKM09911*/
+#define	SMI130_AUX_AKM09912_I2C_ADDR_1		(0x0C)
+/**< I2C address of AKM09912*/
+#define	SMI130_AUX_AKM09912_I2C_ADDR_2		(0x0D)
+/**< I2C address of AKM09912*/
+#define	SMI130_AUX_AKM09912_I2C_ADDR_3		(0x0E)
+/**< I2C address of AKM09912*/
+#define SMI130_AKM09912_I2C_ADDRESS   0x0F/**< I2C address of akm09912*/
+
+#define SMI130_YAS532_I2C_ADDRESS	0x2E/**< I2C address of YAS532*/
+/*******************************************/
+/**\name	CONSTANTS        */
+/******************************************/
+#define  SMI130_INIT_VALUE					(0)
+#define  SMI130_GEN_READ_WRITE_DATA_LENGTH	(1)
+#define  SMI130_MAXIMUM_TIMEOUT             (10)
+/* output data rate condition check*/
+#define  SMI130_OUTPUT_DATA_RATE0	(0)
+#define  SMI130_OUTPUT_DATA_RATE1	(1)
+#define  SMI130_OUTPUT_DATA_RATE2	(2)
+#define  SMI130_OUTPUT_DATA_RATE3	(3)
+#define  SMI130_OUTPUT_DATA_RATE4	(4)
+#define  SMI130_OUTPUT_DATA_RATE5	(5)
+#define  SMI130_OUTPUT_DATA_RATE6	(14)
+#define  SMI130_OUTPUT_DATA_RATE7	(15)
+/* accel range check*/
+#define SMI130_ACCEL_RANGE0  (3)
+#define SMI130_ACCEL_RANGE1  (5)
+#define SMI130_ACCEL_RANGE3  (8)
+#define SMI130_ACCEL_RANGE4  (12)
+/* check the status of registers*/
+#define  SMI130_FOC_STAT_HIGH			(1)
+#define  SMI130_SIG_MOTION_STAT_HIGH	(1)
+#define  SMI130_STEP_DET_STAT_HIGH		(1)
+
+/*condition check for reading and writing data*/
+#define	SMI130_MAX_VALUE_SIGNIFICANT_MOTION      (1)
+#define	SMI130_MAX_VALUE_FIFO_FILTER    (1)
+#define	SMI130_MAX_VALUE_FIFO_TIME      (1)
+#define	SMI130_MAX_VALUE_FIFO_INTR      (1)
+#define	SMI130_MAX_VALUE_FIFO_HEADER    (1)
+#define	SMI130_MAX_VALUE_FIFO_MAG       (1)
+#define	SMI130_MAX_VALUE_FIFO_ACCEL     (1)
+#define	SMI130_MAX_VALUE_FIFO_GYRO      (1)
+#define	SMI130_MAX_VALUE_SOURCE_INTR    (1)
+#define	SMI130_MAX_VALUE_LOW_G_MODE     (1)
+#define	SMI130_MAX_VALUE_NO_MOTION      (1)
+#define	SMI130_MAX_VALUE_TAP_SHOCK      (1)
+#define	SMI130_MAX_VALUE_TAP_QUIET      (1)
+#define	SMI130_MAX_VALUE_ORIENT_UD      (1)
+#define	SMI130_MAX_VALUE_ORIENT_AXES    (1)
+#define	SMI130_MAX_VALUE_NVM_PROG       (1)
+#define	SMI130_MAX_VALUE_SPI3           (1)
+#define	SMI130_MAX_VALUE_PAGE           (1)
+#define	SMI130_MAX_VALUE_I2C_WDT        (1)
+#define	SMI130_MAX_VALUE_SLEEP_STATE    (1)
+#define	SMI130_MAX_VALUE_WAKEUP_INTR    (1)
+#define	SMI130_MAX_VALUE_SELFTEST_SIGN  (1)
+#define	SMI130_MAX_VALUE_SELFTEST_AMP   (1)
+#define	SMI130_MAX_VALUE_SELFTEST_START (1)
+#define SMI130_MAX_GYRO_WAKEUP_TRIGGER		(3)
+#define SMI130_MAX_ACCEL_SELFTEST_AXIS	    (3)
+#define SMI130_MAX_GYRO_STEP_COUNTER        (1)
+#define SMI130_MAX_GYRO_BW                  (3)
+#define SMI130_MAX_ACCEL_BW                 (7)
+#define SMI130_MAX_ORIENT_MODE              (3)
+#define SMI130_MAX_ORIENT_BLOCKING          (3)
+#define SMI130_MAX_FLAT_HOLD                (3)
+#define SMI130_MAX_ACCEL_FOC                (3)
+#define SMI130_MAX_IF_MODE                  (3)
+#define SMI130_MAX_TARGET_PAGE              (3)
+#define SMI130_MAX_GYRO_RANGE               (4)
+#define SMI130_MAX_GYRO_SLEEP_TIGGER        (7)
+#define SMI130_MAX_TAP_TURN                 (7)
+#define SMI130_MAX_UNDER_SAMPLING           (1)
+#define SMI130_MAX_UNDER_SIG_MOTION         (3)
+#define SMI130_MAX_ACCEL_OUTPUT_DATA_RATE   (12)
+#define SMI130_MAX_LATCH_INTR               (15)
+#define SMI130_MAX_FLAT_HYST                (15)
+#define SMI130_MAX_ORIENT_THETA             (63)
+#define SMI130_MAX_FLAT_THETA               (63)
+
+/* FIFO index definitions*/
+#define SMI130_FIFO_X_LSB_DATA			(0)
+#define SMI130_FIFO_X_MSB_DATA			(1)
+#define SMI130_FIFO_Y_LSB_DATA			(2)
+#define SMI130_FIFO_Y_MSB_DATA			(3)
+#define SMI130_FIFO_Z_LSB_DATA			(4)
+#define SMI130_FIFO_Z_MSB_DATA			(5)
+#define SMI130_FIFO_R_LSB_DATA			(6)
+#define SMI130_FIFO_R_MSB_DATA			(7)
+/* FIFO gyro definition*/
+#define SMI130_GA_FIFO_G_X_LSB		(0)
+#define SMI130_GA_FIFO_G_X_MSB		(1)
+#define SMI130_GA_FIFO_G_Y_LSB		(2)
+#define SMI130_GA_FIFO_G_Y_MSB		(3)
+#define SMI130_GA_FIFO_G_Z_LSB		(4)
+#define SMI130_GA_FIFO_G_Z_MSB		(5)
+#define SMI130_GA_FIFO_A_X_LSB		(6)
+#define SMI130_GA_FIFO_A_X_MSB		(7)
+#define SMI130_GA_FIFO_A_Y_LSB		(8)
+#define SMI130_GA_FIFO_A_Y_MSB		(9)
+#define SMI130_GA_FIFO_A_Z_LSB		(10)
+#define SMI130_GA_FIFO_A_Z_MSB		(11)
+/* FIFO mag/gyro/accel definition*/
+#define SMI130_MGA_FIFO_M_X_LSB		(0)
+#define SMI130_MGA_FIFO_M_X_MSB		(1)
+#define SMI130_MGA_FIFO_M_Y_LSB		(2)
+#define SMI130_MGA_FIFO_M_Y_MSB		(3)
+#define SMI130_MGA_FIFO_M_Z_LSB		(4)
+#define SMI130_MGA_FIFO_M_Z_MSB		(5)
+#define SMI130_MGA_FIFO_M_R_LSB		(6)
+#define SMI130_MGA_FIFO_M_R_MSB		(7)
+#define SMI130_MGA_FIFO_G_X_LSB		(8)
+#define SMI130_MGA_FIFO_G_X_MSB		(9)
+#define SMI130_MGA_FIFO_G_Y_LSB		(10)
+#define SMI130_MGA_FIFO_G_Y_MSB		(11)
+#define SMI130_MGA_FIFO_G_Z_LSB		(12)
+#define SMI130_MGA_FIFO_G_Z_MSB		(13)
+#define SMI130_MGA_FIFO_A_X_LSB		(14)
+#define SMI130_MGA_FIFO_A_X_MSB		(15)
+#define SMI130_MGA_FIFO_A_Y_LSB		(16)
+#define SMI130_MGA_FIFO_A_Y_MSB		(17)
+#define SMI130_MGA_FIFO_A_Z_LSB		(18)
+#define SMI130_MGA_FIFO_A_Z_MSB		(19)
+/* FIFO mag definition*/
+#define SMI130_MA_FIFO_M_X_LSB		(0)
+#define SMI130_MA_FIFO_M_X_MSB		(1)
+#define SMI130_MA_FIFO_M_Y_LSB		(2)
+#define SMI130_MA_FIFO_M_Y_MSB		(3)
+#define SMI130_MA_FIFO_M_Z_LSB		(4)
+#define SMI130_MA_FIFO_M_Z_MSB		(5)
+#define SMI130_MA_FIFO_M_R_LSB		(6)
+#define SMI130_MA_FIFO_M_R_MSB		(7)
+#define SMI130_MA_FIFO_A_X_LSB		(8)
+#define SMI130_MA_FIFO_A_X_MSB		(9)
+#define SMI130_MA_FIFO_A_Y_LSB		(10)
+#define SMI130_MA_FIFO_A_Y_MSB		(11)
+#define SMI130_MA_FIFO_A_Z_LSB		(12)
+#define SMI130_MA_FIFO_A_Z_MSB		(13)
+/* FIFO mag/gyro definition*/
+#define SMI130_MG_FIFO_M_X_LSB		(0)
+#define SMI130_MG_FIFO_M_X_MSB		(1)
+#define SMI130_MG_FIFO_M_Y_LSB		(2)
+#define SMI130_MG_FIFO_M_Y_MSB		(3)
+#define SMI130_MG_FIFO_M_Z_LSB		(4)
+#define SMI130_MG_FIFO_M_Z_MSB		(5)
+#define SMI130_MG_FIFO_M_R_LSB		(6)
+#define SMI130_MG_FIFO_M_R_MSB		(7)
+#define SMI130_MG_FIFO_G_X_LSB		(8)
+#define SMI130_MG_FIFO_G_X_MSB		(9)
+#define SMI130_MG_FIFO_G_Y_LSB		(10)
+#define SMI130_MG_FIFO_G_Y_MSB		(11)
+#define SMI130_MG_FIFO_G_Z_LSB		(12)
+#define SMI130_MG_FIFO_G_Z_MSB		(13)
+/* FIFO length definitions*/
+#define SMI130_FIFO_SENSOR_TIME_LSB     (0)
+#define SMI130_FIFO_SENSOR_TIME_XLSB    (1)
+#define SMI130_FIFO_SENSOR_TIME_MSB     (2)
+#define SMI130_FIFO_SENSOR_TIME_LENGTH  (3)
+#define SMI130_FIFO_A_LENGTH            (6)
+#define SMI130_FIFO_G_LENGTH            (6)
+#define SMI130_FIFO_M_LENGTH            (8)
+#define SMI130_FIFO_AG_LENGTH           (12)
+#define SMI130_FIFO_AMG_LENGTH          (20)
+#define SMI130_FIFO_MA_OR_MG_LENGTH     (14)
+
+/* bus read and write length for mag, accel and gyro*/
+#define SMI130_MAG_X_DATA_LENGTH     (2)
+#define SMI130_MAG_Y_DATA_LENGTH     (2)
+#define SMI130_MAG_Z_DATA_LENGTH     (2)
+#define SMI130_MAG_R_DATA_LENGTH     (2)
+#define SMI130_MAG_XYZ_DATA_LENGTH	 (6)
+#define SMI130_MAG_XYZR_DATA_LENGTH	 (8)
+#define SMI130_MAG_YAS_DATA_LENGTH	 (8)
+#define SMI130_GYRO_DATA_LENGTH		 (2)
+#define SMI130_GYRO_XYZ_DATA_LENGTH	 (6)
+#define SMI130_ACCEL_DATA_LENGTH	 (2)
+#define SMI130_ACCEL_XYZ_DATA_LENGTH (6)
+#define SMI130_TEMP_DATA_LENGTH		 (2)
+#define SMI130_FIFO_DATA_LENGTH		 (2)
+#define SMI130_STEP_COUNTER_LENGTH	 (2)
+#define SMI130_SENSOR_TIME_LENGTH	 (3)
+
+/* Delay definitions*/
+#define SMI130_SEC_INTERFACE_GEN_READ_WRITE_DELAY    (5)
+#define SMI130_BMM150_WAKEUP_DELAY1                  (2)
+#define SMI130_BMM150_WAKEUP_DELAY2                  (3)
+#define SMI130_BMM150_WAKEUP_DELAY3                  (1)
+#define SMI130_YAS532_OFFSET_DELAY                   (2)
+#define SMI130_GEN_READ_WRITE_DELAY                  (1)
+#define SMI130_YAS532_MEASUREMENT_DELAY              (25)
+#define SMI130_YAS_ACQ_COMMAND_DELAY                 (50)
+#define SMI130_YAS532_SET_INITIAL_VALUE_DELAY        (200)
+#define SMI130_AKM_INIT_DELAY                        (60)
+/****************************************************/
+/**\name	ARRAY SIZE DEFINITIONS      */
+/***************************************************/
+#define	SMI130_ACCEL_X_DATA_SIZE   (2)
+#define	SMI130_ACCEL_Y_DATA_SIZE   (2)
+#define	SMI130_ACCEL_Z_DATA_SIZE   (2)
+#define	SMI130_ACCEL_XYZ_DATA_SIZE (6)
+
+#define	SMI130_GYRO_X_DATA_SIZE    (2)
+#define	SMI130_GYRO_Y_DATA_SIZE    (2)
+#define	SMI130_GYRO_Z_DATA_SIZE    (2)
+#define	SMI130_GYRO_XYZ_DATA_SIZE  (6)
+
+#define	SMI130_MAG_X_DATA_SIZE      (2)
+#define	SMI130_MAG_Y_DATA_SIZE      (2)
+#define	SMI130_MAG_Z_DATA_SIZE      (2)
+#define	SMI130_MAG_R_DATA_SIZE      (2)
+#define	SMI130_MAG_XYZ_DATA_SIZE    (6)
+#define	SMI130_MAG_XYZR_DATA_SIZE   (8)
+#define	SMI130_MAG_TRIM_DATA_SIZE   (16)
+
+
+#define	SMI130_TEMP_DATA_SIZE       (2)
+#define	SMI130_FIFO_DATA_SIZE       (2)
+#define	SMI130_STEP_COUNT_DATA_SIZE (2)
+
+#define	SMI130_SENSOR_TIME_DATA_SIZE      (3)
+#define	SMI130_AKM_SENSITIVITY_DATA_SIZE  (3)
+#define	SMI130_HARD_OFFSET_DATA_SIZE      (3)
+#define	SMI130_YAS_XY1Y2_DATA_SIZE        (3)
+#define	SMI130_YAS_FLAG_DATA_SIZE         (3)
+#define	SMI130_YAS_TEMP_DATA_SIZE         (3)
+#define	SMI130_YAS_H_DATA_SIZE            (3)
+#define	SMI130_YAS_S_DATA_SIZE            (3)
+#define SMI130_YAS_CORRECT_DATA_SIZE      (5)
+#define SMI130_YAS_XY1Y2T_DATA_SIZE       (8)
+#define SMI130_YAS537_CALIB_DATA_SIZE     (17)
+#define SMI130_YAS532_CALIB_DATA_SIZE     (14)
+/****************************************************/
+/**\name	ARRAY PARAMETER DEFINITIONS      */
+/***************************************************/
+#define SMI130_SENSOR_TIME_MSB_BYTE   (2)
+#define SMI130_SENSOR_TIME_XLSB_BYTE  (1)
+#define SMI130_SENSOR_TIME_LSB_BYTE   (0)
+
+#define SMI130_MAG_X_LSB_BYTE              (0)
+#define SMI130_MAG_X_MSB_BYTE              (1)
+#define SMI130_MAG_Y_LSB_BYTE              (0)
+#define SMI130_MAG_Y_MSB_BYTE              (1)
+#define SMI130_MAG_Z_LSB_BYTE              (0)
+#define SMI130_MAG_Z_MSB_BYTE              (1)
+#define SMI130_MAG_R_LSB_BYTE              (0)
+#define SMI130_MAG_R_MSB_BYTE              (1)
+#define SMI130_DATA_FRAME_MAG_X_LSB_BYTE   (0)
+#define SMI130_DATA_FRAME_MAG_X_MSB_BYTE   (1)
+#define SMI130_DATA_FRAME_MAG_Y_LSB_BYTE   (2)
+#define SMI130_DATA_FRAME_MAG_Y_MSB_BYTE   (3)
+#define SMI130_DATA_FRAME_MAG_Z_LSB_BYTE   (4)
+#define SMI130_DATA_FRAME_MAG_Z_MSB_BYTE   (5)
+#define SMI130_DATA_FRAME_MAG_R_LSB_BYTE   (6)
+#define SMI130_DATA_FRAME_MAG_R_MSB_BYTE   (7)
+
+#define SMI130_GYRO_X_LSB_BYTE              (0)
+#define SMI130_GYRO_X_MSB_BYTE              (1)
+#define SMI130_GYRO_Y_LSB_BYTE              (0)
+#define SMI130_GYRO_Y_MSB_BYTE              (1)
+#define SMI130_GYRO_Z_LSB_BYTE              (0)
+#define SMI130_GYRO_Z_MSB_BYTE              (1)
+#define SMI130_DATA_FRAME_GYRO_X_LSB_BYTE   (0)
+#define SMI130_DATA_FRAME_GYRO_X_MSB_BYTE   (1)
+#define SMI130_DATA_FRAME_GYRO_Y_LSB_BYTE   (2)
+#define SMI130_DATA_FRAME_GYRO_Y_MSB_BYTE   (3)
+#define SMI130_DATA_FRAME_GYRO_Z_LSB_BYTE   (4)
+#define SMI130_DATA_FRAME_GYRO_Z_MSB_BYTE   (5)
+
+#define SMI130_ACCEL_X_LSB_BYTE              (0)
+#define SMI130_ACCEL_X_MSB_BYTE              (1)
+#define SMI130_ACCEL_Y_LSB_BYTE              (0)
+#define SMI130_ACCEL_Y_MSB_BYTE              (1)
+#define SMI130_ACCEL_Z_LSB_BYTE              (0)
+#define SMI130_ACCEL_Z_MSB_BYTE              (1)
+#define SMI130_DATA_FRAME_ACCEL_X_LSB_BYTE   (0)
+#define SMI130_DATA_FRAME_ACCEL_X_MSB_BYTE   (1)
+#define SMI130_DATA_FRAME_ACCEL_Y_LSB_BYTE   (2)
+#define SMI130_DATA_FRAME_ACCEL_Y_MSB_BYTE   (3)
+#define SMI130_DATA_FRAME_ACCEL_Z_LSB_BYTE   (4)
+#define SMI130_DATA_FRAME_ACCEL_Z_MSB_BYTE   (5)
+
+#define	SMI130_TEMP_LSB_BYTE    (0)
+#define	SMI130_TEMP_MSB_BYTE    (1)
+
+#define	SMI130_FIFO_LENGTH_LSB_BYTE    (0)
+#define	SMI130_FIFO_LENGTH_MSB_BYTE    (1)
+
+#define	SMI130_STEP_COUNT_LSB_BYTE    (0)
+#define	SMI130_STEP_COUNT_MSB_BYTE    (1)
+/****************************************************/
+/**\name	ERROR CODES       */
+/***************************************************/
+
+#define E_SMI130_NULL_PTR			((s8)-127)
+#define E_SMI130_COMM_RES			((s8)-1)
+#define E_SMI130_OUT_OF_RANGE		((s8)-2)
+#define E_SMI130_BUSY				((s8)-3)
+#define	SUCCESS						((u8)0)
+#define	ERROR						((s8)-1)
+
+/* Constants */
+#define SMI130_NULL						(0)
+#define SMI130_DELAY_SETTLING_TIME		(5)
+/*This refers SMI130 return type as s8 */
+#define SMI130_RETURN_FUNCTION_TYPE        s8
+/****************************************************/
+/**\name	REGISTER DEFINITIONS       */
+/***************************************************/
+/*******************/
+/**\name CHIP ID */
+/*******************/
+#define SMI130_USER_CHIP_ID_ADDR				(0x00)
+/*******************/
+/**\name ERROR STATUS */
+/*******************/
+#define SMI130_USER_ERROR_ADDR					(0X02)
+/*******************/
+/**\name POWER MODE STATUS */
+/*******************/
+#define SMI130_USER_PMU_STAT_ADDR				(0X03)
+/*******************/
+/**\name MAG DATA REGISTERS */
+/*******************/
+#define SMI130_USER_DATA_0_ADDR					(0X04)
+#define SMI130_USER_DATA_1_ADDR					(0X05)
+#define SMI130_USER_DATA_2_ADDR					(0X06)
+#define SMI130_USER_DATA_3_ADDR					(0X07)
+#define SMI130_USER_DATA_4_ADDR					(0X08)
+#define SMI130_USER_DATA_5_ADDR					(0X09)
+#define SMI130_USER_DATA_6_ADDR					(0X0A)
+#define SMI130_USER_DATA_7_ADDR					(0X0B)
+/*******************/
+/**\name GYRO DATA REGISTERS */
+/*******************/
+#define SMI130_USER_DATA_8_ADDR					(0X0C)
+#define SMI130_USER_DATA_9_ADDR					(0X0D)
+#define SMI130_USER_DATA_10_ADDR				(0X0E)
+#define SMI130_USER_DATA_11_ADDR				(0X0F)
+#define SMI130_USER_DATA_12_ADDR				(0X10)
+#define SMI130_USER_DATA_13_ADDR				(0X11)
+#define SMI130_USER_DATA_14_ADDR				(0X12)
+#define SMI130_USER_DATA_15_ADDR				(0X13)
+/*******************/
+/**\name ACCEL DATA REGISTERS */
+/*******************/
+#define SMI130_USER_DATA_16_ADDR				(0X14)
+#define SMI130_USER_DATA_17_ADDR				(0X15)
+#define SMI130_USER_DATA_18_ADDR				(0X16)
+#define SMI130_USER_DATA_19_ADDR				(0X17)
+/*******************/
+/**\name SENSOR TIME REGISTERS */
+/*******************/
+#define SMI130_USER_SENSORTIME_0_ADDR			(0X18)
+#define SMI130_USER_SENSORTIME_1_ADDR			(0X19)
+#define SMI130_USER_SENSORTIME_2_ADDR			(0X1A)
+/*******************/
+/**\name STATUS REGISTER FOR SENSOR STATUS FLAG */
+/*******************/
+#define SMI130_USER_STAT_ADDR					(0X1B)
+/*******************/
+/**\name INTERRUPT STATUS REGISTERS */
+/*******************/
+#define SMI130_USER_INTR_STAT_0_ADDR			(0X1C)
+#define SMI130_USER_INTR_STAT_1_ADDR			(0X1D)
+#define SMI130_USER_INTR_STAT_2_ADDR			(0X1E)
+#define SMI130_USER_INTR_STAT_3_ADDR			(0X1F)
+/*******************/
+/**\name TEMPERATURE REGISTERS */
+/*******************/
+#define SMI130_USER_TEMPERATURE_0_ADDR			(0X20)
+#define SMI130_USER_TEMPERATURE_1_ADDR			(0X21)
+/*******************/
+/**\name FIFO REGISTERS */
+/*******************/
+#define SMI130_USER_FIFO_LENGTH_0_ADDR			(0X22)
+#define SMI130_USER_FIFO_LENGTH_1_ADDR			(0X23)
+#define SMI130_USER_FIFO_DATA_ADDR				(0X24)
+/***************************************************/
+/**\name ACCEL CONFIG REGISTERS  FOR ODR, BANDWIDTH AND UNDERSAMPLING*/
+/******************************************************/
+#define SMI130_USER_ACCEL_CONFIG_ADDR			(0X40)
+/*******************/
+/**\name ACCEL RANGE */
+/*******************/
+#define SMI130_USER_ACCEL_RANGE_ADDR            (0X41)
+/***************************************************/
+/**\name GYRO CONFIG REGISTERS  FOR ODR AND BANDWIDTH */
+/******************************************************/
+#define SMI130_USER_GYRO_CONFIG_ADDR            (0X42)
+/*******************/
+/**\name GYRO RANGE */
+/*******************/
+#define SMI130_USER_GYRO_RANGE_ADDR             (0X43)
+/***************************************************/
+/**\name MAG CONFIG REGISTERS  FOR ODR*/
+/******************************************************/
+#define SMI130_USER_MAG_CONFIG_ADDR				(0X44)
+/***************************************************/
+/**\name REGISTER FOR GYRO AND ACCEL DOWNSAMPLING RATES FOR FIFO*/
+/******************************************************/
+#define SMI130_USER_FIFO_DOWN_ADDR              (0X45)
+/***************************************************/
+/**\name FIFO CONFIG REGISTERS*/
+/******************************************************/
+#define SMI130_USER_FIFO_CONFIG_0_ADDR          (0X46)
+#define SMI130_USER_FIFO_CONFIG_1_ADDR          (0X47)
+/***************************************************/
+/**\name MAG INTERFACE REGISTERS*/
+/******************************************************/
+#define SMI130_USER_MAG_IF_0_ADDR				(0X4B)
+#define SMI130_USER_MAG_IF_1_ADDR				(0X4C)
+#define SMI130_USER_MAG_IF_2_ADDR				(0X4D)
+#define SMI130_USER_MAG_IF_3_ADDR				(0X4E)
+#define SMI130_USER_MAG_IF_4_ADDR				(0X4F)
+/***************************************************/
+/**\name INTERRUPT ENABLE REGISTERS*/
+/******************************************************/
+#define SMI130_USER_INTR_ENABLE_0_ADDR			(0X50)
+#define SMI130_USER_INTR_ENABLE_1_ADDR			(0X51)
+#define SMI130_USER_INTR_ENABLE_2_ADDR			(0X52)
+#define SMI130_USER_INTR_OUT_CTRL_ADDR			(0X53)
+/***************************************************/
+/**\name LATCH DURATION REGISTERS*/
+/******************************************************/
+#define SMI130_USER_INTR_LATCH_ADDR				(0X54)
+/***************************************************/
+/**\name MAP INTERRUPT 1 and 2 REGISTERS*/
+/******************************************************/
+#define SMI130_USER_INTR_MAP_0_ADDR				(0X55)
+#define SMI130_USER_INTR_MAP_1_ADDR				(0X56)
+#define SMI130_USER_INTR_MAP_2_ADDR				(0X57)
+/***************************************************/
+/**\name DATA SOURCE REGISTERS*/
+/******************************************************/
+#define SMI130_USER_INTR_DATA_0_ADDR			(0X58)
+#define SMI130_USER_INTR_DATA_1_ADDR			(0X59)
+/***************************************************/
+/**\name
+INTERRUPT THRESHOLD, HYSTERESIS, DURATION, MODE CONFIGURATION REGISTERS*/
+/******************************************************/
+#define SMI130_USER_INTR_LOWHIGH_0_ADDR			(0X5A)
+#define SMI130_USER_INTR_LOWHIGH_1_ADDR			(0X5B)
+#define SMI130_USER_INTR_LOWHIGH_2_ADDR			(0X5C)
+#define SMI130_USER_INTR_LOWHIGH_3_ADDR			(0X5D)
+#define SMI130_USER_INTR_LOWHIGH_4_ADDR			(0X5E)
+#define SMI130_USER_INTR_MOTION_0_ADDR			(0X5F)
+#define SMI130_USER_INTR_MOTION_1_ADDR			(0X60)
+#define SMI130_USER_INTR_MOTION_2_ADDR			(0X61)
+#define SMI130_USER_INTR_MOTION_3_ADDR			(0X62)
+#define SMI130_USER_INTR_TAP_0_ADDR				(0X63)
+#define SMI130_USER_INTR_TAP_1_ADDR				(0X64)
+#define SMI130_USER_INTR_ORIENT_0_ADDR			(0X65)
+#define SMI130_USER_INTR_ORIENT_1_ADDR			(0X66)
+#define SMI130_USER_INTR_FLAT_0_ADDR			(0X67)
+#define SMI130_USER_INTR_FLAT_1_ADDR			(0X68)
+/***************************************************/
+/**\name FAST OFFSET CONFIGURATION REGISTER*/
+/******************************************************/
+#define SMI130_USER_FOC_CONFIG_ADDR				(0X69)
+/***************************************************/
+/**\name MISCELLANEOUS CONFIGURATION REGISTER*/
+/******************************************************/
+#define SMI130_USER_CONFIG_ADDR					(0X6A)
+/***************************************************/
+/**\name SERIAL INTERFACE SETTINGS REGISTER*/
+/******************************************************/
+#define SMI130_USER_IF_CONFIG_ADDR				(0X6B)
+/***************************************************/
+/**\name GYRO POWER MODE TRIGGER REGISTER */
+/******************************************************/
+#define SMI130_USER_PMU_TRIGGER_ADDR			(0X6C)
+/***************************************************/
+/**\name SELF_TEST REGISTER*/
+/******************************************************/
+#define SMI130_USER_SELF_TEST_ADDR				(0X6D)
+/***************************************************/
+/**\name SPI,I2C SELECTION REGISTER*/
+/******************************************************/
+#define SMI130_USER_NV_CONFIG_ADDR				(0x70)
+/***************************************************/
+/**\name ACCEL AND GYRO OFFSET REGISTERS*/
+/******************************************************/
+#define SMI130_USER_OFFSET_0_ADDR				(0X71)
+#define SMI130_USER_OFFSET_1_ADDR				(0X72)
+#define SMI130_USER_OFFSET_2_ADDR				(0X73)
+#define SMI130_USER_OFFSET_3_ADDR				(0X74)
+#define SMI130_USER_OFFSET_4_ADDR				(0X75)
+#define SMI130_USER_OFFSET_5_ADDR				(0X76)
+#define SMI130_USER_OFFSET_6_ADDR				(0X77)
+/***************************************************/
+/**\name STEP COUNTER INTERRUPT REGISTERS*/
+/******************************************************/
+#define SMI130_USER_STEP_COUNT_0_ADDR			(0X78)
+#define SMI130_USER_STEP_COUNT_1_ADDR			(0X79)
+/***************************************************/
+/**\name STEP COUNTER CONFIGURATION REGISTERS*/
+/******************************************************/
+#define SMI130_USER_STEP_CONFIG_0_ADDR			(0X7A)
+#define SMI130_USER_STEP_CONFIG_1_ADDR			(0X7B)
+/***************************************************/
+/**\name COMMAND REGISTER*/
+/******************************************************/
+#define SMI130_CMD_COMMANDS_ADDR				(0X7E)
+/***************************************************/
+/**\name PAGE REGISTERS*/
+/******************************************************/
+#define SMI130_CMD_EXT_MODE_ADDR				(0X7F)
+#define SMI130_COM_C_TRIM_FIVE_ADDR				(0X05)
+
+/****************************************************/
+/**\name	SHIFT VALUE DEFINITION       */
+/***************************************************/
+#define SMI130_SHIFT_BIT_POSITION_BY_01_BIT      (1)
+#define SMI130_SHIFT_BIT_POSITION_BY_02_BITS     (2)
+#define SMI130_SHIFT_BIT_POSITION_BY_03_BITS     (3)
+#define SMI130_SHIFT_BIT_POSITION_BY_04_BITS     (4)
+#define SMI130_SHIFT_BIT_POSITION_BY_05_BITS     (5)
+#define SMI130_SHIFT_BIT_POSITION_BY_06_BITS     (6)
+#define SMI130_SHIFT_BIT_POSITION_BY_07_BITS     (7)
+#define SMI130_SHIFT_BIT_POSITION_BY_08_BITS     (8)
+#define SMI130_SHIFT_BIT_POSITION_BY_09_BITS     (9)
+#define SMI130_SHIFT_BIT_POSITION_BY_12_BITS     (12)
+#define SMI130_SHIFT_BIT_POSITION_BY_13_BITS     (13)
+#define SMI130_SHIFT_BIT_POSITION_BY_14_BITS     (14)
+#define SMI130_SHIFT_BIT_POSITION_BY_15_BITS     (15)
+#define SMI130_SHIFT_BIT_POSITION_BY_16_BITS     (16)
+
+/****************************************************/
+/**\name	 DEFINITIONS USED FOR YAMAHA-YAS532 */
+/***************************************************/
+#define YAS532_MAG_STATE_NORMAL				(0)
+#define YAS532_MAG_STATE_INIT_COIL			(1)
+#define YAS532_MAG_STATE_MEASURE_OFFSET		(2)
+#define YAS532_MAG_INITCOIL_TIMEOUT			(1000)
+#define YAS532_MAG_NOTRANS_POSITION			(3)
+#define YAS532_DEFAULT_SENSOR_DELAY			(50)
+#define YAS532_DATA_OVERFLOW				(8190)
+#define YAS532_DATA_UNDERFLOW				(0)
+#define YAS532_MAG_LOG				(20)
+#define YAS532_MAG_TEMPERATURE_LOG			(10)
+#define YAS532_TEMP20DEGREE_TYPICAL			(390)
+#define YAS532_VERSION_AC_COEF_X			(850)
+#define YAS532_VERSION_AC_COEF_Y1			(750)
+#define YAS532_VERSION_AC_COEF_Y2			(750)
+#define YAS532_DATA_CENTER					(4096)
+/****************************************************/
+/**\name	YAMAHA-YAS532 OFFSET DEFINITION */
+/***************************************************/
+static const s8 INVALID_OFFSET[] = {0x7f, 0x7f, 0x7f};
+#define set_vector(to, from) \
+	{int _l; for (_l = 0; _l < 3; _l++) (to)[_l] = (from)[_l]; }
+#define is_valid_offset(a) \
+	(((a)[0] <= 31) && ((a)[1] <= 31) && ((a)[2] <= 31) \
+		&& (-31 <= (a)[0]) && (-31 <= (a)[1]) && (-31 <= (a)[2]))
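+
+/*
+ * Illustrative sketch (not part of the driver): how the offset helpers
+ * above fit together. The parameter names stored_offset and
+ * applied_offset are hypothetical; only set_vector(), is_valid_offset()
+ * and INVALID_OFFSET from this header are used.
+ */
+static inline void smi130_example_restore_offset(const s8 *stored_offset,
+		s8 *applied_offset)
+{
+	/* Accept the stored vector only if every component is within the
+	 * -31..31 range checked by is_valid_offset(); otherwise fall back
+	 * to the INVALID_OFFSET marker. */
+	if (is_valid_offset(stored_offset)) {
+		set_vector(applied_offset, stored_offset);
+	} else {
+		set_vector(applied_offset, INVALID_OFFSET);
+	}
+}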
+
+/**************************************************/
+/**\name	YAS532 CALIB DATA DEFINITIONS  */
+/*************************************************/
+
+
+/* register address of YAS532*/
+#define SMI130_YAS532_TESTR1			(0x88)
+#define SMI130_YAS532_TESTR2			(0x89)
+#define SMI130_YAS532_RCOIL				(0x81)
+#define SMI130_YAS532_COMMAND_REGISTER	(0x82)
+#define SMI130_YAS532_DATA_REGISTER		(0xB0)
+/* calib data register definition*/
+#define SMI130_YAS532_CALIB_CX	        (0x90)
+#define SMI130_YAS532_CALIB_CY1	        (0x91)
+#define SMI130_YAS532_CALIB_CY2	        (0x92)
+#define SMI130_YAS532_CALIB1	        (0x93)
+#define SMI130_YAS532_CALIB2	        (0x94)
+#define SMI130_YAS532_CALIB3	        (0x95)
+#define SMI130_YAS532_CALIB4	        (0x96)
+#define SMI130_YAS532_CALIB5	        (0x97)
+#define SMI130_YAS532_CLAIB6	        (0x98)
+#define SMI130_YAS532_CALIB7	        (0x99)
+#define SMI130_YAS532_CALIB8	        (0x9A)
+#define SMI130_YAS532_CALIIB9	        (0x9B)
+#define SMI130_YAS532_CALIB10	        (0x9C)
+#define SMI130_YAS532_CALIB11	        (0x9D)
+/* offset definition */
+#define SMI130_YAS532_OFFSET_X	        (0x85)
+#define SMI130_YAS532_OFFSET_Y	        (0x86)
+#define SMI130_YAS532_OFFSET_Z	        (0x87)
+/* data to write register for yas532*/
+#define SMI130_YAS532_WRITE_TESTR1	    (0x00)
+#define SMI130_YAS532_WRITE_TESTR2	    (0x00)
+#define SMI130_YAS532_WRITE_RCOIL       (0x00)
+/**************************************************/
+/**\name	YAS537 DEFINITION  */
+/*************************************************/
+
+#define	YAS537_SRSTR_DATA		        (0x02)
+#define	YAS537_WRITE_A_D_CONVERTER		(0x03)
+#define	YAS537_WRITE_A_D_CONVERTER2		(0xF8)
+#define	YAS537_WRITE_FILTER             (0x08)
+#define	YAS537_WRITE_CONFR              (0x08)
+#define	YAS537_WRITE_TEMP_CALIB         (0xFF)
+#define	YAS537_SET_COMMAND_REGISTER     (0x01)
+
+/**************************************************/
+/**\name	YAS537 REGISTER DEFINITION  */
+/*************************************************/
+#define	YAS537_REG_SRSTR				(0x90)
+#define	YAS537_REG_CALR_C0				(0xC0)
+#define	YAS537_REG_CALR_C1				(0xC1)
+#define	YAS537_REG_CALR_C2				(0xC2)
+#define	YAS537_REG_CALR_C3				(0xC3)
+#define	YAS537_REG_CALR_C4				(0xC4)
+#define	YAS537_REG_CALR_C5				(0xC5)
+#define	YAS537_REG_CALR_C6				(0xC6)
+#define	YAS537_REG_CALR_C7				(0xC7)
+#define	YAS537_REG_CALR_C8				(0xC8)
+#define	YAS537_REG_CALR_C9				(0xC9)
+#define	YAS537_REG_CALR_CA				(0xCA)
+#define	YAS537_REG_CALR_CB				(0xCB)
+#define	YAS537_REG_CALR_CC				(0xCC)
+#define	YAS537_REG_CALR_CD				(0xCD)
+#define	YAS537_REG_CALR_CE				(0xCE)
+#define	YAS537_REG_CALR_CF				(0xCF)
+#define	YAS537_REG_CALR_DO				(0xD0)
+#define	YAS537_REG_MTCR					(0x93)
+#define	YAS537_REG_CONFR				(0x82)
+#define	SMI130_REG_YAS537_CMDR			(0x81)
+#define	YAS537_REG_OXR					(0x84)
+#define	YAS537_REG_AVRR					(0x87)
+#define	YAS537_REG_HCKR					(0x88)
+#define	YAS537_REG_LCKR					(0x89)
+#define	YAS537_REG_ADCCALR				(0x91)
+#define	YAS537_REG_ADCCALR_ONE			(0x92)
+#define	YAS537_REG_OCR					(0x9E)
+#define	YAS537_REG_TRMR			        (0x9F)
+#define	YAS537_REG_TEMPERATURE_0		(0xB0)
+#define	YAS537_REG_TEMPERATURE_1		(0xB1)
+#define	YAS537_REG_DATA_X_0				(0xB2)
+#define	YAS537_REG_DATA_X_1				(0xB3)
+#define	YAS537_REG_DATA_Y1_0			(0xB4)
+#define	YAS537_REG_DATA_Y1_1			(0xB5)
+#define	YAS537_REG_DATA_Y2_0			(0xB6)
+#define	YAS537_REG_DATA_Y2_1			(0xB7)
+#define YAS537_MAG_STATE_NORMAL			(0)
+#define YAS537_MAG_STATE_INIT_COIL		(1)
+#define YAS537_MAG_STATE_RECORD_DATA	(2)
+#define YAS537_DATA_UNDERFLOW			(0)
+#define YAS537_DATA_OVERFLOW			(16383)
+/****************************************************/
+/**\name	YAS537_set vector */
+/***************************************************/
+#define yas537_set_vector(to, from) \
+	{int _l; for (_l = 0; _l < 3; _l++) (to)[_l] = (from)[_l]; }
+
+#ifndef ABS
+#define ABS(a)		((a) > 0 ? (a) : -(a)) /*!< Absolute value */
+#endif
+/****************************************************/
+/**\name	AKM09911 AND AKM09912 DEFINITION */
+/***************************************************/
+#define AKM09912_SENSITIVITY_DIV	(256)
+#define AKM09912_SENSITIVITY		(128)
+#define AKM09911_SENSITIVITY_DIV	(128)
+#define AKM_ASAX	(0)
+#define AKM_ASAY	(1)
+#define AKM_ASAZ	(2)
+#define AKM_POWER_DOWN_MODE_DATA		(0x00)
+#define AKM_FUSE_ROM_MODE				(0x1F)
+#define AKM_POWER_MODE_REG				(0x31)
+#define	AKM_SINGLE_MEASUREMENT_MODE		(0x01)
+#define AKM_DATA_REGISTER				(0x11)
+/*! AKM09912 Register definition */
+#define AKM09912_CHIP_ID_REG			(0x01)
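+
+/*
+ * Illustrative sketch (assumption): the AKM datasheets give the
+ * sensitivity adjustment as Hadj = H * (ASA + 128) / 128 for AKM09911
+ * and Hadj = H * (ASA + 128) / 256 for AKM09912, which is what the
+ * *_SENSITIVITY/_DIV constants above appear to support. The formula
+ * actually used by the driver is not shown in this header.
+ */
+static inline s32 smi130_example_akm09911_adjust(s16 raw, u8 asa)
+{
+	return ((s32)raw * ((s32)asa + AKM09911_SENSITIVITY_DIV)) /
+			AKM09911_SENSITIVITY_DIV;
+}
+
+static inline s32 smi130_example_akm09912_adjust(s16 raw, u8 asa)
+{
+	return ((s32)raw * ((s32)asa + AKM09912_SENSITIVITY)) /
+			AKM09912_SENSITIVITY_DIV;
+}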
+/****************************************************/
+/**\name	BMM150 DEFINITION */
+/***************************************************/
+#define SMI130_BMM150_SET_POWER_CONTROL	(0x01)
+#define SMI130_BMM150_MAX_RETRY_WAKEUP	(5)
+#define SMI130_BMM150_POWER_ON			(0x01)
+#define SMI130_BMM150_POWER_OFF			(0x00)
+#define SMI130_BMM150_FORCE_MODE		(0x02)
+#define SMI130_BMM150_POWER_ON_SUCCESS	(0)
+#define SMI130_BMM150_POWER_ON_FAIL		((s8)-1)
+
+#define	SMI130_BMM150_DIG_X1			(0)
+#define	SMI130_BMM150_DIG_Y1			(1)
+#define	SMI130_BMM150_DIG_X2			(2)
+#define	SMI130_BMM150_DIG_Y3			(3)
+#define	SMI130_BMM150_DIG_XY1			(4)
+#define	SMI130_BMM150_DIG_XY2			(5)
+#define	SMI130_BMM150_DIG_Z1_LSB		(6)
+#define	SMI130_BMM150_DIG_Z1_MSB		(7)
+#define	SMI130_BMM150_DIG_Z2_LSB		(8)
+#define	SMI130_BMM150_DIG_Z2_MSB		(9)
+#define	SMI130_BMM150_DIG_DIG_Z3_LSB	(10)
+#define	SMI130_BMM150_DIG_DIG_Z3_MSB	(11)
+#define	SMI130_BMM150_DIG_DIG_Z4_LSB	(12)
+#define	SMI130_BMM150_DIG_DIG_Z4_MSB	(13)
+#define	SMI130_BMM150_DIG_DIG_XYZ1_LSB	(14)
+#define	SMI130_BMM150_DIG_DIG_XYZ1_MSB	(15)
+
+/**************************************************************/
+/**\name	STRUCTURE DEFINITIONS                         */
+/**************************************************************/
+/*!
+*	@brief smi130 structure
+*	This structure holds all relevant information about smi130
+*/
+struct smi130_t {
+u8 chip_id;/**< chip id of SMI130 */
+u8 dev_addr;/**< device address of SMI130 */
+s8 mag_manual_enable;/**< used to check the mag manual/auto mode status */
+SMI130_WR_FUNC_PTR;/**< bus write function pointer */
+SMI130_RD_FUNC_PTR;/**< bus read function pointer */
+SMI130_BRD_FUNC_PTR;/**< burst read function pointer */
+void (*delay_msec)(SMI130_MDELAY_DATA_TYPE);/**< delay function pointer */
+};
+/*!
+ * @brief Structure containing bmm150, akm09911 and akm09912
+ *	magnetometer values for the x, y and
+ *	z axes as s16
+ */
+struct smi130_mag_t {
+s16 x;/**< BMM150, AKM09911 and AKM09912 X raw data*/
+s16 y;/**< BMM150, AKM09911 and AKM09912 Y raw data*/
+s16 z;/**< BMM150, AKM09911 and AKM09912 Z raw data*/
+};
+/*!
+ * @brief Structure containing bmm150 xyz data and temperature
+ */
+struct smi130_mag_xyzr_t {
+s16 x;/**< BMM150 X raw data*/
+s16 y;/**< BMM150 Y raw data*/
+s16 z;/**<BMM150 Z raw data*/
+u16 r;/**<BMM150 R raw data*/
+};
+/*!
+ * @brief Structure containing gyro xyz data
+ */
+struct smi130_gyro_t {
+s16 x;/**<gyro X  data*/
+s16 y;/**<gyro Y  data*/
+s16 z;/**<gyro Z  data*/
+};
+/*!
+ * @brief Structure containing accel xyz data
+ */
+struct smi130_accel_t {
+s16 x;/**<accel X  data*/
+s16 y;/**<accel Y  data*/
+s16 z;/**<accel Z  data*/
+};
+/*!
+ * @brief Structure bmm150 mag compensated data with s32 output
+ */
+struct smi130_mag_xyz_s32_t {
+s16 x;/**<BMM150 X compensated data*/
+s16 y;/**<BMM150 Y compensated data*/
+s16 z;/**<BMM150 Z compensated data*/
+};
+/*!
+ * @brief Structure bmm150 mag trim data
+ */
+struct trim_data_t {
+s8 dig_x1;/**<BMM150 trim x1 data*/
+s8 dig_y1;/**<BMM150 trim y1 data*/
+
+s8 dig_x2;/**<BMM150 trim x2 data*/
+s8 dig_y2;/**<BMM150 trim y2 data*/
+
+u16 dig_z1;/**<BMM150 trim z1 data*/
+s16 dig_z2;/**<BMM150 trim z2 data*/
+s16 dig_z3;/**<BMM150 trim z3 data*/
+s16 dig_z4;/**<BMM150 trim z4 data*/
+
+u8 dig_xy1;/**<BMM150 trim xy1 data*/
+s8 dig_xy2;/**<BMM150 trim xy2 data*/
+
+u16 dig_xyz1;/**<BMM150 trim xyz1 data*/
+};
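+
+/*
+ * Illustrative sketch (assumption): one plausible way the
+ * SMI130_BMM150_DIG_* index constants could map a raw trim-register
+ * read into struct trim_data_t. The buffer layout and the read routine
+ * that fills buf[] are not part of this header; note that the index
+ * named SMI130_BMM150_DIG_Y3 appears to correspond to the dig_y2 byte.
+ */
+static inline void smi130_example_unpack_bmm150_trim(const u8 *buf,
+		struct trim_data_t *trim)
+{
+	trim->dig_x1 = (s8)buf[SMI130_BMM150_DIG_X1];
+	trim->dig_y1 = (s8)buf[SMI130_BMM150_DIG_Y1];
+	trim->dig_x2 = (s8)buf[SMI130_BMM150_DIG_X2];
+	trim->dig_y2 = (s8)buf[SMI130_BMM150_DIG_Y3];
+	trim->dig_xy1 = buf[SMI130_BMM150_DIG_XY1];
+	trim->dig_xy2 = (s8)buf[SMI130_BMM150_DIG_XY2];
+	trim->dig_z1 = (u16)(((u16)buf[SMI130_BMM150_DIG_Z1_MSB] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+		buf[SMI130_BMM150_DIG_Z1_LSB]);
+	trim->dig_z2 = (s16)(((u16)buf[SMI130_BMM150_DIG_Z2_MSB] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+		buf[SMI130_BMM150_DIG_Z2_LSB]);
+	trim->dig_z3 = (s16)(((u16)buf[SMI130_BMM150_DIG_DIG_Z3_MSB] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+		buf[SMI130_BMM150_DIG_DIG_Z3_LSB]);
+	trim->dig_z4 = (s16)(((u16)buf[SMI130_BMM150_DIG_DIG_Z4_MSB] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+		buf[SMI130_BMM150_DIG_DIG_Z4_LSB]);
+	trim->dig_xyz1 = (u16)(((u16)buf[SMI130_BMM150_DIG_DIG_XYZ1_MSB] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+		buf[SMI130_BMM150_DIG_DIG_XYZ1_LSB]);
+}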
+
+/*!
+*	@brief Structure for reading AKM compensating data
+*/
+struct bosch_akm_sensitivity_data_t {
+u8 asax;/**<AKM09911 and AKM09912 X sensitivity data*/
+u8 asay;/**<AKM09911 and AKM09912 Y sensitivity data*/
+u8 asaz;/**<AKM09911 and AKM09912 Z sensitivity data*/
+};
+/*!
+* @brief YAMAHA-YAS532 struct
+* Calibration YAS532 data struct
+*/
+struct bosch_yas532_calib_data_t {
+s32 cx;/**<YAS532 calib cx data */
+s32 cy1;/**<YAS532 calib cy1 data */
+s32 cy2;/**<YAS532 calib cy2 data */
+s32 a2;/**<YAS532 calib a2 data */
+s32 a3;/**<YAS532 calib a3 data */
+s32 a4;/**<YAS532 calib a4 data */
+s32 a5;/**<YAS532 calib a5 data */
+s32 a6;/**<YAS532 calib a6 data */
+s32 a7;/**<YAS532 calib a7 data */
+s32 a8;/**<YAS532 calib a8 data */
+s32 a9;/**<YAS532 calib a9 data */
+s32 k;/**<YAS532 calib k data */
+s8 rxy1y2[3];/**<YAS532 calib rxy1y2 data */
+u8 fxy1y2[3];/**<YAS532 calib fxy1y2 data */
+};
+/*!
+* @brief YAS532 Temperature structure
+*/
+#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+struct yas_temp_filter_t {
+u16 log[YAS532_MAG_TEMPERATURE_LOG];/**<YAS532 temp log array */
+u8 num;/**< used to increment the index */
+u8 idx;/**< used to increment the index */
+};
+#endif
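+
+#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+/*
+ * Illustrative sketch (assumption): one plausible way the rolling
+ * temperature log above could be updated, treating idx as the circular
+ * write position and num as the fill count. The driver's actual update
+ * routine is not part of this header.
+ */
+static inline void smi130_example_yas_temp_log(struct yas_temp_filter_t *filt,
+		u16 sample)
+{
+	filt->log[filt->idx] = sample;
+	filt->idx = (u8)((filt->idx + 1) % YAS532_MAG_TEMPERATURE_LOG);
+	if (filt->num < YAS532_MAG_TEMPERATURE_LOG)
+		filt->num++;
+}
+#endif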
+/*!
+* @brief YAS532 sensor initialization
+*/
+struct yas532_t {
+struct bosch_yas532_calib_data_t calib_yas532;/**< calib data */
+s8 measure_state;/**< update measure state */
+s8 v_hard_offset_s8[3];/**< offset write array*/
+s32 coef[3];/**< coefficient data */
+s8 overflow;/**< overflow condition check */
+u8 dev_id;/**< device id information */
+const s8 *transform;/**< transform condition check  */
+#if YAS532_MAG_LOG < YAS532_MAG_TEMPERATURE_LOG
+struct yas_temp_filter_t temp_data;/**< temp data */
+#endif
+u16 last_raw[4];/**< raw data */
+};
+/*!
+* @brief Used for reading the YAS532 XYZ data
+*/
+struct yas532_vector {
+s32 yas532_vector_xyz[3];/**< YAS532 compensated xyz data*/
+};
+/**
+ * @struct yas_vector
+ * @brief Stores the sensor data
+ */
+struct yas_vector {
+	s32 yas537_vector_xyz[3]; /*!< vector data */
+};
+/*!
+* @brief YAMAHA-YAS537 struct
+* Calibration YAS537 data struct
+*/
+struct bosch_yas537_calib_data_t {
+s8 a2;/**<YAS537 calib a2 data */
+s8 a3;/**<YAS537 calib a3 data */
+s8 a4;/**<YAS537 calib a4 data */
+s16 a5;/**<YAS537 calib a5 data */
+s8 a6;/**<YAS537 calib a6 data */
+s8 a7;/**<YAS537 calib a7 data */
+s8 a8;/**<YAS537 calib a8 data */
+s16 a9;/**<YAS537 calib a9 data */
+u8 k;/**<YAS537 calib k data */
+u8 ver;/**<YAS537 calib ver data*/
+};
+/*!
+* @brief YAS537 sensor initialization
+*/
+struct yas537_t {
+struct bosch_yas537_calib_data_t calib_yas537;/**< calib data */
+s8 measure_state;/**< update measure state */
+s8 hard_offset[3];/**< offset write array*/
+u16 last_after_rcoil[3];/**< rcoil write array*/
+s32 coef[3];/**< coefficient data */
+s8 overflow;/**< overflow condition check */
+u8 dev_id;/**< device id information */
+u8 average;/**<average selection for offset configuration*/
+const s8 *transform;/**< transform condition check  */
+u16 last_raw[4];/**< raw data */
+struct yas_vector xyz; /*!< X, Y, Z measurement data of the sensor */
+};
+/**************************************************************/
+/**\name	USER DATA REGISTERS DEFINITION START    */
+/**************************************************************/
+
+/**************************************************************/
+/**\name	CHIP ID LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Chip ID Description - Reg Addr --> (0x00), Bit --> 0...7 */
+#define SMI130_USER_CHIP_ID__POS             (0)
+#define SMI130_USER_CHIP_ID__MSK            (0xFF)
+#define SMI130_USER_CHIP_ID__LEN             (8)
+#define SMI130_USER_CHIP_ID__REG             (SMI130_USER_CHIP_ID_ADDR)
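+
+/*
+ * Illustrative sketch (not part of the driver): the __POS/__LEN/__MSK
+ * triplets throughout this header describe bit fields inside a register
+ * byte. A minimal pair of helpers built on that pattern could look like
+ * this; the names are hypothetical and the driver may provide its own
+ * get/set bit-slice macros instead.
+ */
+static inline u8 smi130_example_get_bitslice(u8 regval, u8 msk, u8 pos)
+{
+	return (u8)((regval & msk) >> pos);
+}
+
+static inline u8 smi130_example_set_bitslice(u8 regval, u8 msk, u8 pos,
+		u8 val)
+{
+	return (u8)((regval & (u8)~msk) | ((u8)(val << pos) & msk));
+}
+
+/*
+ * Usage example: extract the chip id field from a raw read of (0x00):
+ *	id = smi130_example_get_bitslice(regval,
+ *		SMI130_USER_CHIP_ID__MSK, SMI130_USER_CHIP_ID__POS);
+ */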
+/**************************************************************/
+/**\name	ERROR STATUS LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Error Description - Reg Addr --> (0x02), Bit --> 0...7 */
+#define SMI130_USER_ERR_STAT__POS               (0)
+#define SMI130_USER_ERR_STAT__LEN               (8)
+#define SMI130_USER_ERR_STAT__MSK               (0xFF)
+#define SMI130_USER_ERR_STAT__REG               (SMI130_USER_ERROR_ADDR)
+
+/* Error Description - Reg Addr --> (0x02), Bit --> 0 */
+#define SMI130_USER_FATAL_ERR__POS               (0)
+#define SMI130_USER_FATAL_ERR__LEN               (1)
+#define SMI130_USER_FATAL_ERR__MSK               (0x01)
+#define SMI130_USER_FATAL_ERR__REG               (SMI130_USER_ERROR_ADDR)
+
+/* Error Description - Reg Addr --> (0x02), Bit --> 1...4 */
+#define SMI130_USER_ERR_CODE__POS               (1)
+#define SMI130_USER_ERR_CODE__LEN               (4)
+#define SMI130_USER_ERR_CODE__MSK               (0x1E)
+#define SMI130_USER_ERR_CODE__REG               (SMI130_USER_ERROR_ADDR)
+
+/* Error Description - Reg Addr --> (0x02), Bit --> 5 */
+#define SMI130_USER_I2C_FAIL_ERR__POS               (5)
+#define SMI130_USER_I2C_FAIL_ERR__LEN               (1)
+#define SMI130_USER_I2C_FAIL_ERR__MSK               (0x20)
+#define SMI130_USER_I2C_FAIL_ERR__REG               (SMI130_USER_ERROR_ADDR)
+
+/* Error Description - Reg Addr --> (0x02), Bit --> 6 */
+#define SMI130_USER_DROP_CMD_ERR__POS              (6)
+#define SMI130_USER_DROP_CMD_ERR__LEN              (1)
+#define SMI130_USER_DROP_CMD_ERR__MSK              (0x40)
+#define SMI130_USER_DROP_CMD_ERR__REG              (SMI130_USER_ERROR_ADDR)
+/**************************************************************/
+/**\name	MAG DATA READY LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Error Description - Reg Addr --> (0x02), Bit --> 7 */
+#define SMI130_USER_MAG_DADA_RDY_ERR__POS               (7)
+#define SMI130_USER_MAG_DADA_RDY_ERR__LEN               (1)
+#define SMI130_USER_MAG_DADA_RDY_ERR__MSK               (0x80)
+#define SMI130_USER_MAG_DADA_RDY_ERR__REG               (SMI130_USER_ERROR_ADDR)
+/**************************************************************/
+/**\name	MAG POWER MODE LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* PMU_Status Description of MAG - Reg Addr --> (0x03), Bit --> 1..0 */
+#define SMI130_USER_MAG_POWER_MODE_STAT__POS		(0)
+#define SMI130_USER_MAG_POWER_MODE_STAT__LEN		(2)
+#define SMI130_USER_MAG_POWER_MODE_STAT__MSK		(0x03)
+#define SMI130_USER_MAG_POWER_MODE_STAT__REG		\
+(SMI130_USER_PMU_STAT_ADDR)
+/**************************************************************/
+/**\name	GYRO POWER MODE LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* PMU_Status Description of GYRO - Reg Addr --> (0x03), Bit --> 3...2 */
+#define SMI130_USER_GYRO_POWER_MODE_STAT__POS               (2)
+#define SMI130_USER_GYRO_POWER_MODE_STAT__LEN               (2)
+#define SMI130_USER_GYRO_POWER_MODE_STAT__MSK               (0x0C)
+#define SMI130_USER_GYRO_POWER_MODE_STAT__REG		      \
+(SMI130_USER_PMU_STAT_ADDR)
+/**************************************************************/
+/**\name	ACCEL POWER MODE LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* PMU_Status Description of ACCEL - Reg Addr --> (0x03), Bit --> 5...4 */
+#define SMI130_USER_ACCEL_POWER_MODE_STAT__POS               (4)
+#define SMI130_USER_ACCEL_POWER_MODE_STAT__LEN               (2)
+#define SMI130_USER_ACCEL_POWER_MODE_STAT__MSK               (0x30)
+#define SMI130_USER_ACCEL_POWER_MODE_STAT__REG		    \
+(SMI130_USER_PMU_STAT_ADDR)
+/**************************************************************/
+/**\name	MAG DATA XYZ LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Mag_X(LSB) Description - Reg Addr --> (0x04), Bit --> 0...7 */
+#define SMI130_USER_DATA_0_MAG_X_LSB__POS           (0)
+#define SMI130_USER_DATA_0_MAG_X_LSB__LEN           (8)
+#define SMI130_USER_DATA_0_MAG_X_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_0_MAG_X_LSB__REG          (SMI130_USER_DATA_0_ADDR)
+
+/* Mag_X(LSB) Description - Reg Addr --> (0x04), Bit --> 3...7 */
+#define SMI130_USER_DATA_MAG_X_LSB__POS           (3)
+#define SMI130_USER_DATA_MAG_X_LSB__LEN           (5)
+#define SMI130_USER_DATA_MAG_X_LSB__MSK          (0xF8)
+#define SMI130_USER_DATA_MAG_X_LSB__REG          (SMI130_USER_DATA_0_ADDR)
+
+/* Mag_X(MSB) Description - Reg Addr --> (0x05), Bit --> 0...7 */
+#define SMI130_USER_DATA_1_MAG_X_MSB__POS           (0)
+#define SMI130_USER_DATA_1_MAG_X_MSB__LEN           (8)
+#define SMI130_USER_DATA_1_MAG_X_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_1_MAG_X_MSB__REG          (SMI130_USER_DATA_1_ADDR)
+
+/* Mag_Y(LSB) Description - Reg Addr --> (0x06), Bit --> 0...7 */
+#define SMI130_USER_DATA_2_MAG_Y_LSB__POS           (0)
+#define SMI130_USER_DATA_2_MAG_Y_LSB__LEN           (8)
+#define SMI130_USER_DATA_2_MAG_Y_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_2_MAG_Y_LSB__REG          (SMI130_USER_DATA_2_ADDR)
+
+/* Mag_Y(LSB) Description - Reg Addr --> (0x06), Bit --> 3...7 */
+#define SMI130_USER_DATA_MAG_Y_LSB__POS           (3)
+#define SMI130_USER_DATA_MAG_Y_LSB__LEN           (5)
+#define SMI130_USER_DATA_MAG_Y_LSB__MSK          (0xF8)
+#define SMI130_USER_DATA_MAG_Y_LSB__REG          (SMI130_USER_DATA_2_ADDR)
+
+/* Mag_Y(MSB) Description - Reg Addr --> (0x07), Bit --> 0...7 */
+#define SMI130_USER_DATA_3_MAG_Y_MSB__POS           (0)
+#define SMI130_USER_DATA_3_MAG_Y_MSB__LEN           (8)
+#define SMI130_USER_DATA_3_MAG_Y_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_3_MAG_Y_MSB__REG          (SMI130_USER_DATA_3_ADDR)
+
+/* Mag_Z(LSB) Description - Reg Addr --> (0x08), Bit --> 0...7 */
+#define SMI130_USER_DATA_4_MAG_Z_LSB__POS           (0)
+#define SMI130_USER_DATA_4_MAG_Z_LSB__LEN           (8)
+#define SMI130_USER_DATA_4_MAG_Z_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_4_MAG_Z_LSB__REG          (SMI130_USER_DATA_4_ADDR)
+
+/* Mag_Z(LSB) Description - Reg Addr --> (0x08), Bit --> 1...7 */
+#define SMI130_USER_DATA_MAG_Z_LSB__POS           (1)
+#define SMI130_USER_DATA_MAG_Z_LSB__LEN           (7)
+#define SMI130_USER_DATA_MAG_Z_LSB__MSK          (0xFE)
+#define SMI130_USER_DATA_MAG_Z_LSB__REG          (SMI130_USER_DATA_4_ADDR)
+
+/* Mag_Z(MSB) Description - Reg Addr --> (0x09), Bit --> 0...7 */
+#define SMI130_USER_DATA_5_MAG_Z_MSB__POS           (0)
+#define SMI130_USER_DATA_5_MAG_Z_MSB__LEN           (8)
+#define SMI130_USER_DATA_5_MAG_Z_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_5_MAG_Z_MSB__REG          (SMI130_USER_DATA_5_ADDR)
+
+/* RHALL(LSB) Description - Reg Addr --> (0x0A), Bit --> 0...7 */
+#define SMI130_USER_DATA_6_RHALL_LSB__POS           (0)
+#define SMI130_USER_DATA_6_RHALL_LSB__LEN           (8)
+#define SMI130_USER_DATA_6_RHALL_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_6_RHALL_LSB__REG          (SMI130_USER_DATA_6_ADDR)
+
+/* Mag_R(LSB) Description - Reg Addr --> (0x0A), Bit --> 2...7 */
+#define SMI130_USER_DATA_MAG_R_LSB__POS           (2)
+#define SMI130_USER_DATA_MAG_R_LSB__LEN           (6)
+#define SMI130_USER_DATA_MAG_R_LSB__MSK          (0xFC)
+#define SMI130_USER_DATA_MAG_R_LSB__REG          (SMI130_USER_DATA_6_ADDR)
+
+/* RHALL(MSB) Description - Reg Addr --> (0x0B), Bit --> 0...7 */
+#define SMI130_USER_DATA_7_RHALL_MSB__POS           (0)
+#define SMI130_USER_DATA_7_RHALL_MSB__LEN           (8)
+#define SMI130_USER_DATA_7_RHALL_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_7_RHALL_MSB__REG          (SMI130_USER_DATA_7_ADDR)
+/**************************************************************/
+/**\name	GYRO DATA XYZ LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* GYR_X (LSB) Description - Reg Addr --> (0x0C), Bit --> 0...7 */
+#define SMI130_USER_DATA_8_GYRO_X_LSB__POS           (0)
+#define SMI130_USER_DATA_8_GYRO_X_LSB__LEN           (8)
+#define SMI130_USER_DATA_8_GYRO_X_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_8_GYRO_X_LSB__REG          (SMI130_USER_DATA_8_ADDR)
+
+/* GYR_X (MSB) Description - Reg Addr --> (0x0D), Bit --> 0...7 */
+#define SMI130_USER_DATA_9_GYRO_X_MSB__POS           (0)
+#define SMI130_USER_DATA_9_GYRO_X_MSB__LEN           (8)
+#define SMI130_USER_DATA_9_GYRO_X_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_9_GYRO_X_MSB__REG          (SMI130_USER_DATA_9_ADDR)
+
+/* GYR_Y (LSB) Description - Reg Addr --> 0x0E, Bit --> 0...7 */
+#define SMI130_USER_DATA_10_GYRO_Y_LSB__POS           (0)
+#define SMI130_USER_DATA_10_GYRO_Y_LSB__LEN           (8)
+#define SMI130_USER_DATA_10_GYRO_Y_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_10_GYRO_Y_LSB__REG          (SMI130_USER_DATA_10_ADDR)
+
+/* GYR_Y (MSB) Description - Reg Addr --> (0x0F), Bit --> 0...7 */
+#define SMI130_USER_DATA_11_GYRO_Y_MSB__POS           (0)
+#define SMI130_USER_DATA_11_GYRO_Y_MSB__LEN           (8)
+#define SMI130_USER_DATA_11_GYRO_Y_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_11_GYRO_Y_MSB__REG          (SMI130_USER_DATA_11_ADDR)
+
+/* GYR_Z (LSB) Description - Reg Addr --> (0x10), Bit --> 0...7 */
+#define SMI130_USER_DATA_12_GYRO_Z_LSB__POS           (0)
+#define SMI130_USER_DATA_12_GYRO_Z_LSB__LEN           (8)
+#define SMI130_USER_DATA_12_GYRO_Z_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_12_GYRO_Z_LSB__REG          (SMI130_USER_DATA_12_ADDR)
+
+/* GYR_Z (MSB) Description - Reg Addr --> (0x11), Bit --> 0...7 */
+#define SMI130_USER_DATA_13_GYRO_Z_MSB__POS           (0)
+#define SMI130_USER_DATA_13_GYRO_Z_MSB__LEN           (8)
+#define SMI130_USER_DATA_13_GYRO_Z_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_13_GYRO_Z_MSB__REG          (SMI130_USER_DATA_13_ADDR)
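+
+/*
+ * Illustrative sketch (not part of the driver): combining the gyro
+ * LSB/MSB data bytes above into signed 16-bit samples with the shift
+ * constants defined earlier. v_data[] is a hypothetical buffer holding
+ * a 6-byte burst read that starts at register (0x0C).
+ */
+static inline void smi130_example_unpack_gyro(const u8 *v_data,
+		struct smi130_gyro_t *gyro)
+{
+	gyro->x = (s16)(((u16)v_data[1] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | v_data[0]);
+	gyro->y = (s16)(((u16)v_data[3] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | v_data[2]);
+	gyro->z = (s16)(((u16)v_data[5] <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | v_data[4]);
+}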
+/**************************************************************/
+/**\name	ACCEL DATA XYZ LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* ACC_X (LSB) Description - Reg Addr --> (0x12), Bit --> 0...7 */
+#define SMI130_USER_DATA_14_ACCEL_X_LSB__POS           (0)
+#define SMI130_USER_DATA_14_ACCEL_X_LSB__LEN           (8)
+#define SMI130_USER_DATA_14_ACCEL_X_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_14_ACCEL_X_LSB__REG          (SMI130_USER_DATA_14_ADDR)
+
+/* ACC_X (MSB) Description - Reg Addr --> 0x13, Bit --> 0...7 */
+#define SMI130_USER_DATA_15_ACCEL_X_MSB__POS           (0)
+#define SMI130_USER_DATA_15_ACCEL_X_MSB__LEN           (8)
+#define SMI130_USER_DATA_15_ACCEL_X_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_15_ACCEL_X_MSB__REG          (SMI130_USER_DATA_15_ADDR)
+
+/* ACC_Y (LSB) Description - Reg Addr --> (0x14), Bit --> 0...7 */
+#define SMI130_USER_DATA_16_ACCEL_Y_LSB__POS           (0)
+#define SMI130_USER_DATA_16_ACCEL_Y_LSB__LEN           (8)
+#define SMI130_USER_DATA_16_ACCEL_Y_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_16_ACCEL_Y_LSB__REG          (SMI130_USER_DATA_16_ADDR)
+
+/* ACC_Y (MSB) Description - Reg Addr --> (0x15), Bit --> 0...7 */
+#define SMI130_USER_DATA_17_ACCEL_Y_MSB__POS           (0)
+#define SMI130_USER_DATA_17_ACCEL_Y_MSB__LEN           (8)
+#define SMI130_USER_DATA_17_ACCEL_Y_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_17_ACCEL_Y_MSB__REG          (SMI130_USER_DATA_17_ADDR)
+
+/* ACC_Z (LSB) Description - Reg Addr --> 0x16, Bit --> 0...7 */
+#define SMI130_USER_DATA_18_ACCEL_Z_LSB__POS           (0)
+#define SMI130_USER_DATA_18_ACCEL_Z_LSB__LEN           (8)
+#define SMI130_USER_DATA_18_ACCEL_Z_LSB__MSK          (0xFF)
+#define SMI130_USER_DATA_18_ACCEL_Z_LSB__REG          (SMI130_USER_DATA_18_ADDR)
+
+/* ACC_Z (MSB) Description - Reg Addr --> (0x17), Bit --> 0...7 */
+#define SMI130_USER_DATA_19_ACCEL_Z_MSB__POS           (0)
+#define SMI130_USER_DATA_19_ACCEL_Z_MSB__LEN           (8)
+#define SMI130_USER_DATA_19_ACCEL_Z_MSB__MSK          (0xFF)
+#define SMI130_USER_DATA_19_ACCEL_Z_MSB__REG          (SMI130_USER_DATA_19_ADDR)
+/**************************************************************/
+/**\name	SENSOR TIME LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* SENSORTIME_0 (LSB) Description - Reg Addr --> (0x18), Bit --> 0...7 */
+#define SMI130_USER_SENSORTIME_0_SENSOR_TIME_LSB__POS           (0)
+#define SMI130_USER_SENSORTIME_0_SENSOR_TIME_LSB__LEN           (8)
+#define SMI130_USER_SENSORTIME_0_SENSOR_TIME_LSB__MSK          (0xFF)
+#define SMI130_USER_SENSORTIME_0_SENSOR_TIME_LSB__REG          \
+		(SMI130_USER_SENSORTIME_0_ADDR)
+
+/* SENSORTIME_1 (MSB) Description - Reg Addr --> (0x19), Bit --> 0...7 */
+#define SMI130_USER_SENSORTIME_1_SENSOR_TIME_MSB__POS           (0)
+#define SMI130_USER_SENSORTIME_1_SENSOR_TIME_MSB__LEN           (8)
+#define SMI130_USER_SENSORTIME_1_SENSOR_TIME_MSB__MSK          (0xFF)
+#define SMI130_USER_SENSORTIME_1_SENSOR_TIME_MSB__REG          \
+		(SMI130_USER_SENSORTIME_1_ADDR)
+
+/* SENSORTIME_2 (MSB) Description - Reg Addr --> (0x1A), Bit --> 0...7 */
+#define SMI130_USER_SENSORTIME_2_SENSOR_TIME_MSB__POS           (0)
+#define SMI130_USER_SENSORTIME_2_SENSOR_TIME_MSB__LEN           (8)
+#define SMI130_USER_SENSORTIME_2_SENSOR_TIME_MSB__MSK          (0xFF)
+#define SMI130_USER_SENSORTIME_2_SENSOR_TIME_MSB__REG          \
+		(SMI130_USER_SENSORTIME_2_ADDR)
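+
+/*
+ * Illustrative sketch (not part of the driver): the three SENSORTIME
+ * bytes above appear to form a 24-bit free-running counter; assembling
+ * it from already-read register bytes could look like this.
+ */
+static inline u32 smi130_example_sensor_time(u8 lsb, u8 mid, u8 msb)
+{
+	return ((u32)msb << SMI130_SHIFT_BIT_POSITION_BY_16_BITS) |
+		((u32)mid << SMI130_SHIFT_BIT_POSITION_BY_08_BITS) |
+		(u32)lsb;
+}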
+/**************************************************************/
+/**\name	GYRO SELF TEST LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 1 */
+#define SMI130_USER_STAT_GYRO_SELFTEST_OK__POS          (1)
+#define SMI130_USER_STAT_GYRO_SELFTEST_OK__LEN          (1)
+#define SMI130_USER_STAT_GYRO_SELFTEST_OK__MSK          (0x02)
+#define SMI130_USER_STAT_GYRO_SELFTEST_OK__REG         \
+		(SMI130_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	MAG MANUAL OPERATION LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 2 */
+#define SMI130_USER_STAT_MAG_MANUAL_OPERATION__POS          (2)
+#define SMI130_USER_STAT_MAG_MANUAL_OPERATION__LEN          (1)
+#define SMI130_USER_STAT_MAG_MANUAL_OPERATION__MSK          (0x04)
+#define SMI130_USER_STAT_MAG_MANUAL_OPERATION__REG          \
+		(SMI130_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	FOC STATUS LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 3 */
+#define SMI130_USER_STAT_FOC_RDY__POS          (3)
+#define SMI130_USER_STAT_FOC_RDY__LEN          (1)
+#define SMI130_USER_STAT_FOC_RDY__MSK          (0x08)
+#define SMI130_USER_STAT_FOC_RDY__REG          (SMI130_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	NVM READY LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 4 */
+#define SMI130_USER_STAT_NVM_RDY__POS           (4)
+#define SMI130_USER_STAT_NVM_RDY__LEN           (1)
+#define SMI130_USER_STAT_NVM_RDY__MSK           (0x10)
+#define SMI130_USER_STAT_NVM_RDY__REG           (SMI130_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	DATA READY LENGTH, POSITION AND MASK FOR ACCEL, MAG AND GYRO*/
+/**************************************************************/
+/* Status Description - Reg Addr --> 0x1B, Bit --> 5 */
+#define SMI130_USER_STAT_DATA_RDY_MAG__POS           (5)
+#define SMI130_USER_STAT_DATA_RDY_MAG__LEN           (1)
+#define SMI130_USER_STAT_DATA_RDY_MAG__MSK           (0x20)
+#define SMI130_USER_STAT_DATA_RDY_MAG__REG           (SMI130_USER_STAT_ADDR)
+
+/* Status Description - Reg Addr --> 0x1B, Bit --> 6 */
+#define SMI130_USER_STAT_DATA_RDY_GYRO__POS           (6)
+#define SMI130_USER_STAT_DATA_RDY_GYRO__LEN           (1)
+#define SMI130_USER_STAT_DATA_RDY_GYRO__MSK           (0x40)
+#define SMI130_USER_STAT_DATA_RDY_GYRO__REG           (SMI130_USER_STAT_ADDR)
+
+/* Status Description - Reg Addr --> 0x1B, Bit --> 7 */
+#define SMI130_USER_STAT_DATA_RDY_ACCEL__POS           (7)
+#define SMI130_USER_STAT_DATA_RDY_ACCEL__LEN           (1)
+#define SMI130_USER_STAT_DATA_RDY_ACCEL__MSK           (0x80)
+#define SMI130_USER_STAT_DATA_RDY_ACCEL__REG           (SMI130_USER_STAT_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT STATUS LENGTH, POSITION AND MASK    */
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 0 */
+#define SMI130_USER_INTR_STAT_0_STEP_INTR__POS           (0)
+#define SMI130_USER_INTR_STAT_0_STEP_INTR__LEN           (1)
+#define SMI130_USER_INTR_STAT_0_STEP_INTR__MSK          (0x01)
+#define SMI130_USER_INTR_STAT_0_STEP_INTR__REG          \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	SIGNIFICANT INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 1 */
+#define SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR__POS		(1)
+#define SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR__LEN		(1)
+#define SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR__MSK		(0x02)
+#define SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR__REG       \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	ANY_MOTION INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 2 */
+#define SMI130_USER_INTR_STAT_0_ANY_MOTION__POS           (2)
+#define SMI130_USER_INTR_STAT_0_ANY_MOTION__LEN           (1)
+#define SMI130_USER_INTR_STAT_0_ANY_MOTION__MSK          (0x04)
+#define SMI130_USER_INTR_STAT_0_ANY_MOTION__REG          \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	PMU TRIGGER INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 3 */
+#define SMI130_USER_INTR_STAT_0_PMU_TRIGGER__POS           (3)
+#define SMI130_USER_INTR_STAT_0_PMU_TRIGGER__LEN           (1)
+#define SMI130_USER_INTR_STAT_0_PMU_TRIGGER__MSK          (0x08)
+#define SMI130_USER_INTR_STAT_0_PMU_TRIGGER__REG          \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	DOUBLE TAP INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 4 */
+#define SMI130_USER_INTR_STAT_0_DOUBLE_TAP_INTR__POS           (4)
+#define SMI130_USER_INTR_STAT_0_DOUBLE_TAP_INTR__LEN           (1)
+#define SMI130_USER_INTR_STAT_0_DOUBLE_TAP_INTR__MSK          (0x10)
+#define SMI130_USER_INTR_STAT_0_DOUBLE_TAP_INTR__REG          \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	SINGLE TAP INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 5 */
+#define SMI130_USER_INTR_STAT_0_SINGLE_TAP_INTR__POS           (5)
+#define SMI130_USER_INTR_STAT_0_SINGLE_TAP_INTR__LEN           (1)
+#define SMI130_USER_INTR_STAT_0_SINGLE_TAP_INTR__MSK          (0x20)
+#define SMI130_USER_INTR_STAT_0_SINGLE_TAP_INTR__REG          \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 6 */
+#define SMI130_USER_INTR_STAT_0_ORIENT__POS           (6)
+#define SMI130_USER_INTR_STAT_0_ORIENT__LEN           (1)
+#define SMI130_USER_INTR_STAT_0_ORIENT__MSK          (0x40)
+#define SMI130_USER_INTR_STAT_0_ORIENT__REG          \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	FLAT INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_0 Description - Reg Addr --> 0x1C, Bit --> 7 */
+#define SMI130_USER_INTR_STAT_0_FLAT__POS           (7)
+#define SMI130_USER_INTR_STAT_0_FLAT__LEN           (1)
+#define SMI130_USER_INTR_STAT_0_FLAT__MSK          (0x80)
+#define SMI130_USER_INTR_STAT_0_FLAT__REG          \
+		(SMI130_USER_INTR_STAT_0_ADDR)
+/**************************************************************/
+/**\name	HIGH_G INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 2 */
+#define SMI130_USER_INTR_STAT_1_HIGH_G_INTR__POS               (2)
+#define SMI130_USER_INTR_STAT_1_HIGH_G_INTR__LEN               (1)
+#define SMI130_USER_INTR_STAT_1_HIGH_G_INTR__MSK              (0x04)
+#define SMI130_USER_INTR_STAT_1_HIGH_G_INTR__REG              \
+		(SMI130_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	LOW_G INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 3 */
+#define SMI130_USER_INTR_STAT_1_LOW_G_INTR__POS               (3)
+#define SMI130_USER_INTR_STAT_1_LOW_G_INTR__LEN               (1)
+#define SMI130_USER_INTR_STAT_1_LOW_G_INTR__MSK              (0x08)
+#define SMI130_USER_INTR_STAT_1_LOW_G_INTR__REG              \
+		(SMI130_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	DATA READY INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 4 */
+#define SMI130_USER_INTR_STAT_1_DATA_RDY_INTR__POS               (4)
+#define SMI130_USER_INTR_STAT_1_DATA_RDY_INTR__LEN               (1)
+#define SMI130_USER_INTR_STAT_1_DATA_RDY_INTR__MSK               (0x10)
+#define SMI130_USER_INTR_STAT_1_DATA_RDY_INTR__REG               \
+		(SMI130_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	FIFO FULL INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 5 */
+#define SMI130_USER_INTR_STAT_1_FIFO_FULL_INTR__POS               (5)
+#define SMI130_USER_INTR_STAT_1_FIFO_FULL_INTR__LEN               (1)
+#define SMI130_USER_INTR_STAT_1_FIFO_FULL_INTR__MSK               (0x20)
+#define SMI130_USER_INTR_STAT_1_FIFO_FULL_INTR__REG               \
+		(SMI130_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name FIFO WATERMARK INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 6 */
+#define SMI130_USER_INTR_STAT_1_FIFO_WM_INTR__POS               (6)
+#define SMI130_USER_INTR_STAT_1_FIFO_WM_INTR__LEN               (1)
+#define SMI130_USER_INTR_STAT_1_FIFO_WM_INTR__MSK               (0x40)
+#define SMI130_USER_INTR_STAT_1_FIFO_WM_INTR__REG               \
+		(SMI130_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	NO MOTION INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_1 Description - Reg Addr --> 0x1D, Bit --> 7 */
+#define SMI130_USER_INTR_STAT_1_NOMOTION_INTR__POS               (7)
+#define SMI130_USER_INTR_STAT_1_NOMOTION_INTR__LEN               (1)
+#define SMI130_USER_INTR_STAT_1_NOMOTION_INTR__MSK               (0x80)
+#define SMI130_USER_INTR_STAT_1_NOMOTION_INTR__REG               \
+		(SMI130_USER_INTR_STAT_1_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION-XYZ AXIS INTERRUPT STATUS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 0 */
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__POS               (0)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__MSK               (0x01)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_X__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 1 */
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__POS               (1)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__MSK               (0x02)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 2 */
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__POS               (2)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__MSK               (0x04)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION SIGN LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 3 */
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_SIGN__POS               (3)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_SIGN__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_SIGN__MSK               (0x08)
+#define SMI130_USER_INTR_STAT_2_ANY_MOTION_SIGN__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	TAP_XYZ AND SIGN LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 4 */
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_X__POS               (4)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_X__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_X__MSK               (0x10)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_X__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 5 */
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Y__POS               (5)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Y__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Y__MSK               (0x20)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Y__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 6 */
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Z__POS               (6)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Z__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Z__MSK               (0x40)
+#define SMI130_USER_INTR_STAT_2_TAP_FIRST_Z__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 7 */
+#define SMI130_USER_INTR_STAT_2_TAP_SIGN__POS               (7)
+#define SMI130_USER_INTR_STAT_2_TAP_SIGN__LEN               (1)
+#define SMI130_USER_INTR_STAT_2_TAP_SIGN__MSK               (0x80)
+#define SMI130_USER_INTR_STAT_2_TAP_SIGN__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT STATUS FOR WHOLE 0x1E LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_2 Description - Reg Addr --> 0x1E, Bit --> 0...7 */
+#define SMI130_USER_INTR_STAT_2__POS               (0)
+#define SMI130_USER_INTR_STAT_2__LEN               (8)
+#define SMI130_USER_INTR_STAT_2__MSK               (0xFF)
+#define SMI130_USER_INTR_STAT_2__REG               \
+		(SMI130_USER_INTR_STAT_2_ADDR)
+/**************************************************************/
+/**\name	HIGH_G-XYZ AND SIGN LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 0 */
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_X__POS               (0)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_X__LEN               (1)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_X__MSK               (0x01)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_X__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 1 */
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Y__POS               (1)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Y__LEN               (1)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Y__MSK               (0x02)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Y__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 2 */
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Z__POS               (2)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Z__LEN               (1)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Z__MSK               (0x04)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_FIRST_Z__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 3 */
+#define SMI130_USER_INTR_STAT_3_HIGH_G_SIGN__POS               (3)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_SIGN__LEN               (1)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_SIGN__MSK               (0x08)
+#define SMI130_USER_INTR_STAT_3_HIGH_G_SIGN__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	ORIENT XY and Z AXIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 4...5 */
+#define SMI130_USER_INTR_STAT_3_ORIENT_XY__POS               (4)
+#define SMI130_USER_INTR_STAT_3_ORIENT_XY__LEN               (2)
+#define SMI130_USER_INTR_STAT_3_ORIENT_XY__MSK               (0x30)
+#define SMI130_USER_INTR_STAT_3_ORIENT_XY__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 6 */
+#define SMI130_USER_INTR_STAT_3_ORIENT_Z__POS               (6)
+#define SMI130_USER_INTR_STAT_3_ORIENT_Z__LEN               (1)
+#define SMI130_USER_INTR_STAT_3_ORIENT_Z__MSK               (0x40)
+#define SMI130_USER_INTR_STAT_3_ORIENT_Z__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	FLAT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 7 */
+#define SMI130_USER_INTR_STAT_3_FLAT__POS               (7)
+#define SMI130_USER_INTR_STAT_3_FLAT__LEN               (1)
+#define SMI130_USER_INTR_STAT_3_FLAT__MSK               (0x80)
+#define SMI130_USER_INTR_STAT_3_FLAT__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT STATUS FOR WHOLE (0x1F) LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Status_3 Description - Reg Addr --> (0x1F), Bit --> 0...7 */
+#define SMI130_USER_INTR_STAT_3__POS               (0)
+#define SMI130_USER_INTR_STAT_3__LEN               (8)
+#define SMI130_USER_INTR_STAT_3__MSK               (0xFF)
+#define SMI130_USER_INTR_STAT_3__REG               \
+		(SMI130_USER_INTR_STAT_3_ADDR)
+/**************************************************************/
+/**\name	TEMPERATURE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Temperature Description - LSB Reg Addr --> (0x20), Bit --> 0...7 */
+#define SMI130_USER_TEMP_LSB_VALUE__POS               (0)
+#define SMI130_USER_TEMP_LSB_VALUE__LEN               (8)
+#define SMI130_USER_TEMP_LSB_VALUE__MSK               (0xFF)
+#define SMI130_USER_TEMP_LSB_VALUE__REG               \
+		(SMI130_USER_TEMPERATURE_0_ADDR)
+
+/* Temperature Description - MSB Reg Addr --> (0x21), Bit --> 0...7 */
+#define SMI130_USER_TEMP_MSB_VALUE__POS               (0)
+#define SMI130_USER_TEMP_MSB_VALUE__LEN               (8)
+#define SMI130_USER_TEMP_MSB_VALUE__MSK               (0xFF)
+#define SMI130_USER_TEMP_MSB_VALUE__REG               \
+		(SMI130_USER_TEMPERATURE_1_ADDR)
+/**************************************************************/
+/**\name	FIFO BYTE COUNTER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Length0 Description - Reg Addr --> 0x22, Bit --> 0...7 */
+#define SMI130_USER_FIFO_BYTE_COUNTER_LSB__POS           (0)
+#define SMI130_USER_FIFO_BYTE_COUNTER_LSB__LEN           (8)
+#define SMI130_USER_FIFO_BYTE_COUNTER_LSB__MSK          (0xFF)
+#define SMI130_USER_FIFO_BYTE_COUNTER_LSB__REG          \
+		(SMI130_USER_FIFO_LENGTH_0_ADDR)
+
+/*Fifo_Length1 Description - Reg Addr --> 0x23, Bit --> 0...2 */
+#define SMI130_USER_FIFO_BYTE_COUNTER_MSB__POS           (0)
+#define SMI130_USER_FIFO_BYTE_COUNTER_MSB__LEN           (3)
+#define SMI130_USER_FIFO_BYTE_COUNTER_MSB__MSK          (0x07)
+#define SMI130_USER_FIFO_BYTE_COUNTER_MSB__REG          \
+		(SMI130_USER_FIFO_LENGTH_1_ADDR)
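+
+/*
+ * Illustrative sketch (not part of the driver): the FIFO fill level is
+ * split across FIFO_LENGTH_0 (eight LSBs) and the low three bits of
+ * FIFO_LENGTH_1; combining two already-read bytes with the mask above
+ * could look like this.
+ */
+static inline u16 smi130_example_fifo_fill_level(u8 len0, u8 len1)
+{
+	return (u16)((((u16)(len1 & SMI130_USER_FIFO_BYTE_COUNTER_MSB__MSK)) <<
+		SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | len0);
+}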
+
+/**************************************************************/
+/**\name	FIFO DATA LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Data Description - Reg Addr --> 0x24, Bit --> 0...7 */
+#define SMI130_USER_FIFO_DATA__POS           (0)
+#define SMI130_USER_FIFO_DATA__LEN           (8)
+#define SMI130_USER_FIFO_DATA__MSK          (0xFF)
+#define SMI130_USER_FIFO_DATA__REG          (SMI130_USER_FIFO_DATA_ADDR)
+
+/**************************************************************/
+/**\name	ACCEL CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Acc_Conf Description - Reg Addr --> (0x40), Bit --> 0...3 */
+#define SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__POS               (0)
+#define SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__LEN               (4)
+#define SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__MSK               (0x0F)
+#define SMI130_USER_ACCEL_CONFIG_OUTPUT_DATA_RATE__REG		       \
+(SMI130_USER_ACCEL_CONFIG_ADDR)
+
+/* Acc_Conf Description - Reg Addr --> (0x40), Bit --> 4...6 */
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_BW__POS               (4)
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_BW__LEN               (3)
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_BW__MSK               (0x70)
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_BW__REG	(SMI130_USER_ACCEL_CONFIG_ADDR)
+
+/* Acc_Conf Description - Reg Addr --> (0x40), Bit --> 7 */
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__POS           (7)
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__LEN           (1)
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__MSK           (0x80)
+#define SMI130_USER_ACCEL_CONFIG_ACCEL_UNDER_SAMPLING__REG	\
+(SMI130_USER_ACCEL_CONFIG_ADDR)
+
+/* Acc_Range Description - Reg Addr --> 0x41, Bit --> 0...3 */
+#define SMI130_USER_ACCEL_RANGE__POS               (0)
+#define SMI130_USER_ACCEL_RANGE__LEN               (4)
+#define SMI130_USER_ACCEL_RANGE__MSK               (0x0F)
+#define SMI130_USER_ACCEL_RANGE__REG              \
+(SMI130_USER_ACCEL_RANGE_ADDR)
+/**************************************************************/
+/**\name	GYRO CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Gyro_Conf Description - Reg Addr --> (0x42), Bit --> 0...3 */
+#define SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__POS               (0)
+#define SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__LEN               (4)
+#define SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__MSK               (0x0F)
+#define SMI130_USER_GYRO_CONFIG_OUTPUT_DATA_RATE__REG               \
+(SMI130_USER_GYRO_CONFIG_ADDR)
+
+/* Gyro_Conf Description - Reg Addr --> (0x42), Bit --> 4...5 */
+#define SMI130_USER_GYRO_CONFIG_BW__POS               (4)
+#define SMI130_USER_GYRO_CONFIG_BW__LEN               (2)
+#define SMI130_USER_GYRO_CONFIG_BW__MSK               (0x30)
+#define SMI130_USER_GYRO_CONFIG_BW__REG               \
+(SMI130_USER_GYRO_CONFIG_ADDR)
+
+/* Gyr_Range Description - Reg Addr --> 0x43, Bit --> 0...2 */
+#define SMI130_USER_GYRO_RANGE__POS               (0)
+#define SMI130_USER_GYRO_RANGE__LEN               (3)
+#define SMI130_USER_GYRO_RANGE__MSK               (0x07)
+#define SMI130_USER_GYRO_RANGE__REG               (SMI130_USER_GYRO_RANGE_ADDR)
+/**************************************************************/
+/**\name	MAG CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Mag_Conf Description - Reg Addr --> (0x44), Bit --> 0...3 */
+#define SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE__POS               (0)
+#define SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE__LEN               (4)
+#define SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE__MSK               (0x0F)
+#define SMI130_USER_MAG_CONFIG_OUTPUT_DATA_RATE__REG               \
+(SMI130_USER_MAG_CONFIG_ADDR)
+/**************************************************************/
+/**\name	FIFO DOWNS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Downs Description - Reg Addr --> 0x45, Bit --> 0...2 */
+#define SMI130_USER_FIFO_DOWN_GYRO__POS               (0)
+#define SMI130_USER_FIFO_DOWN_GYRO__LEN               (3)
+#define SMI130_USER_FIFO_DOWN_GYRO__MSK               (0x07)
+#define SMI130_USER_FIFO_DOWN_GYRO__REG	(SMI130_USER_FIFO_DOWN_ADDR)
+/**************************************************************/
+/**\name	FIFO FILTER FOR ACCEL AND GYRO LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_filt Description - Reg Addr --> 0x45, Bit --> 3 */
+#define SMI130_USER_FIFO_FILTER_GYRO__POS               (3)
+#define SMI130_USER_FIFO_FILTER_GYRO__LEN               (1)
+#define SMI130_USER_FIFO_FILTER_GYRO__MSK               (0x08)
+#define SMI130_USER_FIFO_FILTER_GYRO__REG	  (SMI130_USER_FIFO_DOWN_ADDR)
+
+/* Fifo_Downs Description - Reg Addr --> 0x45, Bit --> 4...6 */
+#define SMI130_USER_FIFO_DOWN_ACCEL__POS               (4)
+#define SMI130_USER_FIFO_DOWN_ACCEL__LEN               (3)
+#define SMI130_USER_FIFO_DOWN_ACCEL__MSK               (0x70)
+#define SMI130_USER_FIFO_DOWN_ACCEL__REG	(SMI130_USER_FIFO_DOWN_ADDR)
+
+/* Fifo_FILT Description - Reg Addr --> 0x45, Bit --> 7 */
+#define SMI130_USER_FIFO_FILTER_ACCEL__POS               (7)
+#define SMI130_USER_FIFO_FILTER_ACCEL__LEN               (1)
+#define SMI130_USER_FIFO_FILTER_ACCEL__MSK               (0x80)
+#define SMI130_USER_FIFO_FILTER_ACCEL__REG	(SMI130_USER_FIFO_DOWN_ADDR)
+/**************************************************************/
+/**\name	FIFO WATER MARK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_0 Description - Reg Addr --> 0x46, Bit --> 0...7 */
+#define SMI130_USER_FIFO_WM__POS               (0)
+#define SMI130_USER_FIFO_WM__LEN               (8)
+#define SMI130_USER_FIFO_WM__MSK               (0xFF)
+#define SMI130_USER_FIFO_WM__REG	(SMI130_USER_FIFO_CONFIG_0_ADDR)
+/**************************************************************/
+/**\name	FIFO TIME LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 1 */
+#define SMI130_USER_FIFO_TIME_ENABLE__POS               (1)
+#define SMI130_USER_FIFO_TIME_ENABLE__LEN               (1)
+#define SMI130_USER_FIFO_TIME_ENABLE__MSK               (0x02)
+#define SMI130_USER_FIFO_TIME_ENABLE__REG	(SMI130_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO TAG INTERRUPT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 2 */
+#define SMI130_USER_FIFO_TAG_INTR2_ENABLE__POS               (2)
+#define SMI130_USER_FIFO_TAG_INTR2_ENABLE__LEN               (1)
+#define SMI130_USER_FIFO_TAG_INTR2_ENABLE__MSK               (0x04)
+#define SMI130_USER_FIFO_TAG_INTR2_ENABLE__REG	(SMI130_USER_FIFO_CONFIG_1_ADDR)
+
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 3 */
+#define SMI130_USER_FIFO_TAG_INTR1_ENABLE__POS               (3)
+#define SMI130_USER_FIFO_TAG_INTR1_ENABLE__LEN               (1)
+#define SMI130_USER_FIFO_TAG_INTR1_ENABLE__MSK               (0x08)
+#define SMI130_USER_FIFO_TAG_INTR1_ENABLE__REG	(SMI130_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO HEADER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 4 */
+#define SMI130_USER_FIFO_HEADER_ENABLE__POS               (4)
+#define SMI130_USER_FIFO_HEADER_ENABLE__LEN               (1)
+#define SMI130_USER_FIFO_HEADER_ENABLE__MSK               (0x10)
+#define SMI130_USER_FIFO_HEADER_ENABLE__REG		         \
+(SMI130_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO MAG ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 5 */
+#define SMI130_USER_FIFO_MAG_ENABLE__POS               (5)
+#define SMI130_USER_FIFO_MAG_ENABLE__LEN               (1)
+#define SMI130_USER_FIFO_MAG_ENABLE__MSK               (0x20)
+#define SMI130_USER_FIFO_MAG_ENABLE__REG		     \
+(SMI130_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO ACCEL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 6 */
+#define SMI130_USER_FIFO_ACCEL_ENABLE__POS               (6)
+#define SMI130_USER_FIFO_ACCEL_ENABLE__LEN               (1)
+#define SMI130_USER_FIFO_ACCEL_ENABLE__MSK               (0x40)
+#define SMI130_USER_FIFO_ACCEL_ENABLE__REG		        \
+(SMI130_USER_FIFO_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	FIFO GYRO ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Fifo_Config_1 Description - Reg Addr --> 0x47, Bit --> 7 */
+#define SMI130_USER_FIFO_GYRO_ENABLE__POS               (7)
+#define SMI130_USER_FIFO_GYRO_ENABLE__LEN               (1)
+#define SMI130_USER_FIFO_GYRO_ENABLE__MSK               (0x80)
+#define SMI130_USER_FIFO_GYRO_ENABLE__REG		       \
+(SMI130_USER_FIFO_CONFIG_1_ADDR)
+
+/**************************************************************/
+/**\name	MAG I2C ADDRESS SELECTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+
+/* Mag_IF_0 Description - Reg Addr --> 0x4b, Bit --> 1...7 */
+#define SMI130_USER_I2C_DEVICE_ADDR__POS               (1)
+#define SMI130_USER_I2C_DEVICE_ADDR__LEN               (7)
+#define SMI130_USER_I2C_DEVICE_ADDR__MSK               (0xFE)
+#define SMI130_USER_I2C_DEVICE_ADDR__REG	(SMI130_USER_MAG_IF_0_ADDR)
+/**************************************************************/
+/**\name MAG CONFIGURATION FOR SECONDARY
+	INTERFACE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Mag_IF_1 Description - Reg Addr --> 0x4c, Bit --> 0...1 */
+#define SMI130_USER_MAG_BURST__POS               (0)
+#define SMI130_USER_MAG_BURST__LEN               (2)
+#define SMI130_USER_MAG_BURST__MSK               (0x03)
+#define SMI130_USER_MAG_BURST__REG               (SMI130_USER_MAG_IF_1_ADDR)
+
+/* Mag_IF_1 Description - Reg Addr --> 0x4c, Bit --> 2...5 */
+#define SMI130_USER_MAG_OFFSET__POS               (2)
+#define SMI130_USER_MAG_OFFSET__LEN               (4)
+#define SMI130_USER_MAG_OFFSET__MSK               (0x3C)
+#define SMI130_USER_MAG_OFFSET__REG               (SMI130_USER_MAG_IF_1_ADDR)
+
+/* Mag_IF_1 Description - Reg Addr --> 0x4c, Bit --> 7 */
+#define SMI130_USER_MAG_MANUAL_ENABLE__POS               (7)
+#define SMI130_USER_MAG_MANUAL_ENABLE__LEN               (1)
+#define SMI130_USER_MAG_MANUAL_ENABLE__MSK               (0x80)
+#define SMI130_USER_MAG_MANUAL_ENABLE__REG               \
+(SMI130_USER_MAG_IF_1_ADDR)
+
+/* Mag_IF_2 Description - Reg Addr --> 0x4d, Bit -->0... 7 */
+#define SMI130_USER_READ_ADDR__POS               (0)
+#define SMI130_USER_READ_ADDR__LEN               (8)
+#define SMI130_USER_READ_ADDR__MSK               (0xFF)
+#define SMI130_USER_READ_ADDR__REG               (SMI130_USER_MAG_IF_2_ADDR)
+
+/* Mag_IF_3 Description - Reg Addr --> 0x4e, Bit -->0... 7 */
+#define SMI130_USER_WRITE_ADDR__POS               (0)
+#define SMI130_USER_WRITE_ADDR__LEN               (8)
+#define SMI130_USER_WRITE_ADDR__MSK               (0xFF)
+#define SMI130_USER_WRITE_ADDR__REG               (SMI130_USER_MAG_IF_3_ADDR)
+
+/* Mag_IF_4 Description - Reg Addr --> 0x4f, Bit -->0... 7 */
+#define SMI130_USER_WRITE_DATA__POS               (0)
+#define SMI130_USER_WRITE_DATA__LEN               (8)
+#define SMI130_USER_WRITE_DATA__MSK               (0xFF)
+#define SMI130_USER_WRITE_DATA__REG               (SMI130_USER_MAG_IF_4_ADDR)
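Registers 0x4b-0x4f give indirect access to a magnetometer on the secondary interface: MAG_IF_0 holds the device address, MAG_IF_1 the manual-enable and burst-length bits, and MAG_IF_2/3/4 the remote read address, write address and write data. A hedged sketch of a manual-mode write follows, assuming a hypothetical smi130_write_reg() bus helper (the real driver uses its own I2C/SPI wrappers) and the usual BMI160-style behaviour where writing the address register triggers the transfer; check the datasheet before relying on that ordering.

#include <linux/types.h>

int smi130_write_reg(u8 reg, u8 val);	/* hypothetical bus accessor */

/* Write one byte to a register of the secondary magnetometer (manual mode). */
static int smi130_mag_manual_write(u8 mag_reg, u8 data)
{
	int ret;

	/* Latch the payload first; writing MAG_IF_3 then starts the write. */
	ret = smi130_write_reg(SMI130_USER_WRITE_DATA__REG, data);
	if (ret)
		return ret;
	return smi130_write_reg(SMI130_USER_WRITE_ADDR__REG, mag_reg);
}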
+/**************************************************************/
+/**\name	ANY MOTION XYZ AXIS ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->0 */
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__POS               (0)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__MSK               (0x01)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_X_ENABLE__REG	              \
+(SMI130_USER_INTR_ENABLE_0_ADDR)
+
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->1 */
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__POS               (1)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__MSK               (0x02)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Y_ENABLE__REG	          \
+(SMI130_USER_INTR_ENABLE_0_ADDR)
+
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->2 */
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__POS               (2)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__MSK               (0x04)
+#define SMI130_USER_INTR_ENABLE_0_ANY_MOTION_Z_ENABLE__REG	            \
+(SMI130_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	DOUBLE TAP ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->4 */
+#define SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__POS               (4)
+#define SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__MSK               (0x10)
+#define SMI130_USER_INTR_ENABLE_0_DOUBLE_TAP_ENABLE__REG	        \
+(SMI130_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	SINGLE TAP ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->5 */
+#define SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__POS               (5)
+#define SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__MSK               (0x20)
+#define SMI130_USER_INTR_ENABLE_0_SINGLE_TAP_ENABLE__REG	       \
+(SMI130_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->6 */
+#define SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE__POS               (6)
+#define SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE__MSK               (0x40)
+#define SMI130_USER_INTR_ENABLE_0_ORIENT_ENABLE__REG	           \
+(SMI130_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	FLAT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_0 Description - Reg Addr --> 0x50, Bit -->7 */
+#define SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE__POS               (7)
+#define SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE__MSK               (0x80)
+#define SMI130_USER_INTR_ENABLE_0_FLAT_ENABLE__REG	           \
+(SMI130_USER_INTR_ENABLE_0_ADDR)
+/**************************************************************/
+/**\name	HIGH_G XYZ ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->0 */
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__POS               (0)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__MSK               (0x01)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_X_ENABLE__REG	           \
+(SMI130_USER_INTR_ENABLE_1_ADDR)
+
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->1 */
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__POS               (1)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__MSK               (0x02)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Y_ENABLE__REG	           \
+(SMI130_USER_INTR_ENABLE_1_ADDR)
+
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->2 */
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__POS               (2)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__MSK               (0x04)
+#define SMI130_USER_INTR_ENABLE_1_HIGH_G_Z_ENABLE__REG	           \
+(SMI130_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	LOW_G ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->3 */
+#define SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__POS               (3)
+#define SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__MSK               (0x08)
+#define SMI130_USER_INTR_ENABLE_1_LOW_G_ENABLE__REG	          \
+(SMI130_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	DATA READY ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->4 */
+#define SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__POS               (4)
+#define SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__MSK               (0x10)
+#define SMI130_USER_INTR_ENABLE_1_DATA_RDY_ENABLE__REG	            \
+(SMI130_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	FIFO FULL AND WATER MARK ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->5 */
+#define SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__POS               (5)
+#define SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__MSK               (0x20)
+#define SMI130_USER_INTR_ENABLE_1_FIFO_FULL_ENABLE__REG	              \
+(SMI130_USER_INTR_ENABLE_1_ADDR)
+
+/* Int_En_1 Description - Reg Addr --> (0x51), Bit -->6 */
+#define SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__POS               (6)
+#define SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__MSK               (0x40)
+#define SMI130_USER_INTR_ENABLE_1_FIFO_WM_ENABLE__REG	           \
+(SMI130_USER_INTR_ENABLE_1_ADDR)
+/**************************************************************/
+/**\name	NO MOTION XYZ ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->0 */
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__POS               (0)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__MSK               (0x01)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_X_ENABLE__REG	  \
+(SMI130_USER_INTR_ENABLE_2_ADDR)
+
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->1 */
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__POS               (1)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__MSK               (0x02)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Y_ENABLE__REG	  \
+(SMI130_USER_INTR_ENABLE_2_ADDR)
+
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->2 */
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__POS               (2)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__MSK               (0x04)
+#define SMI130_USER_INTR_ENABLE_2_NOMOTION_Z_ENABLE__REG	  \
+(SMI130_USER_INTR_ENABLE_2_ADDR)
+/**************************************************************/
+/**\name	STEP DETECTOR ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_En_2 Description - Reg Addr --> (0x52), Bit -->3 */
+#define SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__POS               (3)
+#define SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__LEN               (1)
+#define SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__MSK               (0x08)
+#define SMI130_USER_INTR_ENABLE_2_STEP_DETECTOR_ENABLE__REG	  \
+(SMI130_USER_INTR_ENABLE_2_ADDR)
+/**************************************************************/
+/**\name	EDGE CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->0 */
+#define SMI130_USER_INTR1_EDGE_CTRL__POS               (0)
+#define SMI130_USER_INTR1_EDGE_CTRL__LEN               (1)
+#define SMI130_USER_INTR1_EDGE_CTRL__MSK               (0x01)
+#define SMI130_USER_INTR1_EDGE_CTRL__REG		\
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	LEVEL CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->1 */
+#define SMI130_USER_INTR1_LEVEL__POS               (1)
+#define SMI130_USER_INTR1_LEVEL__LEN               (1)
+#define SMI130_USER_INTR1_LEVEL__MSK               (0x02)
+#define SMI130_USER_INTR1_LEVEL__REG               \
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	OUTPUT TYPE ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->2 */
+#define SMI130_USER_INTR1_OUTPUT_TYPE__POS               (2)
+#define SMI130_USER_INTR1_OUTPUT_TYPE__LEN               (1)
+#define SMI130_USER_INTR1_OUTPUT_TYPE__MSK               (0x04)
+#define SMI130_USER_INTR1_OUTPUT_TYPE__REG               \
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	OUTPUT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->3 */
+#define SMI130_USER_INTR1_OUTPUT_ENABLE__POS               (3)
+#define SMI130_USER_INTR1_OUTPUT_ENABLE__LEN               (1)
+#define SMI130_USER_INTR1_OUTPUT_ENABLE__MSK               (0x08)
+#define SMI130_USER_INTR1_OUTPUT_ENABLE__REG		\
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	EDGE CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->4 */
+#define SMI130_USER_INTR2_EDGE_CTRL__POS               (4)
+#define SMI130_USER_INTR2_EDGE_CTRL__LEN               (1)
+#define SMI130_USER_INTR2_EDGE_CTRL__MSK               (0x10)
+#define SMI130_USER_INTR2_EDGE_CTRL__REG		\
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	LEVEL CONTROL ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->5 */
+#define SMI130_USER_INTR2_LEVEL__POS               (5)
+#define SMI130_USER_INTR2_LEVEL__LEN               (1)
+#define SMI130_USER_INTR2_LEVEL__MSK               (0x20)
+#define SMI130_USER_INTR2_LEVEL__REG               \
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	OUTPUT TYPE ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->6 */
+#define SMI130_USER_INTR2_OUTPUT_TYPE__POS               (6)
+#define SMI130_USER_INTR2_OUTPUT_TYPE__LEN               (1)
+#define SMI130_USER_INTR2_OUTPUT_TYPE__MSK               (0x40)
+#define SMI130_USER_INTR2_OUTPUT_TYPE__REG               \
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+
+/* Int_Out_Ctrl Description - Reg Addr --> 0x53, Bit -->7 */
+#define SMI130_USER_INTR2_OUTPUT_EN__POS               (7)
+#define SMI130_USER_INTR2_OUTPUT_EN__LEN               (1)
+#define SMI130_USER_INTR2_OUTPUT_EN__MSK               (0x80)
+#define SMI130_USER_INTR2_OUTPUT_EN__REG		\
+(SMI130_USER_INTR_OUT_CTRL_ADDR)
+/**************************************************************/
+/**\name	LATCH INTERRUPT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Latch Description - Reg Addr --> 0x54, Bit -->0...3 */
+#define SMI130_USER_INTR_LATCH__POS               (0)
+#define SMI130_USER_INTR_LATCH__LEN               (4)
+#define SMI130_USER_INTR_LATCH__MSK               (0x0F)
+#define SMI130_USER_INTR_LATCH__REG               (SMI130_USER_INTR_LATCH_ADDR)
+/**************************************************************/
+/**\name	INPUT ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Latch Description - Reg Addr --> 0x54, Bit -->4 */
+#define SMI130_USER_INTR1_INPUT_ENABLE__POS               (4)
+#define SMI130_USER_INTR1_INPUT_ENABLE__LEN               (1)
+#define SMI130_USER_INTR1_INPUT_ENABLE__MSK               (0x10)
+#define SMI130_USER_INTR1_INPUT_ENABLE__REG               \
+(SMI130_USER_INTR_LATCH_ADDR)
+
+/* Int_Latch Description - Reg Addr --> 0x54, Bit -->5*/
+#define SMI130_USER_INTR2_INPUT_ENABLE__POS               (5)
+#define SMI130_USER_INTR2_INPUT_ENABLE__LEN               (1)
+#define SMI130_USER_INTR2_INPUT_ENABLE__MSK               (0x20)
+#define SMI130_USER_INTR2_INPUT_ENABLE__REG              \
+(SMI130_USER_INTR_LATCH_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF LOW_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->0 */
+#define SMI130_USER_INTR_MAP_0_INTR1_LOW_G__POS               (0)
+#define SMI130_USER_INTR_MAP_0_INTR1_LOW_G__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_LOW_G__MSK               (0x01)
+#define SMI130_USER_INTR_MAP_0_INTR1_LOW_G__REG	(SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF HIGH_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->1 */
+#define SMI130_USER_INTR_MAP_0_INTR1_HIGH_G__POS               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_HIGH_G__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_HIGH_G__MSK               (0x02)
+#define SMI130_USER_INTR_MAP_0_INTR1_HIGH_G__REG	\
+(SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF ANY MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->2 */
+#define SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__POS               (2)
+#define SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__MSK               (0x04)
+#define SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG            \
+(SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF NO MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->3 */
+#define SMI130_USER_INTR_MAP_0_INTR1_NOMOTION__POS               (3)
+#define SMI130_USER_INTR_MAP_0_INTR1_NOMOTION__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_NOMOTION__MSK               (0x08)
+#define SMI130_USER_INTR_MAP_0_INTR1_NOMOTION__REG (SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF DOUBLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->4 */
+#define SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__POS               (4)
+#define SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__MSK               (0x10)
+#define SMI130_USER_INTR_MAP_0_INTR1_DOUBLE_TAP__REG	\
+(SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF SINGLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->5 */
+#define SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP__POS               (5)
+#define SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP__MSK               (0x20)
+#define SMI130_USER_INTR_MAP_0_INTR1_SINGLE_TAP__REG	      \
+(SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF ORIENT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->6 */
+#define SMI130_USER_INTR_MAP_0_INTR1_ORIENT__POS               (6)
+#define SMI130_USER_INTR_MAP_0_INTR1_ORIENT__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_ORIENT__MSK               (0x40)
+#define SMI130_USER_INTR_MAP_0_INTR1_ORIENT__REG	          \
+(SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF FLAT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_0 Description - Reg Addr --> 0x55, Bit -->7 */
+#define SMI130_USER_INTR_MAP_0_INTR1_FLAT__POS               (7)
+#define SMI130_USER_INTR_MAP_0_INTR1_FLAT__LEN               (1)
+#define SMI130_USER_INTR_MAP_0_INTR1_FLAT__MSK               (0x80)
+#define SMI130_USER_INTR_MAP_0_INTR1_FLAT__REG	(SMI130_USER_INTR_MAP_0_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF PMU TRIGGER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->0 */
+#define SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG__POS               (0)
+#define SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG__MSK               (0x01)
+#define SMI130_USER_INTR_MAP_1_INTR2_PMU_TRIG__REG (SMI130_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF FIFO FULL AND
+	WATER MARK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->1 */
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL__POS               (1)
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL__MSK               (0x02)
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_FULL__REG	         \
+(SMI130_USER_INTR_MAP_1_ADDR)
+
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->2 */
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM__POS               (2)
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM__MSK               (0x04)
+#define SMI130_USER_INTR_MAP_1_INTR2_FIFO_WM__REG	         \
+(SMI130_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF DATA READY LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->3 */
+#define SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY__POS               (3)
+#define SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY__MSK               (0x08)
+#define SMI130_USER_INTR_MAP_1_INTR2_DATA_RDY__REG	      \
+(SMI130_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF PMU TRIGGER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->4 */
+#define SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG__POS               (4)
+#define SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG__MSK               (0x10)
+#define SMI130_USER_INTR_MAP_1_INTR1_PMU_TRIG__REG (SMI130_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF FIFO FULL AND
+	WATER MARK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->5 */
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL__POS               (5)
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL__MSK               (0x20)
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_FULL__REG	       \
+(SMI130_USER_INTR_MAP_1_ADDR)
+
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->6 */
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM__POS               (6)
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM__MSK               (0x40)
+#define SMI130_USER_INTR_MAP_1_INTR1_FIFO_WM__REG	\
+(SMI130_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT1 MAPPING OF DATA READY LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_1 Description - Reg Addr --> 0x56, Bit -->7 */
+#define SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY__POS               (7)
+#define SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY__LEN               (1)
+#define SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY__MSK               (0x80)
+#define SMI130_USER_INTR_MAP_1_INTR1_DATA_RDY__REG	\
+(SMI130_USER_INTR_MAP_1_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF LOW_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->0 */
+#define SMI130_USER_INTR_MAP_2_INTR2_LOW_G__POS               (0)
+#define SMI130_USER_INTR_MAP_2_INTR2_LOW_G__LEN               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_LOW_G__MSK               (0x01)
+#define SMI130_USER_INTR_MAP_2_INTR2_LOW_G__REG	(SMI130_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF HIGH_G LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->1 */
+#define SMI130_USER_INTR_MAP_2_INTR2_HIGH_G__POS               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_HIGH_G__LEN               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_HIGH_G__MSK               (0x02)
+#define SMI130_USER_INTR_MAP_2_INTR2_HIGH_G__REG	\
+(SMI130_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF ANY MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->2 */
+#define SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__POS      (2)
+#define SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__LEN      (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__MSK     (0x04)
+#define SMI130_USER_INTR_MAP_2_INTR2_ANY_MOTION__REG     \
+(SMI130_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF NO MOTION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->3 */
+#define SMI130_USER_INTR_MAP_2_INTR2_NOMOTION__POS               (3)
+#define SMI130_USER_INTR_MAP_2_INTR2_NOMOTION__LEN               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_NOMOTION__MSK               (0x08)
+#define SMI130_USER_INTR_MAP_2_INTR2_NOMOTION__REG (SMI130_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF DOUBLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->4 */
+#define SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__POS               (4)
+#define SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__LEN               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__MSK               (0x10)
+#define SMI130_USER_INTR_MAP_2_INTR2_DOUBLE_TAP__REG	\
+(SMI130_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF SINGLE TAP LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->5 */
+#define SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP__POS               (5)
+#define SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP__LEN               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP__MSK               (0x20)
+#define SMI130_USER_INTR_MAP_2_INTR2_SINGLE_TAP__REG	\
+(SMI130_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF ORIENT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->6 */
+#define SMI130_USER_INTR_MAP_2_INTR2_ORIENT__POS               (6)
+#define SMI130_USER_INTR_MAP_2_INTR2_ORIENT__LEN               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_ORIENT__MSK               (0x40)
+#define SMI130_USER_INTR_MAP_2_INTR2_ORIENT__REG	\
+(SMI130_USER_INTR_MAP_2_ADDR)
+/**************************************************************/
+/**\name	INTERRUPT2 MAPPING OF FLAT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Map_2 Description - Reg Addr --> 0x57, Bit -->7 */
+#define SMI130_USER_INTR_MAP_2_INTR2_FLAT__POS               (7)
+#define SMI130_USER_INTR_MAP_2_INTR2_FLAT__LEN               (1)
+#define SMI130_USER_INTR_MAP_2_INTR2_FLAT__MSK               (0x80)
+#define SMI130_USER_INTR_MAP_2_INTR2_FLAT__REG	(SMI130_USER_INTR_MAP_2_ADDR)
+
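INT_MAP_0/1/2 (0x55-0x57) route the feature and data interrupts to the INT1 and INT2 pins; routing is a plain read-modify-write of the relevant map register. A sketch for mapping any-motion to INT1 follows, reusing the illustrative smi130_set_field() and smi130_write_reg() helpers from the earlier sketches plus a hypothetical smi130_read_reg(); none of these names come from this patch.

#include <linux/types.h>

int smi130_read_reg(u8 reg, u8 *val);	/* hypothetical bus accessor */

static int smi130_map_anymotion_to_int1(void)
{
	u8 v;
	int ret;

	/* Read-modify-write INT_MAP_0 so the other routing bits are kept. */
	ret = smi130_read_reg(SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG, &v);
	if (ret)
		return ret;
	v = smi130_set_field(v, SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__MSK,
			     SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__POS, 1);
	return smi130_write_reg(SMI130_USER_INTR_MAP_0_INTR1_ANY_MOTION__REG, v);
}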
+/**************************************************************/
+/**\name	TAP SOURCE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Data_0 Description - Reg Addr --> 0x58, Bit --> 3 */
+#define SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE__POS               (3)
+#define SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE__LEN               (1)
+#define SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE__MSK               (0x08)
+#define SMI130_USER_INTR_DATA_0_INTR_TAP_SOURCE__REG	           \
+(SMI130_USER_INTR_DATA_0_ADDR)
+
+/**************************************************************/
+/**\name	LOW/HIGH SOURCE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Data_0 Description - Reg Addr --> 0x58, Bit --> 7 */
+#define SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__POS           (7)
+#define SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__LEN           (1)
+#define SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__MSK           (0x80)
+#define SMI130_USER_INTR_DATA_0_INTR_LOW_HIGH_SOURCE__REG            \
+(SMI130_USER_INTR_DATA_0_ADDR)
+
+/**************************************************************/
+/**\name	MOTION SOURCE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Data_1 Description - Reg Addr --> 0x59, Bit --> 7 */
+#define SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE__POS               (7)
+#define SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE__LEN               (1)
+#define SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE__MSK               (0x80)
+#define SMI130_USER_INTR_DATA_1_INTR_MOTION_SOURCE__REG               \
+		(SMI130_USER_INTR_DATA_1_ADDR)
+/**************************************************************/
+/**\name	LOW HIGH DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_0 Description - Reg Addr --> 0x5a, Bit --> 0...7 */
+#define SMI130_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__POS               (0)
+#define SMI130_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__LEN               (8)
+#define SMI130_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__MSK               (0xFF)
+#define SMI130_USER_INTR_LOWHIGH_0_INTR_LOW_DURN__REG               \
+		(SMI130_USER_INTR_LOWHIGH_0_ADDR)
+/**************************************************************/
+/**\name	LOW THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_1 Description - Reg Addr --> 0x5b, Bit --> 0...7 */
+#define SMI130_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__POS               (0)
+#define SMI130_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__LEN               (8)
+#define SMI130_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__MSK               (0xFF)
+#define SMI130_USER_INTR_LOWHIGH_1_INTR_LOW_THRES__REG               \
+		(SMI130_USER_INTR_LOWHIGH_1_ADDR)
+/**************************************************************/
+/**\name	LOW HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_2 Description - Reg Addr --> 0x5c, Bit --> 0...1 */
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__POS               (0)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__LEN               (2)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__MSK               (0x03)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_HYST__REG               \
+		(SMI130_USER_INTR_LOWHIGH_2_ADDR)
+/**************************************************************/
+/**\name	LOW MODE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_2 Description - Reg Addr --> 0x5c, Bit --> 2 */
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__POS               (2)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__LEN               (1)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__MSK               (0x04)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_LOW_G_MODE__REG               \
+		(SMI130_USER_INTR_LOWHIGH_2_ADDR)
+/**************************************************************/
+/**\name	HIGH_G HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_2 Description - Reg Addr --> 0x5c, Bit --> 6...7 */
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__POS               (6)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__LEN               (2)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__MSK               (0xC0)
+#define SMI130_USER_INTR_LOWHIGH_2_INTR_HIGH_G_HYST__REG               \
+		(SMI130_USER_INTR_LOWHIGH_2_ADDR)
+/**************************************************************/
+/**\name	HIGH_G DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_3 Description - Reg Addr --> 0x5d, Bit --> 0...7 */
+#define SMI130_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__POS               (0)
+#define SMI130_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__LEN               (8)
+#define SMI130_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__MSK               (0xFF)
+#define SMI130_USER_INTR_LOWHIGH_3_INTR_HIGH_G_DURN__REG               \
+		(SMI130_USER_INTR_LOWHIGH_3_ADDR)
+/**************************************************************/
+/**\name	HIGH_G THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_LowHigh_4 Description - Reg Addr --> 0x5e, Bit --> 0...7 */
+#define SMI130_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__POS               (0)
+#define SMI130_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__LEN               (8)
+#define SMI130_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__MSK               (0xFF)
+#define SMI130_USER_INTR_LOWHIGH_4_INTR_HIGH_THRES__REG               \
+		(SMI130_USER_INTR_LOWHIGH_4_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_0 Description - Reg Addr --> 0x5f, Bit --> 0...1 */
+#define SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__POS               (0)
+#define SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__LEN               (2)
+#define SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__MSK               (0x03)
+#define SMI130_USER_INTR_MOTION_0_INTR_ANY_MOTION_DURN__REG               \
+		(SMI130_USER_INTR_MOTION_0_ADDR)
+/**************************************************************/
+/**\name	SLOW/NO MOTION DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+	/* Int_Motion_0 Description - Reg Addr --> 0x5f, Bit --> 2...7 */
+#define SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__POS      (2)
+#define SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__LEN      (6)
+#define SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__MSK      (0xFC)
+#define SMI130_USER_INTR_MOTION_0_INTR_SLOW_NO_MOTION_DURN__REG       \
+		(SMI130_USER_INTR_MOTION_0_ADDR)
+/**************************************************************/
+/**\name	ANY MOTION THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_1 Description - Reg Addr --> (0x60), Bit --> 0...7 */
+#define SMI130_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__POS      (0)
+#define SMI130_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__LEN      (8)
+#define SMI130_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__MSK      (0xFF)
+#define SMI130_USER_INTR_MOTION_1_INTR_ANY_MOTION_THRES__REG               \
+		(SMI130_USER_INTR_MOTION_1_ADDR)
+/**************************************************************/
+/**\name	SLOW/NO MOTION THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_2 Description - Reg Addr --> 0x61, Bit --> 0...7 */
+#define SMI130_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__POS       (0)
+#define SMI130_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__LEN       (8)
+#define SMI130_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__MSK       (0xFF)
+#define SMI130_USER_INTR_MOTION_2_INTR_SLOW_NO_MOTION_THRES__REG       \
+		(SMI130_USER_INTR_MOTION_2_ADDR)
+/**************************************************************/
+/**\name	SLOW/NO MOTION SELECT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 0 */
+#define SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__POS	(0)
+#define SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__LEN	(1)
+#define SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__MSK	(0x01)
+#define SMI130_USER_INTR_MOTION_3_INTR_SLOW_NO_MOTION_SELECT__REG   \
+(SMI130_USER_INTR_MOTION_3_ADDR)
+/**************************************************************/
+/**\name	SIGNIFICANT MOTION SELECT LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 1 */
+#define SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT__POS		(1)
+#define SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT__LEN		(1)
+#define SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT__MSK		(0x02)
+#define SMI130_USER_INTR_SIGNIFICATION_MOTION_SELECT__REG		\
+		(SMI130_USER_INTR_MOTION_3_ADDR)
+
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 3..2 */
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP__POS		(2)
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP__LEN		(2)
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP__MSK		(0x0C)
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_SKIP__REG		\
+		(SMI130_USER_INTR_MOTION_3_ADDR)
+
+/* Int_Motion_3 Description - Reg Addr --> (0x62), Bit --> 5..4 */
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF__POS		(4)
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF__LEN		(2)
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF__MSK		(0x30)
+#define SMI130_USER_INTR_SIGNIFICANT_MOTION_PROOF__REG		\
+		(SMI130_USER_INTR_MOTION_3_ADDR)
+/**************************************************************/
+/**\name	TAP DURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* INT_TAP_0 Description - Reg Addr --> (0x63), Bit --> 0..2*/
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_DURN__POS               (0)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_DURN__LEN               (3)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_DURN__MSK               (0x07)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_DURN__REG	\
+(SMI130_USER_INTR_TAP_0_ADDR)
+/**************************************************************/
+/**\name	TAP SHOCK LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Tap_0 Description - Reg Addr --> (0x63), Bit --> 6 */
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK__POS               (6)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK__LEN               (1)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK__MSK               (0x40)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_SHOCK__REG (SMI130_USER_INTR_TAP_0_ADDR)
+/**************************************************************/
+/**\name	TAP QUIET LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Tap_0 Description - Reg Addr --> (0x63), Bit --> 7 */
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET__POS               (7)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET__LEN               (1)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET__MSK               (0x80)
+#define SMI130_USER_INTR_TAP_0_INTR_TAP_QUIET__REG (SMI130_USER_INTR_TAP_0_ADDR)
+/**************************************************************/
+/**\name	TAP THRESHOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Tap_1 Description - Reg Addr --> (0x64), Bit --> 0...4 */
+#define SMI130_USER_INTR_TAP_1_INTR_TAP_THRES__POS               (0)
+#define SMI130_USER_INTR_TAP_1_INTR_TAP_THRES__LEN               (5)
+#define SMI130_USER_INTR_TAP_1_INTR_TAP_THRES__MSK               (0x1F)
+#define SMI130_USER_INTR_TAP_1_INTR_TAP_THRES__REG (SMI130_USER_INTR_TAP_1_ADDR)
+/**************************************************************/
+/**\name	ORIENT MODE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_0 Description - Reg Addr --> (0x65), Bit --> 0...1 */
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__POS               (0)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__LEN               (2)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__MSK               (0x03)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_MODE__REG               \
+		(SMI130_USER_INTR_ORIENT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT BLOCKING LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_0 Description - Reg Addr --> (0x65), Bit --> 2...3 */
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__POS               (2)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__LEN               (2)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__MSK               (0x0C)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_BLOCKING__REG               \
+		(SMI130_USER_INTR_ORIENT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_0 Description - Reg Addr --> (0x65), Bit --> 4...7 */
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__POS               (4)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__LEN               (4)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__MSK               (0xF0)
+#define SMI130_USER_INTR_ORIENT_0_INTR_ORIENT_HYST__REG               \
+		(SMI130_USER_INTR_ORIENT_0_ADDR)
+/**************************************************************/
+/**\name	ORIENT THETA LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_1 Description - Reg Addr --> 0x66, Bit --> 0...5 */
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__POS               (0)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__LEN               (6)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__MSK               (0x3F)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_THETA__REG               \
+		(SMI130_USER_INTR_ORIENT_1_ADDR)
+/**************************************************************/
+/**\name	ORIENT UD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_1 Description - Reg Addr --> 0x66, Bit --> 6 */
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__POS         (6)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__LEN         (1)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__MSK         (0x40)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_UD_ENABLE__REG          \
+		(SMI130_USER_INTR_ORIENT_1_ADDR)
+/**************************************************************/
+/**\name	ORIENT AXIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Orient_1 Description - Reg Addr --> 0x66, Bit --> 7 */
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__POS               (7)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__LEN               (1)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__MSK               (0x80)
+#define SMI130_USER_INTR_ORIENT_1_INTR_ORIENT_AXES_EX__REG               \
+		(SMI130_USER_INTR_ORIENT_1_ADDR)
+/**************************************************************/
+/**\name	FLAT THETA LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Flat_0 Description - Reg Addr --> 0x67, Bit --> 0...5 */
+#define SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA__POS               (0)
+#define SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA__LEN               (6)
+#define SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA__MSK               (0x3F)
+#define SMI130_USER_INTR_FLAT_0_INTR_FLAT_THETA__REG  \
+		(SMI130_USER_INTR_FLAT_0_ADDR)
+/**************************************************************/
+/**\name	FLAT HYSTERESIS LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Flat_1 Description - Reg Addr --> (0x68), Bit --> 0...3 */
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST__POS		(0)
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST__LEN		(4)
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST__MSK		(0x0F)
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HYST__REG	 \
+(SMI130_USER_INTR_FLAT_1_ADDR)
+/**************************************************************/
+/**\name	FLAT HOLD LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Int_Flat_1 Description - Reg Addr --> (0x68), Bit --> 4...5 */
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD__POS                (4)
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD__LEN                (2)
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD__MSK                (0x30)
+#define SMI130_USER_INTR_FLAT_1_INTR_FLAT_HOLD__REG  \
+(SMI130_USER_INTR_FLAT_1_ADDR)
+/**************************************************************/
+/**\name	FOC ACCEL XYZ LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 0...1 */
+#define SMI130_USER_FOC_ACCEL_Z__POS               (0)
+#define SMI130_USER_FOC_ACCEL_Z__LEN               (2)
+#define SMI130_USER_FOC_ACCEL_Z__MSK               (0x03)
+#define SMI130_USER_FOC_ACCEL_Z__REG               (SMI130_USER_FOC_CONFIG_ADDR)
+
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 2...3 */
+#define SMI130_USER_FOC_ACCEL_Y__POS               (2)
+#define SMI130_USER_FOC_ACCEL_Y__LEN               (2)
+#define SMI130_USER_FOC_ACCEL_Y__MSK               (0x0C)
+#define SMI130_USER_FOC_ACCEL_Y__REG               (SMI130_USER_FOC_CONFIG_ADDR)
+
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 4...5 */
+#define SMI130_USER_FOC_ACCEL_X__POS               (4)
+#define SMI130_USER_FOC_ACCEL_X__LEN               (2)
+#define SMI130_USER_FOC_ACCEL_X__MSK               (0x30)
+#define SMI130_USER_FOC_ACCEL_X__REG               (SMI130_USER_FOC_CONFIG_ADDR)
+/**************************************************************/
+/**\name	FOC GYRO LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Foc_Conf Description - Reg Addr --> (0x69), Bit --> 6 */
+#define SMI130_USER_FOC_GYRO_ENABLE__POS               (6)
+#define SMI130_USER_FOC_GYRO_ENABLE__LEN               (1)
+#define SMI130_USER_FOC_GYRO_ENABLE__MSK               (0x40)
+#define SMI130_USER_FOC_GYRO_ENABLE__REG               \
+(SMI130_USER_FOC_CONFIG_ADDR)
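FOC_CONF packs one 2-bit target per accel axis plus the gyro FOC enable bit. On BMI160-class parts the accel target encoding is commonly 0 = disabled, 1 = +1g, 2 = -1g, 3 = 0g, but that encoding is an assumption here and should be checked against the datasheet. A sketch of building the FOC_CONF value for a flat, Z-up device (gyro FOC on, X/Y target 0g, Z target +1g), again with the illustrative smi130_set_field() helper from the earlier sketch:

#include <linux/types.h>

static u8 smi130_build_foc_conf(void)
{
	u8 v = 0;

	v = smi130_set_field(v, SMI130_USER_FOC_ACCEL_X__MSK,
			     SMI130_USER_FOC_ACCEL_X__POS, 3);	/* 0g (assumed) */
	v = smi130_set_field(v, SMI130_USER_FOC_ACCEL_Y__MSK,
			     SMI130_USER_FOC_ACCEL_Y__POS, 3);	/* 0g (assumed) */
	v = smi130_set_field(v, SMI130_USER_FOC_ACCEL_Z__MSK,
			     SMI130_USER_FOC_ACCEL_Z__POS, 1);	/* +1g (assumed) */
	v = smi130_set_field(v, SMI130_USER_FOC_GYRO_ENABLE__MSK,
			     SMI130_USER_FOC_GYRO_ENABLE__POS, 1);
	return v;
}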
+/**************************************************************/
+/**\name	NVM PROGRAM LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* CONF Description - Reg Addr --> (0x6A), Bit --> 1 */
+#define SMI130_USER_CONFIG_NVM_PROG_ENABLE__POS               (1)
+#define SMI130_USER_CONFIG_NVM_PROG_ENABLE__LEN               (1)
+#define SMI130_USER_CONFIG_NVM_PROG_ENABLE__MSK               (0x02)
+#define SMI130_USER_CONFIG_NVM_PROG_ENABLE__REG               \
+(SMI130_USER_CONFIG_ADDR)
+
+/*IF_CONF Description - Reg Addr --> (0x6B), Bit --> 0 */
+
+#define SMI130_USER_IF_CONFIG_SPI3__POS               (0)
+#define SMI130_USER_IF_CONFIG_SPI3__LEN               (1)
+#define SMI130_USER_IF_CONFIG_SPI3__MSK               (0x01)
+#define SMI130_USER_IF_CONFIG_SPI3__REG               \
+(SMI130_USER_IF_CONFIG_ADDR)
+
+/*IF_CONF Description - Reg Addr --> (0x6B), Bit --> 5..4 */
+#define SMI130_USER_IF_CONFIG_IF_MODE__POS               (4)
+#define SMI130_USER_IF_CONFIG_IF_MODE__LEN               (2)
+#define SMI130_USER_IF_CONFIG_IF_MODE__MSK               (0x30)
+#define SMI130_USER_IF_CONFIG_IF_MODE__REG		\
+(SMI130_USER_IF_CONFIG_ADDR)
+/**************************************************************/
+/**\name	GYRO SLEEP CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 0...2 */
+#define SMI130_USER_GYRO_SLEEP_TRIGGER__POS               (0)
+#define SMI130_USER_GYRO_SLEEP_TRIGGER__LEN               (3)
+#define SMI130_USER_GYRO_SLEEP_TRIGGER__MSK               (0x07)
+#define SMI130_USER_GYRO_SLEEP_TRIGGER__REG	(SMI130_USER_PMU_TRIGGER_ADDR)
+
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 3...4 */
+#define SMI130_USER_GYRO_WAKEUP_TRIGGER__POS               (3)
+#define SMI130_USER_GYRO_WAKEUP_TRIGGER__LEN               (2)
+#define SMI130_USER_GYRO_WAKEUP_TRIGGER__MSK               (0x18)
+#define SMI130_USER_GYRO_WAKEUP_TRIGGER__REG	(SMI130_USER_PMU_TRIGGER_ADDR)
+
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 5 */
+#define SMI130_USER_GYRO_SLEEP_STATE__POS               (5)
+#define SMI130_USER_GYRO_SLEEP_STATE__LEN               (1)
+#define SMI130_USER_GYRO_SLEEP_STATE__MSK               (0x20)
+#define SMI130_USER_GYRO_SLEEP_STATE__REG	(SMI130_USER_PMU_TRIGGER_ADDR)
+
+/* Pmu_Trigger Description - Reg Addr --> 0x6c, Bit --> 6 */
+#define SMI130_USER_GYRO_WAKEUP_INTR__POS               (6)
+#define SMI130_USER_GYRO_WAKEUP_INTR__LEN               (1)
+#define SMI130_USER_GYRO_WAKEUP_INTR__MSK               (0x40)
+#define SMI130_USER_GYRO_WAKEUP_INTR__REG	(SMI130_USER_PMU_TRIGGER_ADDR)
+/**************************************************************/
+/**\name	ACCEL SELF TEST LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 0...1 */
+#define SMI130_USER_ACCEL_SELFTEST_AXIS__POS               (0)
+#define SMI130_USER_ACCEL_SELFTEST_AXIS__LEN               (2)
+#define SMI130_USER_ACCEL_SELFTEST_AXIS__MSK               (0x03)
+#define SMI130_USER_ACCEL_SELFTEST_AXIS__REG	(SMI130_USER_SELF_TEST_ADDR)
+
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 2 */
+#define SMI130_USER_ACCEL_SELFTEST_SIGN__POS               (2)
+#define SMI130_USER_ACCEL_SELFTEST_SIGN__LEN               (1)
+#define SMI130_USER_ACCEL_SELFTEST_SIGN__MSK               (0x04)
+#define SMI130_USER_ACCEL_SELFTEST_SIGN__REG	(SMI130_USER_SELF_TEST_ADDR)
+
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 3 */
+#define SMI130_USER_SELFTEST_AMP__POS               (3)
+#define SMI130_USER_SELFTEST_AMP__LEN               (1)
+#define SMI130_USER_SELFTEST_AMP__MSK               (0x08)
+#define SMI130_USER_SELFTEST_AMP__REG		(SMI130_USER_SELF_TEST_ADDR)
+/**************************************************************/
+/**\name	GYRO SELF TEST LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Self_Test Description - Reg Addr --> 0x6d, Bit --> 4 */
+#define SMI130_USER_GYRO_SELFTEST_START__POS               (4)
+#define SMI130_USER_GYRO_SELFTEST_START__LEN               (1)
+#define SMI130_USER_GYRO_SELFTEST_START__MSK               (0x10)
+#define SMI130_USER_GYRO_SELFTEST_START__REG		    \
+(SMI130_USER_SELF_TEST_ADDR)
+/**************************************************************/
+/**\name	NV_CONFIG LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 0 */
+#define SMI130_USER_NV_CONFIG_SPI_ENABLE__POS               (0)
+#define SMI130_USER_NV_CONFIG_SPI_ENABLE__LEN               (1)
+#define SMI130_USER_NV_CONFIG_SPI_ENABLE__MSK               (0x01)
+#define SMI130_USER_NV_CONFIG_SPI_ENABLE__REG	 (SMI130_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 1 */
+#define SMI130_USER_IF_CONFIG_I2C_WDT_SELECT__POS               (1)
+#define SMI130_USER_IF_CONFIG_I2C_WDT_SELECT__LEN               (1)
+#define SMI130_USER_IF_CONFIG_I2C_WDT_SELECT__MSK               (0x02)
+#define SMI130_USER_IF_CONFIG_I2C_WDT_SELECT__REG		\
+(SMI130_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 2 */
+#define SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE__POS               (2)
+#define SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE__LEN               (1)
+#define SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE__MSK               (0x04)
+#define SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE__REG		\
+(SMI130_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 3 */
+#define SMI130_USER_NV_CONFIG_SPARE0__POS               (3)
+#define SMI130_USER_NV_CONFIG_SPARE0__LEN               (1)
+#define SMI130_USER_NV_CONFIG_SPARE0__MSK               (0x08)
+#define SMI130_USER_NV_CONFIG_SPARE0__REG	(SMI130_USER_NV_CONFIG_ADDR)
+
+/* NV_CONF Description - Reg Addr --> (0x70), Bit --> 4...7 */
+#define SMI130_USER_NV_CONFIG_NVM_COUNTER__POS               (4)
+#define SMI130_USER_NV_CONFIG_NVM_COUNTER__LEN               (4)
+#define SMI130_USER_NV_CONFIG_NVM_COUNTER__MSK               (0xF0)
+#define SMI130_USER_NV_CONFIG_NVM_COUNTER__REG	(SMI130_USER_NV_CONFIG_ADDR)
+/**************************************************************/
+/**\name	ACCEL MANUAL OFFSET LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_0 Description - Reg Addr --> (0x71), Bit --> 0...7 */
+#define SMI130_USER_OFFSET_0_ACCEL_OFF_X__POS               (0)
+#define SMI130_USER_OFFSET_0_ACCEL_OFF_X__LEN               (8)
+#define SMI130_USER_OFFSET_0_ACCEL_OFF_X__MSK               (0xFF)
+#define SMI130_USER_OFFSET_0_ACCEL_OFF_X__REG	(SMI130_USER_OFFSET_0_ADDR)
+
+/* Offset_1 Description - Reg Addr --> 0x72, Bit --> 0...7 */
+#define SMI130_USER_OFFSET_1_ACCEL_OFF_Y__POS               (0)
+#define SMI130_USER_OFFSET_1_ACCEL_OFF_Y__LEN               (8)
+#define SMI130_USER_OFFSET_1_ACCEL_OFF_Y__MSK               (0xFF)
+#define SMI130_USER_OFFSET_1_ACCEL_OFF_Y__REG	(SMI130_USER_OFFSET_1_ADDR)
+
+/* Offset_2 Description - Reg Addr --> 0x73, Bit --> 0...7 */
+#define SMI130_USER_OFFSET_2_ACCEL_OFF_Z__POS               (0)
+#define SMI130_USER_OFFSET_2_ACCEL_OFF_Z__LEN               (8)
+#define SMI130_USER_OFFSET_2_ACCEL_OFF_Z__MSK               (0xFF)
+#define SMI130_USER_OFFSET_2_ACCEL_OFF_Z__REG	(SMI130_USER_OFFSET_2_ADDR)
+/**************************************************************/
+/**\name	GYRO MANUAL OFFSET LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_3 Description - Reg Addr --> 0x74, Bit --> 0...7 */
+#define SMI130_USER_OFFSET_3_GYRO_OFF_X__POS               (0)
+#define SMI130_USER_OFFSET_3_GYRO_OFF_X__LEN               (8)
+#define SMI130_USER_OFFSET_3_GYRO_OFF_X__MSK               (0xFF)
+#define SMI130_USER_OFFSET_3_GYRO_OFF_X__REG	(SMI130_USER_OFFSET_3_ADDR)
+
+/* Offset_4 Description - Reg Addr --> 0x75, Bit --> 0...7 */
+#define SMI130_USER_OFFSET_4_GYRO_OFF_Y__POS               (0)
+#define SMI130_USER_OFFSET_4_GYRO_OFF_Y__LEN               (8)
+#define SMI130_USER_OFFSET_4_GYRO_OFF_Y__MSK               (0xFF)
+#define SMI130_USER_OFFSET_4_GYRO_OFF_Y__REG	(SMI130_USER_OFFSET_4_ADDR)
+
+/* Offset_5 Description - Reg Addr --> 0x76, Bit --> 0...7 */
+#define SMI130_USER_OFFSET_5_GYRO_OFF_Z__POS               (0)
+#define SMI130_USER_OFFSET_5_GYRO_OFF_Z__LEN               (8)
+#define SMI130_USER_OFFSET_5_GYRO_OFF_Z__MSK               (0xFF)
+#define SMI130_USER_OFFSET_5_GYRO_OFF_Z__REG	(SMI130_USER_OFFSET_5_ADDR)
+
+
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 0..1 */
+#define SMI130_USER_OFFSET_6_GYRO_OFF_X__POS               (0)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_X__LEN               (2)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_X__MSK               (0x03)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_X__REG	(SMI130_USER_OFFSET_6_ADDR)
+
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 2...3 */
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Y__POS               (2)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Y__LEN               (2)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Y__MSK               (0x0C)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Y__REG	(SMI130_USER_OFFSET_6_ADDR)
+
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 4...5 */
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Z__POS               (4)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Z__LEN               (2)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Z__MSK               (0x30)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_Z__REG	 (SMI130_USER_OFFSET_6_ADDR)
+/**************************************************************/
+/**\name	ACCEL OFFSET  ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_6 Description - Reg Addr --> 0x77, Bit --> 6 */
+#define SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE__POS               (6)
+#define SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE__LEN               (1)
+#define SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE__MSK               (0x40)
+#define SMI130_USER_OFFSET_6_ACCEL_OFF_ENABLE__REG	 \
+(SMI130_USER_OFFSET_6_ADDR)
+/**************************************************************/
+/**\name	GYRO OFFSET  ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Offset_6 Description - Reg Addr --> 0x77, Bit -->  7 */
+#define SMI130_USER_OFFSET_6_GYRO_OFF_EN__POS               (7)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_EN__LEN               (1)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_EN__MSK               (0x80)
+#define SMI130_USER_OFFSET_6_GYRO_OFF_EN__REG	 (SMI130_USER_OFFSET_6_ADDR)
+/**************************************************************/
+/**\name	STEP COUNTER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* STEP_CNT_0  Description - Reg Addr --> 0x78, Bit -->  0 to 7 */
+#define SMI130_USER_STEP_COUNT_LSB__POS               (0)
+#define SMI130_USER_STEP_COUNT_LSB__LEN               (7)
+#define SMI130_USER_STEP_COUNT_LSB__MSK               (0xFF)
+#define SMI130_USER_STEP_COUNT_LSB__REG	 (SMI130_USER_STEP_COUNT_0_ADDR)
+
+/* STEP_CNT_1  Description - Reg Addr --> 0x79, Bit -->  0 to 7 */
+#define SMI130_USER_STEP_COUNT_MSB__POS               (0)
+#define SMI130_USER_STEP_COUNT_MSB__LEN               (7)
+#define SMI130_USER_STEP_COUNT_MSB__MSK               (0xFF)
+#define SMI130_USER_STEP_COUNT_MSB__REG	 (SMI130_USER_STEP_COUNT_1_ADDR)
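+
+/*!
+ *	@note Illustrative example (added for documentation only): the 16-bit
+ *	step count is split across the two registers above (0x78/0x79), so
+ *	after a two-byte burst read the value can be combined as follows.
+ *	@code
+ *	u8 v_data_u8[2];
+ *	u16 v_step_count_u16;
+ *
+ *	// read 0x78 (LSB) and 0x79 (MSB) in one burst
+ *	smi130_read_reg(SMI130_USER_STEP_COUNT_0_ADDR, v_data_u8, 2);
+ *	v_step_count_u16 = (u16)((v_data_u8[1] << 8) | v_data_u8[0]);
+ *	@endcode
+*/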
+/**************************************************************/
+/**\name	STEP COUNTER CONFIGURATION LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* STEP_CONFIG_0  Description - Reg Addr --> 0x7A, Bit -->  0 to 7 */
+#define SMI130_USER_STEP_CONFIG_ZERO__POS               (0)
+#define SMI130_USER_STEP_CONFIG_ZERO__LEN               (7)
+#define SMI130_USER_STEP_CONFIG_ZERO__MSK               (0xFF)
+#define SMI130_USER_STEP_CONFIG_ZERO__REG	 \
+(SMI130_USER_STEP_CONFIG_0_ADDR)
+
+
+/* STEP_CONFIG_1  Description - Reg Addr --> 0x7B, Bit -->  0 to 2 and
+4 to 7 */
+#define SMI130_USER_STEP_CONFIG_ONE_CNF1__POS               (0)
+#define SMI130_USER_STEP_CONFIG_ONE_CNF1__LEN               (3)
+#define SMI130_USER_STEP_CONFIG_ONE_CNF1__MSK               (0x07)
+#define SMI130_USER_STEP_CONFIG_ONE_CNF1__REG	 \
+(SMI130_USER_STEP_CONFIG_1_ADDR)
+
+#define SMI130_USER_STEP_CONFIG_ONE_CNF2__POS               (4)
+#define SMI130_USER_STEP_CONFIG_ONE_CNF2__LEN               (4)
+#define SMI130_USER_STEP_CONFIG_ONE_CNF2__MSK               (0xF0)
+#define SMI130_USER_STEP_CONFIG_ONE_CNF2__REG	 \
+(SMI130_USER_STEP_CONFIG_1_ADDR)
+/**************************************************************/
+/**\name	STEP COUNTER ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* STEP_CONFIG_1  Description - Reg Addr --> 0x7B, Bit -->  3 */
+#define SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__POS		(3)
+#define SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__LEN		(1)
+#define SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__MSK		(0x08)
+#define SMI130_USER_STEP_CONFIG_1_STEP_COUNT_ENABLE__REG	\
+(SMI130_USER_STEP_CONFIG_1_ADDR)
+
+/* USER REGISTERS DEFINITION END */
+/**************************************************************************/
+/* CMD REGISTERS DEFINITION START */
+/**************************************************************/
+/**\name	COMMAND REGISTER LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Command description address - Reg Addr --> 0x7E, Bit -->  0....7 */
+#define SMI130_CMD_COMMANDS__POS              (0)
+#define SMI130_CMD_COMMANDS__LEN              (8)
+#define SMI130_CMD_COMMANDS__MSK              (0xFF)
+#define SMI130_CMD_COMMANDS__REG	 (SMI130_CMD_COMMANDS_ADDR)
+/**************************************************************/
+/**\name	PAGE ENABLE LENGTH, POSITION AND MASK*/
+/**************************************************************/
+/* Target page address - Reg Addr --> 0x7F, Bit -->  4....5 */
+#define SMI130_CMD_TARGET_PAGE__POS           (4)
+#define SMI130_CMD_TARGET_PAGE__LEN           (2)
+#define SMI130_CMD_TARGET_PAGE__MSK           (0x30)
+#define SMI130_CMD_TARGET_PAGE__REG	 (SMI130_CMD_EXT_MODE_ADDR)
+
+/* Paging enable - Reg Addr --> 0x7F, Bit -->  7 */
+#define SMI130_CMD_PAGING_EN__POS           (7)
+#define SMI130_CMD_PAGING_EN__LEN           (1)
+#define SMI130_CMD_PAGING_EN__MSK           (0x80)
+#define SMI130_CMD_PAGING_EN__REG		(SMI130_CMD_EXT_MODE_ADDR)
+
+/* COM_C_TRIM_FIVE - Reg Addr --> (SMI130_COM_C_TRIM_FIVE_ADDR), Bit --> 0...7 */
+#define SMI130_COM_C_TRIM_FIVE__POS           (0)
+#define SMI130_COM_C_TRIM_FIVE__LEN           (8)
+#define SMI130_COM_C_TRIM_FIVE__MSK           (0xFF)
+#define SMI130_COM_C_TRIM_FIVE__REG		(SMI130_COM_C_TRIM_FIVE_ADDR)
+
+/**************************************************************************/
+/* CMD REGISTERS DEFINITION END */
+
+/**************************************************/
+/**\name	FIFO FRAME COUNT DEFINITION           */
+/*************************************************/
+#define FIFO_FRAME				(1024)
+#define FIFO_CONFIG_CHECK1		(0x00)
+#define FIFO_CONFIG_CHECK2		(0x80)
+/**************************************************/
+/**\name	MAG SENSOR SELECT          */
+/*************************************************/
+#define BST_BMM		(0)
+#define BST_AKM		(1)
+#define SMI130_YAS537_I2C_ADDRESS	(0x2E)
+/**************************************************/
+/**\name	ACCEL RANGE          */
+/*************************************************/
+#define SMI130_ACCEL_RANGE_2G           (0X03)
+#define SMI130_ACCEL_RANGE_4G           (0X05)
+#define SMI130_ACCEL_RANGE_8G           (0X08)
+#define SMI130_ACCEL_RANGE_16G          (0X0C)
+/**************************************************/
+/**\name	ACCEL ODR          */
+/*************************************************/
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED       (0x00)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_0_78HZ         (0x01)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_1_56HZ         (0x02)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_3_12HZ         (0x03)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_6_25HZ         (0x04)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_12_5HZ         (0x05)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_25HZ           (0x06)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_50HZ           (0x07)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_100HZ          (0x08)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_200HZ          (0x09)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_400HZ          (0x0A)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_800HZ          (0x0B)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_1600HZ         (0x0C)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED0      (0x0D)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED1      (0x0E)
+#define SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED2      (0x0F)
+/**************************************************/
+/**\name	ACCEL BANDWIDTH PARAMETER         */
+/*************************************************/
+#define SMI130_ACCEL_OSR4_AVG1			(0x00)
+#define SMI130_ACCEL_OSR2_AVG2			(0x01)
+#define SMI130_ACCEL_NORMAL_AVG4		(0x02)
+#define SMI130_ACCEL_CIC_AVG8			(0x03)
+#define SMI130_ACCEL_RES_AVG16			(0x04)
+#define SMI130_ACCEL_RES_AVG32			(0x05)
+#define SMI130_ACCEL_RES_AVG64			(0x06)
+#define SMI130_ACCEL_RES_AVG128			(0x07)
+/**************************************************/
+/**\name	GYRO ODR         */
+/*************************************************/
+#define SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED		(0x00)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_25HZ			(0x06)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_50HZ			(0x07)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_100HZ			(0x08)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_200HZ			(0x09)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_400HZ			(0x0A)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_800HZ			(0x0B)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_1600HZ			(0x0C)
+#define SMI130_GYRO_OUTPUT_DATA_RATE_3200HZ			(0x0D)
+/**************************************************/
+/**\name	GYRO BANDWIDTH PARAMETER         */
+/*************************************************/
+#define SMI130_GYRO_OSR4_MODE		(0x00)
+#define SMI130_GYRO_OSR2_MODE		(0x01)
+#define SMI130_GYRO_NORMAL_MODE		(0x02)
+#define SMI130_GYRO_CIC_MODE		(0x03)
+/**************************************************/
+/**\name	GYROSCOPE RANGE PARAMETER         */
+/*************************************************/
+#define SMI130_GYRO_RANGE_2000_DEG_SEC	(0x00)
+#define SMI130_GYRO_RANGE_1000_DEG_SEC	(0x01)
+#define SMI130_GYRO_RANGE_500_DEG_SEC	(0x02)
+#define SMI130_GYRO_RANGE_250_DEG_SEC	(0x03)
+#define SMI130_GYRO_RANGE_125_DEG_SEC	(0x04)
+/**************************************************/
+/**\name	MAG ODR         */
+/*************************************************/
+#define SMI130_MAG_OUTPUT_DATA_RATE_RESERVED       (0x00)
+#define SMI130_MAG_OUTPUT_DATA_RATE_0_78HZ         (0x01)
+#define SMI130_MAG_OUTPUT_DATA_RATE_1_56HZ         (0x02)
+#define SMI130_MAG_OUTPUT_DATA_RATE_3_12HZ         (0x03)
+#define SMI130_MAG_OUTPUT_DATA_RATE_6_25HZ         (0x04)
+#define SMI130_MAG_OUTPUT_DATA_RATE_12_5HZ         (0x05)
+#define SMI130_MAG_OUTPUT_DATA_RATE_25HZ           (0x06)
+#define SMI130_MAG_OUTPUT_DATA_RATE_50HZ           (0x07)
+#define SMI130_MAG_OUTPUT_DATA_RATE_100HZ          (0x08)
+#define SMI130_MAG_OUTPUT_DATA_RATE_200HZ          (0x09)
+#define SMI130_MAG_OUTPUT_DATA_RATE_400HZ          (0x0A)
+#define SMI130_MAG_OUTPUT_DATA_RATE_800HZ          (0x0B)
+#define SMI130_MAG_OUTPUT_DATA_RATE_1600HZ         (0x0C)
+#define SMI130_MAG_OUTPUT_DATA_RATE_RESERVED0      (0x0D)
+#define SMI130_MAG_OUTPUT_DATA_RATE_RESERVED1      (0x0E)
+#define SMI130_MAG_OUTPUT_DATA_RATE_RESERVED2      (0x0F)
+
+/**************************************************/
+/**\name	ENABLE/DISABLE SELECTIONS        */
+/*************************************************/
+
+/* Enable accel and gyro offset */
+#define ACCEL_OFFSET_ENABLE		(0x01)
+#define GYRO_OFFSET_ENABLE		(0x01)
+
+/* command register definition */
+#define START_FOC_ACCEL_GYRO	(0X03)
+
+ /* INT ENABLE 0 */
+#define SMI130_ANY_MOTION_X_ENABLE       (0)
+#define SMI130_ANY_MOTION_Y_ENABLE       (1)
+#define SMI130_ANY_MOTION_Z_ENABLE       (2)
+#define SMI130_DOUBLE_TAP_ENABLE         (4)
+#define SMI130_SINGLE_TAP_ENABLE         (5)
+#define SMI130_ORIENT_ENABLE             (6)
+#define SMI130_FLAT_ENABLE               (7)
+
+/* INT ENABLE 1 */
+#define SMI130_HIGH_G_X_ENABLE       (0)
+#define SMI130_HIGH_G_Y_ENABLE       (1)
+#define SMI130_HIGH_G_Z_ENABLE       (2)
+#define SMI130_LOW_G_ENABLE          (3)
+#define SMI130_DATA_RDY_ENABLE       (4)
+#define SMI130_FIFO_FULL_ENABLE      (5)
+#define SMI130_FIFO_WM_ENABLE        (6)
+
+/* INT ENABLE 2 */
+#define  SMI130_NOMOTION_X_ENABLE	(0)
+#define  SMI130_NOMOTION_Y_ENABLE	(1)
+#define  SMI130_NOMOTION_Z_ENABLE	(2)
+#define  SMI130_STEP_DETECTOR_EN	(3)
+
+/* FOC axis selection for accel*/
+#define	FOC_X_AXIS		(0)
+#define	FOC_Y_AXIS		(1)
+#define	FOC_Z_AXIS		(2)
+
+/* IN OUT CONTROL */
+#define SMI130_INTR1_EDGE_CTRL			(0)
+#define SMI130_INTR2_EDGE_CTRL			(1)
+#define SMI130_INTR1_LEVEL				(0)
+#define SMI130_INTR2_LEVEL				(1)
+#define SMI130_INTR1_OUTPUT_TYPE		(0)
+#define SMI130_INTR2_OUTPUT_TYPE		(1)
+#define SMI130_INTR1_OUTPUT_ENABLE		(0)
+#define SMI130_INTR2_OUTPUT_ENABLE		(1)
+
+#define SMI130_INTR1_INPUT_ENABLE	(0)
+#define SMI130_INTR2_INPUT_ENABLE	(1)
+
+/*  INTERRUPT MAPS    */
+#define SMI130_INTR1_MAP_LOW_G			(0)
+#define SMI130_INTR2_MAP_LOW_G			(1)
+#define SMI130_INTR1_MAP_HIGH_G			(0)
+#define SMI130_INTR2_MAP_HIGH_G			(1)
+#define SMI130_INTR1_MAP_ANY_MOTION		(0)
+#define SMI130_INTR2_MAP_ANY_MOTION		(1)
+#define SMI130_INTR1_MAP_NOMO			(0)
+#define SMI130_INTR2_MAP_NOMO			(1)
+#define SMI130_INTR1_MAP_DOUBLE_TAP		(0)
+#define SMI130_INTR2_MAP_DOUBLE_TAP		(1)
+#define SMI130_INTR1_MAP_SINGLE_TAP		(0)
+#define SMI130_INTR2_MAP_SINGLE_TAP		(1)
+#define SMI130_INTR1_MAP_ORIENT			(0)
+#define SMI130_INTR2_MAP_ORIENT			(1)
+#define SMI130_INTR1_MAP_FLAT			(0)
+#define SMI130_INTR2_MAP_FLAT			(1)
+#define SMI130_INTR1_MAP_DATA_RDY		(0)
+#define SMI130_INTR2_MAP_DATA_RDY		(1)
+#define SMI130_INTR1_MAP_FIFO_WM		(0)
+#define SMI130_INTR2_MAP_FIFO_WM		(1)
+#define SMI130_INTR1_MAP_FIFO_FULL      (0)
+#define SMI130_INTR2_MAP_FIFO_FULL      (1)
+#define SMI130_INTR1_MAP_PMUTRIG        (0)
+#define SMI130_INTR2_MAP_PMUTRIG		(1)
+
+/* Interrupt mapping*/
+#define	SMI130_MAP_INTR1		(0)
+#define	SMI130_MAP_INTR2		(1)
+/**************************************************/
+/**\name	 TAP DURATION         */
+/*************************************************/
+#define SMI130_TAP_DURN_50MS     (0x00)
+#define SMI130_TAP_DURN_100MS    (0x01)
+#define SMI130_TAP_DURN_150MS    (0x02)
+#define SMI130_TAP_DURN_200MS    (0x03)
+#define SMI130_TAP_DURN_250MS    (0x04)
+#define SMI130_TAP_DURN_375MS    (0x05)
+#define SMI130_TAP_DURN_500MS    (0x06)
+#define SMI130_TAP_DURN_700MS    (0x07)
+/**************************************************/
+/**\name	TAP SHOCK         */
+/*************************************************/
+#define SMI130_TAP_SHOCK_50MS	(0x00)
+#define SMI130_TAP_SHOCK_75MS	(0x01)
+/**************************************************/
+/**\name	TAP QUIET        */
+/*************************************************/
+#define SMI130_TAP_QUIET_30MS	(0x00)
+#define SMI130_TAP_QUIET_20MS	(0x01)
+/**************************************************/
+/**\name	STEP DETECTION SELECTION MODES      */
+/*************************************************/
+#define	SMI130_STEP_NORMAL_MODE			(0)
+#define	SMI130_STEP_SENSITIVE_MODE		(1)
+#define	SMI130_STEP_ROBUST_MODE			(2)
+/**************************************************/
+/**\name	STEP CONFIGURATION SELECT MODE    */
+/*************************************************/
+#define	STEP_CONFIG_NORMAL		(0X315)
+#define	STEP_CONFIG_SENSITIVE	(0X2D)
+#define	STEP_CONFIG_ROBUST		(0X71D)
+/**************************************************/
+/**\name	BMM150 TRIM DATA DEFINITIONS      */
+/*************************************************/
+#define SMI130_MAG_DIG_X1                      (0x5D)
+#define SMI130_MAG_DIG_Y1                      (0x5E)
+#define SMI130_MAG_DIG_Z4_LSB                  (0x62)
+#define SMI130_MAG_DIG_Z4_MSB                  (0x63)
+#define SMI130_MAG_DIG_X2                      (0x64)
+#define SMI130_MAG_DIG_Y2                      (0x65)
+#define SMI130_MAG_DIG_Z2_LSB                  (0x68)
+#define SMI130_MAG_DIG_Z2_MSB                  (0x69)
+#define SMI130_MAG_DIG_Z1_LSB                  (0x6A)
+#define SMI130_MAG_DIG_Z1_MSB                  (0x6B)
+#define SMI130_MAG_DIG_XYZ1_LSB                (0x6C)
+#define SMI130_MAG_DIG_XYZ1_MSB                (0x6D)
+#define SMI130_MAG_DIG_Z3_LSB                  (0x6E)
+#define SMI130_MAG_DIG_Z3_MSB                  (0x6F)
+#define SMI130_MAG_DIG_XY2                     (0x70)
+#define SMI130_MAG_DIG_XY1                     (0x71)
+/**************************************************/
+/**\name	BMM150 PRE-SET MODE DEFINITIONS     */
+/*************************************************/
+#define SMI130_MAG_PRESETMODE_LOWPOWER                 (1)
+#define SMI130_MAG_PRESETMODE_REGULAR                  (2)
+#define SMI130_MAG_PRESETMODE_HIGHACCURACY             (3)
+#define SMI130_MAG_PRESETMODE_ENHANCED                 (4)
+/**************************************************/
+/**\name	BMM150 PRESET MODES - DATA RATES    */
+/*************************************************/
+#define SMI130_MAG_LOWPOWER_DR                       (0x02)
+#define SMI130_MAG_REGULAR_DR                        (0x02)
+#define SMI130_MAG_HIGHACCURACY_DR                   (0x2A)
+#define SMI130_MAG_ENHANCED_DR                       (0x02)
+/**************************************************/
+/**\name	BMM150 PRESET MODES - REPETITIONS-XY RATES */
+/*************************************************/
+#define SMI130_MAG_LOWPOWER_REPXY                    (1)
+#define SMI130_MAG_REGULAR_REPXY                     (4)
+#define SMI130_MAG_HIGHACCURACY_REPXY                (23)
+#define SMI130_MAG_ENHANCED_REPXY                    (7)
+/**************************************************/
+/**\name	BMM150 PRESET MODES - REPETITIONS-Z RATES */
+/*************************************************/
+#define SMI130_MAG_LOWPOWER_REPZ                     (2)
+#define SMI130_MAG_REGULAR_REPZ                      (14)
+#define SMI130_MAG_HIGHACCURACY_REPZ                 (82)
+#define SMI130_MAG_ENHANCED_REPZ                     (26)
+#define SMI130_MAG_NOAMRL_SWITCH_TIMES               (5)
+#define MAG_INTERFACE_PMU_ENABLE                     (1)
+#define MAG_INTERFACE_PMU_DISABLE                    (0)
+/**************************************************/
+/**\name	USED FOR MAG OVERFLOW CHECK FOR BMM150  */
+/*************************************************/
+#define SMI130_MAG_OVERFLOW_OUTPUT			((s16)-32768)
+#define SMI130_MAG_OVERFLOW_OUTPUT_S32		((s32)(-2147483647-1))
+#define SMI130_MAG_NEGATIVE_SATURATION_Z   ((s16)-32767)
+#define SMI130_MAG_POSITIVE_SATURATION_Z   ((u16)32767)
+#define SMI130_MAG_FLIP_OVERFLOW_ADCVAL		((s16)-4096)
+#define SMI130_MAG_HALL_OVERFLOW_ADCVAL		((s16)-16384)
+/**************************************************/
+/**\name	BMM150 REGISTER DEFINITION */
+/*************************************************/
+#define SMI130_BMM150_CHIP_ID           (0x40)
+#define SMI130_BMM150_POWE_CONTROL_REG	(0x4B)
+#define SMI130_BMM150_POWE_MODE_REG		(0x4C)
+#define SMI130_BMM150_DATA_REG			(0x42)
+#define SMI130_BMM150_XY_REP			(0x51)
+#define SMI130_BMM150_Z_REP				(0x52)
+/**************************************************/
+/**\name	AKM COMPENSATING DATA REGISTERS     */
+/*************************************************/
+#define SMI130_BST_AKM_ASAX		(0x60)
+#define SMI130_BST_AKM_ASAY		(0x61)
+#define SMI130_BST_AKM_ASAZ		(0x62)
+/**************************************************/
+/**\name	AKM POWER MODE SELECTION     */
+/*************************************************/
+#define AKM_POWER_DOWN_MODE			(0)
+#define AKM_SINGLE_MEAS_MODE		(1)
+#define FUSE_ROM_MODE				(2)
+/**************************************************/
+/**\name	SECONDARY_MAG POWER MODE SELECTION    */
+/*************************************************/
+#define SMI130_MAG_FORCE_MODE		(0)
+#define SMI130_MAG_SUSPEND_MODE		(1)
+/**************************************************/
+/**\name	MAG POWER MODE SELECTION    */
+/*************************************************/
+#define	FORCE_MODE		(0)
+#define	SUSPEND_MODE	(1)
+#define	NORMAL_MODE		(2)
+#define MAG_SUSPEND_MODE (1)
+/**************************************************/
+/**\name	FIFO CONFIGURATIONS    */
+/*************************************************/
+#define FIFO_HEADER_ENABLE			(0x01)
+#define FIFO_MAG_ENABLE				(0x01)
+#define FIFO_ACCEL_ENABLE			(0x01)
+#define FIFO_GYRO_ENABLE			(0x01)
+#define FIFO_TIME_ENABLE			(0x01)
+#define FIFO_STOPONFULL_ENABLE		(0x01)
+#define FIFO_WM_INTERRUPT_ENABLE	(0x01)
+#define	SMI130_FIFO_INDEX_LENGTH	(1)
+#define	SMI130_FIFO_TAG_INTR_MASK	(0xFC)
+
+/**************************************************/
+/**\name	ACCEL POWER MODE    */
+/*************************************************/
+#define ACCEL_MODE_NORMAL	(0x11)
+#define	ACCEL_LOWPOWER		(0X12)
+#define	ACCEL_SUSPEND		(0X10)
+/**************************************************/
+/**\name	GYRO POWER MODE    */
+/*************************************************/
+#define GYRO_MODE_SUSPEND		(0x14)
+#define GYRO_MODE_NORMAL		(0x15)
+#define GYRO_MODE_FASTSTARTUP	(0x17)
+/**************************************************/
+/**\name	MAG POWER MODE    */
+/*************************************************/
+#define MAG_MODE_SUSPEND	(0x18)
+#define MAG_MODE_NORMAL		(0x19)
+#define MAG_MODE_LOWPOWER	(0x1A)
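+
+/*!
+ *	@note Illustrative example (added for documentation only): the power
+ *	mode values above are written to the 0x7E command register
+ *	(SMI130_CMD_COMMANDS__REG, defined elsewhere in this file). A minimal
+ *	sketch, assuming the caller allows the part time to settle between
+ *	mode switches:
+ *	@code
+ *	u8 v_command_u8 = ACCEL_MODE_NORMAL;
+ *
+ *	// request accel normal mode via the command register
+ *	smi130_write_reg(SMI130_CMD_COMMANDS__REG, &v_command_u8, 1);
+ *
+ *	// request gyro normal mode the same way
+ *	v_command_u8 = GYRO_MODE_NORMAL;
+ *	smi130_write_reg(SMI130_CMD_COMMANDS__REG, &v_command_u8, 1);
+ *	@endcode
+*/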
+/**************************************************/
+/**\name	ENABLE/DISABLE BIT VALUES    */
+/*************************************************/
+#define SMI130_ENABLE	(0x01)
+#define SMI130_DISABLE	(0x00)
+/**************************************************/
+/**\name	INTERRUPT EDGE TRIGGER ENABLE    */
+/*************************************************/
+#define SMI130_EDGE		(0x01)
+#define SMI130_LEVEL	(0x00)
+/**************************************************/
+/**\name	INTERRUPT LEVEL ENABLE    */
+/*************************************************/
+#define SMI130_LEVEL_LOW		(0x00)
+#define SMI130_LEVEL_HIGH		(0x01)
+/**************************************************/
+/**\name	INTERRUPT OUTPUT ENABLE    */
+/*************************************************/
+#define SMI130_OPEN_DRAIN	(0x01)
+#define SMI130_PUSH_PULL	(0x00)
+
+/* interrupt output enable*/
+#define SMI130_INPUT	(0x01)
+#define SMI130_OUTPUT	(0x00)
+
+/**************************************************/
+/**\name	INTERRUPT TAP SOURCE ENABLE    */
+/*************************************************/
+#define FILTER_DATA		(0x00)
+#define UNFILTER_DATA	(0x01)
+/**************************************************/
+/**\name	SLOW MOTION/ NO MOTION SELECT   */
+/*************************************************/
+#define SLOW_MOTION		(0x00)
+#define NO_MOTION		(0x01)
+/**************************************************/
+/**\name	SIGNIFICANT MOTION SELECTION   */
+/*************************************************/
+#define ANY_MOTION			(0x00)
+#define SIGNIFICANT_MOTION	(0x01)
+/**************************************************/
+/**\name	LATCH DURATION   */
+/*************************************************/
+#define SMI130_LATCH_DUR_NONE				(0x00)
+#define SMI130_LATCH_DUR_312_5_MICRO_SEC	(0x01)
+#define SMI130_LATCH_DUR_625_MICRO_SEC		(0x02)
+#define SMI130_LATCH_DUR_1_25_MILLI_SEC		(0x03)
+#define SMI130_LATCH_DUR_2_5_MILLI_SEC		(0x04)
+#define SMI130_LATCH_DUR_5_MILLI_SEC		(0x05)
+#define SMI130_LATCH_DUR_10_MILLI_SEC		(0x06)
+#define SMI130_LATCH_DUR_20_MILLI_SEC		(0x07)
+#define SMI130_LATCH_DUR_40_MILLI_SEC		(0x08)
+#define SMI130_LATCH_DUR_80_MILLI_SEC		(0x09)
+#define SMI130_LATCH_DUR_160_MILLI_SEC		(0x0A)
+#define SMI130_LATCH_DUR_320_MILLI_SEC		(0x0B)
+#define SMI130_LATCH_DUR_640_MILLI_SEC		(0x0C)
+#define SMI130_LATCH_DUR_1_28_SEC			(0x0D)
+#define SMI130_LATCH_DUR_2_56_SEC			(0x0E)
+#define SMI130_LATCHED						(0x0F)
+/**************************************************/
+/**\name	GYRO OFFSET MASK DEFINITION   */
+/*************************************************/
+#define SMI130_GYRO_MANUAL_OFFSET_0_7	(0x00FF)
+#define SMI130_GYRO_MANUAL_OFFSET_8_9	(0x0300)
+/**************************************************/
+/**\name	STEP CONFIGURATION MASK DEFINITION   */
+/*************************************************/
+#define SMI130_STEP_CONFIG_0_7		(0x00FF)
+#define SMI130_STEP_CONFIG_8_10		(0x0700)
+#define SMI130_STEP_CONFIG_11_14	(0xF000)
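+
+/*!
+ *	@note Illustrative example (added for documentation only): the masks
+ *	above split a combined step configuration word (for example
+ *	STEP_CONFIG_NORMAL) into the STEP_CONFIG_0 byte and the CNF1/CNF2
+ *	fields of STEP_CONFIG_1.
+ *	@code
+ *	u16 v_step_config_u16 = STEP_CONFIG_NORMAL;
+ *	u8 v_config_zero_u8;
+ *	u8 v_config_cnf1_u8;
+ *	u8 v_config_cnf2_u8;
+ *
+ *	v_config_zero_u8 =
+ *	(u8)(v_step_config_u16 & SMI130_STEP_CONFIG_0_7);
+ *	v_config_cnf1_u8 =
+ *	(u8)((v_step_config_u16 & SMI130_STEP_CONFIG_8_10) >> 8);
+ *	v_config_cnf2_u8 =
+ *	(u8)((v_step_config_u16 & SMI130_STEP_CONFIG_11_14) >> 12);
+ *	@endcode
+*/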
+/**************************************************/
+/**\name	DEFINITION USED FOR DIFFERENT WRITE   */
+/*************************************************/
+#define	SMI130_WRITE_TARGET_PAGE0	(0x00)
+#define	SMI130_WRITE_TARGET_PAGE1	(0x01)
+#define	SMI130_WRITE_ENABLE_PAGE1	(0x01)
+#define	SMI130_MANUAL_DISABLE	    (0x00)
+#define	SMI130_MANUAL_ENABLE	    (0x01)
+#define	SMI130_YAS_DISABLE_RCOIL	(0x00)
+#define	SMI130_ENABLE_MAG_IF_MODE	(0x02)
+#define	SMI130_ENABLE_ANY_MOTION_INTR1	(0x04)
+#define	SMI130_ENABLE_ANY_MOTION_INTR2	(0x04)
+#define	SMI130_MAG_DATA_READ_REG        (0x04)
+#define SMI130_BMM_POWER_MODE_REG		(0x06)
+#define	SMI130_ENABLE_ANY_MOTION_AXIS	(0x07)
+#define	SMI130_ENABLE_LOW_G             (0x08)
+#define	SMI130_YAS532_ACQ_START         (0x11)
+#define	SMI130_YAS_DEVICE_ID_REG        (0x80)
+#define	SMI130_FIFO_GYRO_ENABLE         (0x80)
+#define	SMI130_SIG_MOTION_INTR_ENABLE   (0x01)
+#define	SMI130_STEP_DETECT_INTR_ENABLE  (0x01)
+#define	SMI130_LOW_G_INTR_STAT          (0x01)
+#define SMI130_PULL_UP_DATA             (0x30)
+#define SMI130_FIFO_M_G_A_ENABLE        (0xE0)
+#define SMI130_FIFO_M_G_ENABLE          (0xA0)
+#define SMI130_FIFO_M_A_ENABLE          (0x60)
+#define SMI130_FIFO_G_A_ENABLE          (0xC0)
+#define SMI130_FIFO_A_ENABLE            (0x40)
+#define SMI130_FIFO_M_ENABLE            (0x20)
+/**************************************************/
+/**\name	MAG INIT DEFINITION  */
+/*************************************************/
+#define SMI130_COMMAND_REG_ONE		(0x37)
+#define SMI130_COMMAND_REG_TWO		(0x9A)
+#define SMI130_COMMAND_REG_THREE	(0xC0)
+#define	RESET_STEP_COUNTER			(0xB2)
+/**************************************************/
+/**\name	BIT SLICE GET AND SET FUNCTIONS  */
+/*************************************************/
+#define SMI130_GET_BITSLICE(regvar, bitname)\
+		((regvar & bitname##__MSK) >> bitname##__POS)
+
+
+#define SMI130_SET_BITSLICE(regvar, bitname, val)\
+		((regvar & ~bitname##__MSK) | \
+		((val<<bitname##__POS)&bitname##__MSK))
+
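+/*!
+ *	@note Illustrative example (added for documentation only): the bit
+ *	slice macros combine the *__POS and *__MSK definitions above to read
+ *	or update a single field of a register byte.
+ *	@code
+ *	u8 v_data_u8 = 0;
+ *
+ *	// enable the I2C watchdog field inside the NV_CONF/IF_CONF byte
+ *	v_data_u8 = SMI130_SET_BITSLICE(v_data_u8,
+ *	SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE, 1);
+ *
+ *	// read the same field back from the (local) register value
+ *	if (SMI130_GET_BITSLICE(v_data_u8,
+ *	SMI130_USER_IF_CONFIG_I2C_WDT_ENABLE) == 1) {
+ *		// the watchdog enable field is set
+ *	}
+ *	@endcode
+*/
+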
+/**************************************************/
+/**\name	 FUNCTION DECLARATIONS  */
+/*************************************************/
+/**************************************************/
+/**\name	 FUNCTION FOR SMI130 INITIALIZE  */
+/*************************************************/
+/*!
+ *	@brief
+ *	This function is used to initialize the
+ *	bus read and bus write functions and
+ *	assign the chip id and device address.
+ *	The chip id is read from register 0x00, bits 0 to 7
+ *
+ *	@param smi130 : structure pointer
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	While changing the parameters of the smi130_t structure,
+ *	consider the following point:
+ *	changing the reference value of a parameter
+ *	changes the local copy or local reference,
+ *	so make sure your changes will not
+ *	affect the reference value of the parameter
+ *	(better: do not change the reference value of the parameter)
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_init(struct smi130_t *smi130);
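+/*!
+ *	@note Illustrative usage sketch (added for documentation only, not
+ *	part of the driver API). The member names of struct smi130_t used
+ *	below (bus accessors, delay hook, device address) are assumptions
+ *	for illustration; the actual structure is defined earlier in this
+ *	file, and my_i2c_read/my_i2c_write/my_delay_msec are platform
+ *	placeholders.
+ *	@code
+ *	struct smi130_t smi130_dev;
+ *
+ *	// hook up the platform specific bus accessors (assumed member names)
+ *	smi130_dev.bus_read = my_i2c_read;
+ *	smi130_dev.bus_write = my_i2c_write;
+ *	smi130_dev.delay_msec = my_delay_msec;
+ *	smi130_dev.dev_addr = MY_SMI130_I2C_ADDR;
+ *
+ *	// reads the chip id (register 0x00) and latches the device pointer
+ *	smi130_init(&smi130_dev);
+ *	@endcode
+*/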
+/**************************************************/
+/**\name	 FUNCTION FOR READ AND WRITE REGISTERS  */
+/*************************************************/
+/*!
+ * @brief
+ *	This API writes data to
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> The data to be written to the register
+ *	@param v_len_u8 -> number of bytes to write
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_write_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8);
+/*!
+ * @brief
+ *	This API reads the data from
+ *	the given register
+ *
+ *
+ *	@param v_addr_u8 -> Address of the register
+ *	@param v_data_u8 -> The data from the register
+ *	@param v_len_u8 -> number of bytes to read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_read_reg(u8 v_addr_u8,
+u8 *v_data_u8, u8 v_len_u8);
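+/*!
+ *	@note Illustrative example (added for documentation only): reading
+ *	the chip id from register 0x00 with the raw register accessor.
+ *	@code
+ *	u8 v_chip_id_u8 = 0;
+ *
+ *	if (smi130_read_reg(0x00, &v_chip_id_u8, 1) == 0) {
+ *		// v_chip_id_u8 now holds the chip id (register 0x00, bits 0..7)
+ *	}
+ *	@endcode
+*/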
+/**************************************************/
+/**\name	 FUNCTION FOR ERROR CODES  */
+/*************************************************/
+/*!
+ *	@brief This API is used to read the fatal error
+ *	from register 0x02 bit 0
+ *	This flag will be reset only by power-on-reset and soft reset
+ *
+ *
+ *  @param v_fatal_err_u8 : The status of fatal error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fatal_err(u8
+*v_fatal_err_u8);
+/*!
+ *	@brief This API is used to read the error code
+ *	from register 0x02 bit 1 to 4
+ *
+ *
+ *  @param v_err_code_u8 : The status of error codes
+ *	error_code  |    description
+ *  ------------|---------------
+ *	0x00        |no error
+ *	0x01        |ACC_CONF error (accel ODR and bandwidth not compatible)
+ *	0x02        |GYR_CONF error (Gyroscope ODR and bandwidth not compatible)
+ *	0x03        |Under sampling mode and interrupt uses pre filtered data
+ *	0x04        |reserved
+ *	0x05        |Selected trigger-readout offset in
+ *    -         |MAG_IF greater than selected ODR
+ *	0x06        |FIFO configuration error for header less mode
+ *	0x07        |Under sampling mode and pre filtered data as FIFO source
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_err_code(u8
+*v_error_code_u8);
+/*!
+ *	@brief This API reads the i2c error code from
+ *	register 0x02 bit 5.
+ *	This error is detected by the I2C master
+ *
+ *  @param v_i2c_err_code_u8 : The status of i2c fail error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_fail_err(u8
+*v_i2c_error_code_u8);
+ /*!
+ *	@brief This API Reads the dropped command error
+ *	from the register 0x02 bit 6
+ *
+ *
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_drop_cmd_err(u8
+*v_drop_cmd_err_u8);
+/*!
+ *	@brief This API reads the magnetometer data ready
+ *	error (data ready interrupt not active).
+ *	It reads from the error register 0x02 bit 7
+ *
+ *
+ *
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_dada_rdy_err(u8
+*v_mag_data_rdy_err_u8);
+/*!
+ *	@brief This API reads the error status
+ *	from the error register 0x02 bit 0 to 7
+ *
+ *  @param v_mag_data_rdy_err_u8 : The status of mag data ready interrupt
+ *  @param v_fatal_er_u8r : The status of fatal error
+ *  @param v_err_code_u8 : The status of error code
+ *  @param v_i2c_fail_err_u8 : The status of I2C fail error
+ *  @param v_drop_cmd_err_u8 : The status of drop command error
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_error_status(u8 *v_fatal_er_u8r,
+u8 *v_err_code_u8, u8 *v_i2c_fail_err_u8,
+u8 *v_drop_cmd_err_u8, u8 *v_mag_data_rdy_err_u8);
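+/*!
+ *	@note Illustrative example (added for documentation only): fetching
+ *	all error flags of register 0x02 in one call.
+ *	@code
+ *	u8 v_fatal_err_u8, v_err_code_u8, v_i2c_fail_err_u8;
+ *	u8 v_drop_cmd_err_u8, v_mag_data_rdy_err_u8;
+ *
+ *	if (smi130_get_error_status(&v_fatal_err_u8, &v_err_code_u8,
+ *	&v_i2c_fail_err_u8, &v_drop_cmd_err_u8,
+ *	&v_mag_data_rdy_err_u8) == 0) {
+ *		if (v_err_code_u8 == 0x01) {
+ *			// ACC_CONF error: accel ODR and bandwidth not compatible
+ *		}
+ *	}
+ *	@endcode
+*/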
+/******************************************************************/
+/**\name	 FUNCTIONS FOR MAG,ACCEL AND GYRO POWER MODE STATUS  */
+/*****************************************************************/
+/*!
+ *	@brief This API reads the magnetometer power mode from
+ *	PMU status register 0x03 bit 0 and 1
+ *
+ *  @param v_mag_power_mode_stat_u8 : The value of mag power mode
+ *	mag_powermode    |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   LOW POWER       |   0x02
+ *
+ *
+ * @note The power mode of mag is set by the 0x7E command register
+ * @note using the function "smi130_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x18    | MAG_MODE_SUSPEND
+ *   0x19    | MAG_MODE_NORMAL
+ *   0x1A    | MAG_MODE_LOWPOWER
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_power_mode_stat(u8
+*v_mag_power_mode_stat_u8);
+/*!
+ *	@brief This API reads the gyroscope power mode from
+ *	PMU status register 0x03 bit 2 and 3
+ *
+ *  @param v_gyro_power_mode_stat_u8 :	The value of gyro power mode
+ *	gyro_powermode   |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *   FAST POWER UP   |   0x03
+ *
+ * @note The power mode of gyro is set by the 0x7E command register
+ * @note using the function "smi130_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x14    | GYRO_MODE_SUSPEND
+ *   0x15    | GYRO_MODE_NORMAL
+ *   0x17    | GYRO_MODE_FASTSTARTUP
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_power_mode_stat(u8
+*v_gyro_power_mode_stat_u8);
+/*!
+ *	@brief This API reads the accelerometer power mode from
+ *	PMU status register 0x03 bit 4 and 5
+ *
+ *
+ *  @param v_accel_power_mode_stat_u8 :	The value of accel power mode
+ *	accel_powermode  |   value
+ * ------------------|----------
+ *    SUSPEND        |   0x00
+ *    NORMAL         |   0x01
+ *  LOW POWER        |   0x03
+ *
+ * @note The power mode of accel is set by the 0x7E command register
+ * @note using the function "smi130_set_command_register()"
+ *  value    |   mode
+ *  ---------|----------------
+ *   0x11    | ACCEL_MODE_NORMAL
+ *   0x12    | ACCEL_LOWPOWER
+ *   0x10    | ACCEL_SUSPEND
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_power_mode_stat(u8
+*v_accel_power_mode_stat_u8);
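+/*!
+ *	@note Illustrative example (added for documentation only): checking
+ *	that the accelerometer reached normal mode after the corresponding
+ *	command (0x11) was issued via the command register.
+ *	@code
+ *	u8 v_accel_pmu_u8 = 0;
+ *
+ *	if (smi130_get_accel_power_mode_stat(&v_accel_pmu_u8) == 0) {
+ *		if (v_accel_pmu_u8 == 0x01) {
+ *			// accel PMU status reports NORMAL
+ *		}
+ *	}
+ *	@endcode
+*/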
+/*!
+ *	@brief This API switches the mag interface to normal mode
+ *	and confirms whether the mode switching was done successfully or not
+*
+ *	@return results of bus communication function and current MAG_PMU result
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_interface_normal(void);
+/**************************************************/
+/**\name	 FUNCTION FOR Mag XYZ data read */
+/*************************************************/
+/*!
+ *	@brief This API reads magnetometer data X values
+ *	from the register 0x04 and 0x05
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param v_mag_x_s16 : The value of mag x
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_x(s16 *v_mag_x_s16,
+u8 v_sensor_select_u8);
+/*!
+ *	@brief This API reads magnetometer data Y values
+ *	from the register 0x06 and 0x07
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param v_mag_y_s16 : The value of mag y
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_y(s16 *v_mag_y_s16,
+u8 v_sensor_select_u8);
+/*!
+ *	@brief This API reads magnetometer data Z values
+ *	from the register 0x08 and 0x09
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param v_mag_z_s16 : The value of mag z
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_z(s16 *v_mag_z_s16,
+u8 v_sensor_select_u8);
+/*!
+ *	@brief This API reads magnetometer data RHALL values
+ *	from the register 0x0A and 0x0B
+ *
+ *
+ *  @param v_mag_r_s16 : The value of BMM150 r data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_r(
+s16 *v_mag_r_s16);
+/*!
+ *	@brief This API reads magnetometer data X,Y,Z values
+ *	from the register 0x04 to 0x09
+ *
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param mag : The value of mag xyz data
+ *  @param v_sensor_select_u8 : Mag selection value
+ *  value    |   sensor
+ *  ---------|----------------
+ *   0       | BMM150
+ *   1       | AKM09911 or AKM09912
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_xyz(
+struct smi130_mag_t *mag, u8 v_sensor_select_u8);
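+/*!
+ *	@note Illustrative example (added for documentation only): reading
+ *	all three mag axes of a BMM150 behind the mag interface. The x/y/z
+ *	member names of struct smi130_mag_t are assumed here; the structure
+ *	is defined earlier in this file.
+ *	@code
+ *	struct smi130_mag_t v_mag;
+ *
+ *	if (smi130_read_mag_xyz(&v_mag, BST_BMM) == 0) {
+ *		// v_mag.x, v_mag.y and v_mag.z hold the raw mag samples
+ *	}
+ *	@endcode
+*/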
+ /*!*
+ *	@brief This API reads magnetometer data X,Y,Z,r
+ *	values from the register 0x04 to 0x0B
+ *
+ *	@brief The mag sensor data is read from the auxiliary mag
+ *
+ *  @param mag : The value of mag-BMM150 xyzr data
+ *
+ *	@note For mag data output rate configuration use the following function
+ *	@note smi130_set_mag_output_data_rate()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_mag_xyzr(
+struct smi130_mag_xyzr_t *mag);
+/**************************************************/
+/**\name	 FUNCTION FOR GYRO XYZ DATA READ  */
+/*************************************************/
+/*!
+ *	@brief This API reads gyro data X values
+ *	from the registers 0x0C and 0x0D
+ *
+ *
+ *
+ *
+ *  @param v_gyro_x_s16 : The value of gyro x data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_x(
+s16 *v_gyro_x_s16);
+/*!
+ *	@brief This API reads gyro data Y values
+ *	from the registers 0x0E and 0x0F
+ *
+ *
+ *
+ *
+ *  @param v_gyro_y_s16 : The value of gyro y data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error result of communication routines
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_y(
+s16 *v_gyro_y_s16);
+/*!
+ *	@brief This API reads gyro data Z values
+ *	from the registers 0x10 and 0x11
+ *
+ *
+ *
+ *
+ *  @param v_gyro_z_s16 : The value of gyro z data
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_z(
+s16 *v_gyro_z_s16);
+/*!
+ *	@brief This API reads gyro data X,Y,Z values
+ *	from the register 0x0C to 0x11
+ *
+ *
+ *
+ *
+ *  @param gyro : The value of gyro xyz
+ *
+ *	@note Gyro Configuration use the following function
+ *	@note smi130_set_gyro_output_data_rate()
+ *	@note smi130_set_gyro_bw()
+ *	@note smi130_set_gyro_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_gyro_xyz(
+struct smi130_gyro_t *gyro);
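+/*!
+ *	@note Illustrative example (added for documentation only): reading
+ *	the gyro sample registers 0x0C to 0x11 in one call. The x/y/z member
+ *	names of struct smi130_gyro_t are assumed here.
+ *	@code
+ *	struct smi130_gyro_t v_gyro;
+ *
+ *	if (smi130_read_gyro_xyz(&v_gyro) == 0) {
+ *		// v_gyro.x, v_gyro.y and v_gyro.z hold the raw gyro samples
+ *	}
+ *	@endcode
+*/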
+/**************************************************/
+/**\name	 FUNCTION FOR ACCEL XYZ DATA READ  */
+/*************************************************/
+/*!
+ *	@brief This API reads accelerometer data X values
+ *	from the registers 0x12 and 0x13
+ *
+ *
+ *
+ *
+ *  @param v_accel_x_s16 : The value of accel x
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_x(
+s16 *v_accel_x_s16);
+/*!
+ *	@brief This API reads accelerometer data Y values
+ *	from the registers 0x14 and 0x15
+ *
+ *
+ *
+ *
+ *  @param v_accel_y_s16 : The value of accel y
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_y(
+s16 *v_accel_y_s16);
+/*!
+ *	@brief This API reads accelerometer data Z values
+ *	from the registers 0x16 and 0x17
+ *
+ *
+ *
+ *
+ *  @param v_accel_z_s16 : The value of accel z
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_z(
+s16 *v_accel_z_s16);
+/*!
+ *	@brief This API reads accelerometer data X,Y,Z values
+ *	from the register 0x12 to 0x17
+ *
+ *
+ *
+ *
+ *  @param accel :The value of accel xyz
+ *
+ *	@note For accel configuration use the following functions
+ *	@note smi130_set_accel_output_data_rate()
+ *	@note smi130_set_accel_bw()
+ *	@note smi130_set_accel_under_sampling_parameter()
+ *	@note smi130_set_accel_range()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_accel_xyz(
+struct smi130_accel_t *accel);
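+/*!
+ *	@note Illustrative example (added for documentation only): reading
+ *	the accel sample registers 0x12 to 0x17 in one call. The x/y/z
+ *	member names of struct smi130_accel_t are assumed here.
+ *	@code
+ *	struct smi130_accel_t v_accel;
+ *
+ *	if (smi130_read_accel_xyz(&v_accel) == 0) {
+ *		// v_accel.x, v_accel.y and v_accel.z hold the raw accel samples
+ *	}
+ *	@endcode
+*/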
+/**************************************************/
+/**\name	 FUNCTION FOR SENSOR TIME */
+/*************************************************/
+/*!
+ *	@brief This API reads sensor_time from the register
+ *	0x18 to 0x1A
+ *
+ *
+ *  @param v_sensor_time_u32 : The value of sensor time
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_sensor_time(
+u32 *v_sensor_time_u32);
+/**************************************************/
+/**\name	 FUNCTION FOR GYRO SELF TEST  */
+/*************************************************/
+/*!
+ *	@brief This API reads the Gyroscope self test
+ *	status from the register 0x1B bit 1
+ *
+ *
+ *  @param v_gyro_selftest_u8 : The value of gyro self test status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | Gyroscope self test is running or failed
+ *   1       | Gyroscope self test completed successfully
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_selftest(u8
+*v_gyro_selftest_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR MANUAL INTERFACE  */
+/*************************************************/
+/*!
+ *	@brief This API reads the status of
+ *	mag manual interface operation from the register 0x1B bit 2
+ *
+ *
+ *
+ *  @param v_mag_manual_stat_u8 : The value of mag manual operation status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | Indicates no manual magnetometer
+ *   -       | interface operation is ongoing
+ *   1       | Indicates manual magnetometer
+ *   -       | interface operation is ongoing
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_manual_operation_stat(u8
+*v_mag_manual_stat_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR FAST OFFSET READY  */
+/*************************************************/
+/*!
+ *	@brief This API reads the fast offset compensation
+ *	status from the register 0x1B bit 3
+ *
+ *
+ *  @param v_foc_rdy_u8 : The status of fast compensation
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_rdy(u8
+*v_foc_rdy_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR NVM READY  */
+/*************************************************/
+/*!
+ * @brief This API reads the nvm_rdy status from the
+ *	register 0x1B bit 4
+ *
+ *
+ *  @param v_nvm_rdy_u8 : The value of NVM ready status
+ *  value    |   status
+ *  ---------|----------------
+ *   0       | NVM write operation in progress
+ *   1       | NVM is ready to accept a new write trigger
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_nvm_rdy(u8
+*v_nvm_rdy_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR DATA READY FOR MAG, GYRO, AND ACCEL */
+/*************************************************/
+/*!
+ *	@brief This API reads the status of mag data ready
+ *	from the register 0x1B bit 5
+ *	The status gets reset when one mag data register is read out
+ *
+ *  @param v_data_rdy_u8 : The value of mag data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_data_rdy_mag(u8
+*v_data_rdy_u8);
+/*!
+ *	@brief This API reads the status of gyro data ready from the
+ *	register 0x1B bit 6
+ *	The status gets reset when the gyro data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of gyro data ready
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_data_rdy(u8
+*v_data_rdy_u8);
+/*!
+ *	@brief This API reads the status of accel data ready from the
+ *	register 0x1B bit 7
+ *	The status gets reset when the accel data registers are read out
+ *
+ *
+ *	@param v_data_rdy_u8 :	The value of accel data ready status
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_data_rdy(u8
+*drdy_acc);
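+/*!
+ *	@note Illustrative example (added for documentation only): polling
+ *	the accel data ready flag before fetching a sample. The x/y/z member
+ *	names of struct smi130_accel_t are assumed as in the example above.
+ *	@code
+ *	u8 v_drdy_u8 = 0;
+ *	struct smi130_accel_t v_accel;
+ *
+ *	smi130_get_accel_data_rdy(&v_drdy_u8);
+ *	if (v_drdy_u8 == 1) {
+ *		smi130_read_accel_xyz(&v_accel);
+ *	}
+ *	@endcode
+*/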
+/**************************************************/
+/**\name	 FUNCTION FOR STEP INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the step detector interrupt status
+ *	from the register 0x1C bit 0
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the step detector interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_step_intr_u8 : The status of step detector interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_step_intr(u8
+*v_step_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR SIGNIFICANT INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the
+ *	significant motion interrupt status
+ *	from the register 0x1C bit 1
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the significant motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_significant_intr_u8 : The status of significant
+ *	motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_significant_intr(u8
+*sigmot_intr);
+/**************************************************/
+/**\name	 FUNCTION FOR ANY MOTION INTERRUPT STATUS  */
+/*************************************************/
+ /*!
+ *	@brief This API reads the any motion interrupt status
+ *	from the register 0x1C bit 2
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the any motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *  @param v_any_motion_intr_u8 : The status of any-motion interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_any_motion_intr(u8
+*v_any_motion_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR PMU TRIGGER INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the power mode trigger interrupt status
+ *	from the register 0x1C bit 3
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the power mode trigger interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *
+ *  @param v_pmu_trigger_intr_u8 : The status of power mode trigger interrupt
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_pmu_trigger_intr(u8
+*v_pmu_trigger_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR DOUBLE TAP STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the double tap status
+ *	from the register 0x1C bit 4
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the double tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_double_tap_intr_u8 : The status of double tap interrupt
+ *
+ *	@note Double tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_double_tap()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat2_tap_first_x()
+ *	@note smi130_get_stat2_tap_first_y()
+ *	@note smi130_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note smi130_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note smi130_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note smi130_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note smi130_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_double_tap_intr(u8
+*v_double_tap_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR SINGLE TAP STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the single tap status
+ *	from the register 0x1C bit 5
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the single tap interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt
+ *	signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_single_tap_intr_u8 :The status of single tap interrupt
+ *
+ *	@note Single tap interrupt can be configured by the following functions
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_single_tap()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat2_tap_first_x()
+ *	@note smi130_get_stat2_tap_first_y()
+ *	@note smi130_get_stat2_tap_first_z()
+ *	@note DURATION
+ *	@note smi130_set_intr_tap_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_tap_thres()
+ *	@note TAP QUIET
+ *	@note smi130_set_intr_tap_quiet()
+ *	@note TAP SHOCK
+ *	@note smi130_set_intr_tap_shock()
+ *	@note TAP SOURCE
+ *	@note smi130_set_intr_tap_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_single_tap_intr(u8
+*v_single_tap_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR ORIENT INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the orient_mbl status
+ *	from the register 0x1C bit 6
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the orient_mbl interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_orient_mbl_intr_u8 : The status of orient_mbl interrupt
+ *
+ *	@note For orient_mbl interrupt configuration use the following functions
+ *	@note STATUS
+ *	@note smi130_get_stat0_orient_mbl_intr()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat3_orient_mbl_xy()
+ *	@note smi130_get_stat3_orient_mbl_z()
+ *	@note smi130_set_intr_orient_mbl_axes_enable()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_orient_mbl()
+ *	@note INTERRUPT OUTPUT
+ *	@note smi130_set_intr_orient_mbl_ud_enable()
+ *	@note THETA
+ *	@note smi130_set_intr_orient_mbl_theta()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_orient_mbl_hyst()
+ *	@note BLOCKING
+ *	@note smi130_set_intr_orient_mbl_blocking()
+ *	@note MODE
+ *	@note smi130_set_intr_orient_mbl_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_orient_mbl_intr(u8
+*v_orient_mbl_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR FLAT INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the flat interrupt status
+ *	from the register 0x1C bit 7
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the flat interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_flat_intr_u8 : The status of  flat interrupt
+ *
+ *	@note For flat configuration use the following functions
+ *	@note STATS
+ *	@note smi130_get_stat0_flat_intr()
+ *	@note smi130_get_stat3_flat()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_flat()
+ *	@note THETA
+ *	@note smi130_set_intr_flat_theta()
+ *	@note HOLD TIME
+ *	@note smi130_set_intr_flat_hold()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_flat_hyst()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat0_flat_intr(u8
+*v_flat_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR HIGH_G INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the high_g interrupt status
+ *	from the register 0x1D bit 2.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the high_g interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_high_g_intr_u8 : The status of high_g interrupt
+ *
+ *	@note High_g interrupt can be configured by the following functions
+ *	@note STATUS
+ *	@note smi130_get_stat1_high_g_intr()
+ *	@note AXIS MAPPING
+ *	@note smi130_get_stat3_high_g_first_x()
+ *	@note smi130_get_stat3_high_g_first_y()
+ *	@note smi130_get_stat3_high_g_first_z()
+ *	@note SIGN MAPPING
+ *	@note smi130_get_stat3_high_g_first_sign()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_high_g()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_high_g_hyst()
+ *	@note DURATION
+ *	@note smi130_set_intr_high_g_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_high_g_thres()
+ *	@note SOURCE
+ *	@note smi130_set_intr_low_high_source()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_high_g_intr(u8
+*v_high_g_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR LOW_G INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the low g interrupt status
+ *	from the register 0x1D bit 3.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the low_g interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_low_g_intr_u8 : The status of low_g interrupt
+ *
+ *	@note Low_g interrupt can be configured by the following functions
+ *	@note STATUS
+ *	@note smi130_get_stat1_low_g_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_low_g()
+ *	@note SOURCE
+ *	@note smi130_set_intr_low_high_source()
+ *	@note DURATION
+ *	@note smi130_set_intr_low_g_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_low_g_thres()
+ *	@note HYSTERESIS
+ *	@note smi130_set_intr_low_g_hyst()
+ *	@note MODE
+ *	@note smi130_set_intr_low_g_mode()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_low_g_intr(u8
+*v_low_g_intr_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR DATA READY INTERRUPT STATUS  */
+/*************************************************/
+/*!
+ *	@brief This API reads the data ready interrupt status
+ *	from the register 0x1D bit 4.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the data ready interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_data_rdy_intr_u8 : The status of data ready interrupt
+ *
+ *	@note Data ready interrupt can be configured by the following functions
+ *	@note STATUS
+ *	@note smi130_get_stat1_data_rdy_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_data_rdy()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_data_rdy_intr(u8
+*v_data_rdy_intr_u8);
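+/*!
+ *	Example: a minimal usage sketch (not taken from the reference driver)
+ *	that polls the data ready status bit documented above. It assumes the
+ *	device has already been initialised and this header is included.
+ *
+ *	@code
+ *	u8 v_data_rdy_u8 = 0;
+ *
+ *	if (smi130_get_stat1_data_rdy_intr(&v_data_rdy_u8) == 0 &&
+ *		v_data_rdy_u8 == 1) {
+ *		// a new sample is available; read the data registers here
+ *	}
+ *	@endcode
+*/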
+/**************************************************/
+/**\name	 FUNCTIONS FOR FIFO FULL AND WATER MARK INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the FIFO full interrupt status
+ *	from the register 0x1D bit 5.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the FIFO full interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will
+ *	be permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_full_intr_u8 : The status of fifo full interrupt
+ *
+ *	@note FIFO full interrupt can be configured by the following function
+ *	@note smi130_set_intr_fifo_full()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_fifo_full_intr(u8
+*v_fifo_full_intr_u8);
+/*!
+ *	@brief This API reads the FIFO watermark interrupt status
+ *	from the register 0x1D bit 6.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the FIFO watermark interrupt triggers. The
+ *	setting of INT_LATCH controls if the
+ *	interrupt signal and hence the
+ *	respective interrupt flag will be
+ *	permanently latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_fifo_wm_intr_u8 : The status of fifo water mark interrupt
+ *
+ *	@note FIFO water mark interrupt can be configured by the following function
+ *	@note smi130_set_intr_fifo_wm()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_fifo_wm_intr(u8
+*v_fifo_wm_intr_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR NO MOTION INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the no motion interrupt status
+ *	from the register 0x1D bit 7.
+ *	The flag is associated with a specific interrupt function.
+ *	It is set when the no motion interrupt triggers. The
+ *	setting of INT_LATCH controls if the interrupt signal and hence the
+ *	respective interrupt flag will be permanently
+ *	latched, temporarily latched
+ *	or not latched.
+ *
+ *
+ *
+ *
+ *  @param v_nomotion_intr_u8 : The status of no motion interrupt
+ *
+ *	@note No motion interrupt can be configured by the following functions
+ *	@note STATUS
+ *	@note smi130_get_stat1_nomotion_intr()
+ *	@note INTERRUPT MAPPING
+ *	@note smi130_set_intr_nomotion()
+ *	@note DURATION
+ *	@note smi130_set_intr_slow_no_motion_durn()
+ *	@note THRESHOLD
+ *	@note smi130_set_intr_slow_no_motion_thres()
+ *	@note SLOW/NO MOTION SELECT
+ *	@note smi130_set_intr_slow_no_motion_select()
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat1_nomotion_intr(u8
+*nomo_intr);
+/**************************************************/
+/**\name	 FUNCTIONS FOR ANY MOTION FIRST XYZ AND SIGN INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the status of any motion first x
+ *	from the register 0x1E bit 0
+ *
+ *
+ *  @param v_anymotion_first_x_u8 : The status of any motion first x interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_first_x(u8
+*v_anymotion_first_x_u8);
+/*!
+ *	@brief This API reads the status of any motion first y interrupt
+ *	from the register 0x1E bit 1
+ *
+ *
+ *
+ *@param v_any_motion_first_y_u8 : The status of any motion first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_first_y(u8
+*v_any_motion_first_y_u8);
+/*!
+ *	@brief This API reads the status of any motion first z interrupt
+ *	from the register 0x1E bit 2
+ *
+ *
+ *
+ *
+ *@param v_any_motion_first_z_u8 : The status of any motion first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_first_z(u8
+*v_any_motion_first_z_u8);
+/*!
+ *	@brief This API reads the any motion sign status from the
+ *	register 0x1E bit 3
+ *
+ *
+ *
+ *
+ *  @param v_anymotion_sign_u8 : The status of any motion sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_any_motion_sign(u8
+*v_anymotion_sign_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR TAP FIRST XYZ AND SIGN INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the tap first x status from the
+ *	register 0x1E bit 4
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_x_u8 :The status of tap first x
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_first_x(u8
+*v_tap_first_x_u8);
+/*!
+ *	@brief This API reads the tap first y interrupt status from the
+ *	register 0x1E bit 5
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_y_u8 :The status of tap first y interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_first_y(u8
+*v_tap_first_y_u8);
+/*!
+ *	@brief This API reads the tap first z interrupt status  from the
+ *	register 0x1E bit 6
+ *
+ *
+ *
+ *
+ *  @param v_tap_first_z_u8 :The status of tap first z interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_first_z(u8
+*v_tap_first_z_u8);
+/*!
+ *	@brief This API reads the tap sign status from the
+ *	register 0x1E bit 7
+ *
+ *
+ *
+ *
+ *  @param v_tap_sign_u8 : The status of tap sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat2_tap_sign(u8
+*tap_sign);
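+/*!
+ *	Example: an illustrative sketch (not from the reference driver) that
+ *	uses the tap status functions above to determine which axis caused a
+ *	tap interrupt and its sign; it assumes the tap interrupt has already
+ *	been configured and this header is included.
+ *
+ *	@code
+ *	u8 v_tap_first_x_u8 = 0, v_tap_first_y_u8 = 0, v_tap_first_z_u8 = 0;
+ *	u8 v_tap_sign_u8 = 0;
+ *
+ *	if (smi130_get_stat2_tap_first_x(&v_tap_first_x_u8) == 0 &&
+ *		smi130_get_stat2_tap_first_y(&v_tap_first_y_u8) == 0 &&
+ *		smi130_get_stat2_tap_first_z(&v_tap_first_z_u8) == 0 &&
+ *		smi130_get_stat2_tap_sign(&v_tap_sign_u8) == 0) {
+ *		// per the tables above: 1 = triggered by that axis,
+ *		// sign 0 = positive, 1 = negative
+ *	}
+ *	@endcode
+*/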
+/**************************************************/
+/**\name	 FUNCTIONS FOR HIGH_G FIRST XYZ AND SIGN INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the high_g first x status from the
+ *	register 0x1F bit 0
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_x_u8 :The status of high_g first x
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_first_x(u8
+*v_high_g_first_x_u8);
+/*!
+ *	@brief This API reads the high_g first y status from the
+ *	register 0x1F bit 1
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_y_u8 : The status of high_g first y
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_first_y(u8
+*v_high_g_first_y_u8);
+/*!
+ *	@brief This API reads the high_g first z status from the
+ *	register 0x1F bit 2
+ *
+ *
+ *
+ *
+ *  @param v_high_g_first_z_u8 : The status of high_g first z
+ *  value     |  status
+ * -----------|-------------
+ *   0        | not triggered
+ *   1        | triggered by z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_first_z(u8
+*v_high_g_first_z_u8);
+/*!
+ *	@brief This API reads the high sign status from the
+ *	register 0x1F bit 3
+ *
+ *
+ *
+ *
+ *  @param v_high_g_sign_u8 :The status of high sign
+ *  value     |  sign
+ * -----------|-------------
+ *   0        | positive
+ *   1        | negative
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_high_g_sign(u8
+*v_high_g_sign_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR ORIENT XY AND Z INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the status of orient_mbl_xy plane
+ *	from the register 0x1F bit 4 and 5
+ *
+ *
+ *  @param v_orient_mbl_xy_u8 :The status of orient_mbl_xy plane
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | portrait upright
+ *   0x01     | portrait upside down
+ *   0x02     | landscape left
+ *   0x03     | landscape right
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_orient_mbl_xy(u8
+*v_orient_mbl_xy_u8);
+/*!
+ *	@brief This API reads the status of orient_mbl z plane
+ *	from the register 0x1F bit 6
+ *
+ *
+ *  @param v_orient_mbl_z_u8 :The status of orient_mbl z
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | upward looking
+ *   0x01     | downward looking
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_orient_mbl_z(u8
+*v_orient_mbl_z_u8);
+/**************************************************/
+/**\name	 FUNCTIONS FOR FLAT INTERRUPT STATUS*/
+/*************************************************/
+/*!
+ *	@brief This API reads the flat status from the register
+ *	0x1F bit 7
+ *
+ *
+ *  @param v_flat_u8 : The status of flat interrupt
+ *  value     |  status
+ * -----------|-------------
+ *   0x00     | non flat
+ *   0x01     | flat position
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_stat3_flat(u8
+*flat);
+/**************************************************/
+/**\name	 FUNCTION FOR TEMPERATURE READ */
+/*************************************************/
+/*!
+ *	@brief This API reads the temperature of the sensor
+ *	from the register 0x21 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_temp_s16 : The value of temperature
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_temp(s16
+*v_temp_s16);
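+/*!
+ *	Example: a minimal sketch (not from the reference driver) reading the
+ *	raw temperature word; the conversion to degrees is not shown because
+ *	the scaling is not documented in this header.
+ *
+ *	@code
+ *	s16 v_temp_s16 = 0;
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_get_temp(&v_temp_s16);
+ *	if (com_rslt == 0) {
+ *		// v_temp_s16 now holds the raw temperature register value
+ *	}
+ *	@endcode
+*/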
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO LENGTH AND FIFO DATA READ */
+/*************************************************/
+/*!
+ *	@brief This API reads the fifo length of the sensor
+ *	from the register 0x23 and 0x24 bit 0 to 7 and 0 to 2
+ *	@brief this byte counter is updated each time a complete frame
+ *	was read or written
+ *
+ *
+ *  @param v_fifo_length_u32 : The value of fifo byte counter
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_fifo_length(
+u32 *v_fifo_length_u32);
+/*!
+ *	@brief This API reads the fifo data of the sensor
+ *	from the register 0x24
+ *	@brief Data format depends on the setting of register FIFO_CONFIG
+ *
+ *
+ *
+ *  @param v_fifodata_u8 : Pointer holding the fifo data
+ *
+ *	@note For reading FIFO data use the following functions
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_fifo_data(
+u8 *v_fifodata_u8, u16 v_fifo_length_u16);
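+/*!
+ *	Example: an illustrative sketch (not from the reference driver) that
+ *	reads the current FIFO fill level and then fetches that many bytes.
+ *	The 1024-byte buffer size is an assumption made only for this sketch.
+ *
+ *	@code
+ *	u8 v_fifodata_u8[1024];
+ *	u32 v_fifo_length_u32 = 0;
+ *
+ *	if (smi130_fifo_length(&v_fifo_length_u32) == 0 &&
+ *		v_fifo_length_u32 > 0 &&
+ *		v_fifo_length_u32 <= sizeof(v_fifodata_u8))
+ *		smi130_fifo_data(v_fifodata_u8, (u16)v_fifo_length_u32);
+ *	@endcode
+*/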
+/**************************************************/
+/**\name	 FUNCTION FOR ACCEL CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API is used to get the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	SMI130_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | SMI130_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_output_data_rate(
+u8 *v_output_data_rate_u8);
+/*!
+ *	@brief This API is used to set the
+ *	accel output data rate from the register 0x40 bit 0 to 3
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of accel output data rate
+ *  value |  output data rate
+ * -------|--------------------------
+ *	 0    |	SMI130_ACCEL_OUTPUT_DATA_RATE_RESERVED
+ *	 1	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_0_78HZ
+ *	 2	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1_56HZ
+ *	 3    |	SMI130_ACCEL_OUTPUT_DATA_RATE_3_12HZ
+ *	 4    | SMI130_ACCEL_OUTPUT_DATA_RATE_6_25HZ
+ *	 5	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_12_5HZ
+ *	 6	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_25HZ
+ *	 7	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_50HZ
+ *	 8	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_100HZ
+ *	 9	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_200HZ
+ *	 10	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_400HZ
+ *	 11	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_800HZ
+ *	 12	  |	SMI130_ACCEL_OUTPUT_DATA_RATE_1600HZ
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_output_data_rate(u8 odr);
+/*!
+ *	@brief This API is used to get the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on under sampling parameter
+ *	@note under sampling parameter can be set by the function
+ *	"SMI130_SET_ACCEL_UNDER_SAMPLING_PARAMETER"
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_bw(u8 *v_bw_u8);
+/*!
+ *	@brief This API is used to set the
+ *	accel bandwidth from the register 0x40 bit 4 to 6
+ *	@brief bandwidth parameter determines filter configuration(acc_us=0)
+ *	and averaging for under sampling mode(acc_us=1)
+ *
+ *
+ *  @param  v_bw_u8 : The value of accel bandwidth
+ *
+ *	@note accel bandwidth depends on under sampling parameter
+ *	@note under sampling parameter can be set by the function
+ *	"SMI130_SET_ACCEL_UNDER_SAMPLING_PARAMETER"
+ *
+ *	@note Filter configuration
+ *  accel_us  | Filter configuration
+ * -----------|---------------------
+ *    0x00    |  OSR4 mode
+ *    0x01    |  OSR2 mode
+ *    0x02    |  normal mode
+ *    0x03    |  CIC mode
+ *    0x04    |  Reserved
+ *    0x05    |  Reserved
+ *    0x06    |  Reserved
+ *    0x07    |  Reserved
+ *
+ *	@note accel under sampling mode
+ *  accel_us  | Under sampling mode
+ * -----------|---------------------
+ *    0x00    |  no averaging
+ *    0x01    |  average 2 samples
+ *    0x02    |  average 4 samples
+ *    0x03    |  average 8 samples
+ *    0x04    |  average 16 samples
+ *    0x05    |  average 32 samples
+ *    0x06    |  average 64 samples
+ *    0x07    |  average 128 samples
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_bw(u8 v_bw_u8);
+/*!
+ *	@brief This API is used to get the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_under_sampling_parameter(
+u8 *v_accel_under_sampling_u8);
+/*!
+ *	@brief This API is used to set the accel
+ *	under sampling parameter from the register 0x40 bit 7
+ *
+ *
+ *
+ *
+ *	@param  v_accel_under_sampling_u8 : The value of accel under sampling
+ *	value    | under_sampling
+ * ----------|---------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_under_sampling_parameter(
+u8 v_accel_under_sampling_u8);
+/*!
+ *	@brief This API is used to get the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | SMI130_ACCEL_RANGE_2G
+ *   0x05    | SMI130_ACCEL_RANGE_4G
+ *   0x08    | SMI130_ACCEL_RANGE_8G
+ *   0x0C    | SMI130_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_range(
+u8 *v_range_u8);
+/*!
+ *	@brief This API is used to set the ranges
+ *	(g values) of the accel from the register 0x41 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param v_range_u8 : The value of accel g range
+ *	value    | g_range
+ * ----------|-----------
+ *   0x03    | SMI130_ACCEL_RANGE_2G
+ *   0x05    | SMI130_ACCEL_RANGE_4G
+ *   0x08    | SMI130_ACCEL_RANGE_8G
+ *   0x0C    | SMI130_ACCEL_RANGE_16G
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_range(
+u8 v_range_u8);
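+/*!
+ *	Example: a minimal configuration sketch (not from the reference
+ *	driver) combining the accel setters above: 100 Hz output data rate,
+ *	normal filter mode (0x02 per the bandwidth table) and the 4g range.
+ *	Error handling is reduced to checking the 0/success return codes.
+ *
+ *	@code
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = 0;
+ *
+ *	com_rslt += smi130_set_accel_output_data_rate(
+ *		SMI130_ACCEL_OUTPUT_DATA_RATE_100HZ);
+ *	com_rslt += smi130_set_accel_bw(0x02);
+ *	com_rslt += smi130_set_accel_range(SMI130_ACCEL_RANGE_4G);
+ *	if (com_rslt != 0) {
+ *		// at least one bus transfer failed
+ *	}
+ *	@endcode
+*/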
+/**************************************************/
+/**\name	 FUNCTION FOR GYRO CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API is used to get the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | SMI130_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | SMI130_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | SMI130_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | SMI130_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | SMI130_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | SMI130_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | SMI130_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | SMI130_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_output_data_rate(
+u8 *gyro_output_typer);
+/*!
+ *	@brief This API is used to set the
+ *	gyroscope output data rate from the register 0x42 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 :The value of gyro output data rate
+ *  value     |      gyro output data rate
+ * -----------|-----------------------------
+ *   0x00     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x01     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x02     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x03     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x04     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x05     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x06     | SMI130_GYRO_OUTPUT_DATA_RATE_25HZ
+ *   0x07     | SMI130_GYRO_OUTPUT_DATA_RATE_50HZ
+ *   0x08     | SMI130_GYRO_OUTPUT_DATA_RATE_100HZ
+ *   0x09     | SMI130_GYRO_OUTPUT_DATA_RATE_200HZ
+ *   0x0A     | SMI130_GYRO_OUTPUT_DATA_RATE_400HZ
+ *   0x0B     | SMI130_GYRO_OUTPUT_DATA_RATE_800HZ
+ *   0x0C     | SMI130_GYRO_OUTPUT_DATA_RATE_1600HZ
+ *   0x0D     | SMI130_GYRO_OUTPUT_DATA_RATE_3200HZ
+ *   0x0E     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *   0x0F     | SMI130_GYRO_OUTPUT_DATA_RATE_RESERVED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_output_data_rate(
+u8 gyro_output_typer);
+/*!
+ *	@brief This API is used to get the
+ *	gyro bandwidth from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | SMI130_GYRO_OSR4_MODE
+ *   0x01     | SMI130_GYRO_OSR2_MODE
+ *   0x02     | SMI130_GYRO_NORMAL_MODE
+ *   0x03     | SMI130_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_bw(u8 *v_bw_u8);
+/*!
+ *	@brief This API is used to set the
+ *	gyro bandwidth from the register 0x42 bit 4 to 5
+ *
+ *
+ *
+ *
+ *  @param  v_bw_u8 : The value of gyro bandwidth
+ *  value     | gyro bandwidth
+ *  ----------|----------------
+ *   0x00     | SMI130_GYRO_OSR4_MODE
+ *   0x01     | SMI130_GYRO_OSR2_MODE
+ *   0x02     | SMI130_GYRO_NORMAL_MODE
+ *   0x03     | SMI130_GYRO_CIC_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_bw(u8 v_bw_u8);
+/*!
+ *	@brief This API reads the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | SMI130_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | SMI130_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | SMI130_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | SMI130_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | SMI130_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_range(
+u8 *v_range_u8);
+/*!
+ *	@brief This API set the range
+ *	of gyro from the register 0x43 bit 0 to 2
+ *
+ *  @param  v_range_u8 : The value of gyro range
+ *   value    |    range
+ *  ----------|-------------------------------
+ *    0x00    | SMI130_GYRO_RANGE_2000_DEG_SEC
+ *    0x01    | SMI130_GYRO_RANGE_1000_DEG_SEC
+ *    0x02    | SMI130_GYRO_RANGE_500_DEG_SEC
+ *    0x03    | SMI130_GYRO_RANGE_250_DEG_SEC
+ *    0x04    | SMI130_GYRO_RANGE_125_DEG_SEC
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_range(
+u8 v_range_u8);
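+/*!
+ *	Example: a minimal gyro configuration sketch (not from the reference
+ *	driver): 100 Hz output data rate, normal filter mode and the
+ *	1000 deg/s range, using the macros listed in the tables above.
+ *
+ *	@code
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = 0;
+ *
+ *	com_rslt += smi130_set_gyro_output_data_rate(
+ *		SMI130_GYRO_OUTPUT_DATA_RATE_100HZ);
+ *	com_rslt += smi130_set_gyro_bw(SMI130_GYRO_NORMAL_MODE);
+ *	com_rslt += smi130_set_gyro_range(SMI130_GYRO_RANGE_1000_DEG_SEC);
+ *	if (com_rslt != 0) {
+ *		// at least one bus transfer failed
+ *	}
+ *	@endcode
+*/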
+/**************************************************/
+/**\name	 FUNCTION FOR MAG CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API is used to get the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |SMI130_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |SMI130_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |SMI130_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |SMI130_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |SMI130_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |SMI130_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |SMI130_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |SMI130_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |SMI130_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |SMI130_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |SMI130_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |SMI130_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_output_data_rate(u8 *odr);
+/*!
+ *	@brief This API is used to set the
+ *	output data rate of magnetometer from the register 0x44 bit 0 to 3
+ *
+ *
+ *
+ *
+ *  @param  v_output_data_rate_u8 : The value of mag output data rate
+ *  value   |    mag output data rate
+ * ---------|---------------------------
+ *  0x00    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED
+ *  0x01    |SMI130_MAG_OUTPUT_DATA_RATE_0_78HZ
+ *  0x02    |SMI130_MAG_OUTPUT_DATA_RATE_1_56HZ
+ *  0x03    |SMI130_MAG_OUTPUT_DATA_RATE_3_12HZ
+ *  0x04    |SMI130_MAG_OUTPUT_DATA_RATE_6_25HZ
+ *  0x05    |SMI130_MAG_OUTPUT_DATA_RATE_12_5HZ
+ *  0x06    |SMI130_MAG_OUTPUT_DATA_RATE_25HZ
+ *  0x07    |SMI130_MAG_OUTPUT_DATA_RATE_50HZ
+ *  0x08    |SMI130_MAG_OUTPUT_DATA_RATE_100HZ
+ *  0x09    |SMI130_MAG_OUTPUT_DATA_RATE_200HZ
+ *  0x0A    |SMI130_MAG_OUTPUT_DATA_RATE_400HZ
+ *  0x0B    |SMI130_MAG_OUTPUT_DATA_RATE_800HZ
+ *  0x0C    |SMI130_MAG_OUTPUT_DATA_RATE_1600HZ
+ *  0x0D    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED0
+ *  0x0E    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED1
+ *  0x0F    |SMI130_MAG_OUTPUT_DATA_RATE_RESERVED2
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_output_data_rate(u8 odr);
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO CONFIGURATIONS */
+/*************************************************/
+ /*!
+ *	@brief This API is used to read Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_down_gyro(
+u8 *v_fifo_down_gyro_u8);
+ /*!
+ *	@brief This API is used to set Down sampling
+ *	for gyro (2**downs_gyro) in the register 0x45 bit 0 to 2
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_gyro_u8 :The value of gyro fifo down
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_down_gyro(
+u8 v_fifo_down_gyro_u8);
+/*!
+ *	@brief This API is used to read gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_fifo_filter_data(
+u8 *v_gyro_fifo_filter_data_u8);
+/*!
+ *	@brief This API is used to set gyro fifo filter data
+ *	from the register 0x45 bit 3
+ *
+ *
+ *
+ *  @param v_gyro_fifo_filter_data_u8 :The value of gyro filter data
+ *  value      |  gyro_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_fifo_filter_data(
+u8 v_gyro_fifo_filter_data_u8);
+/*!
+ *	@brief This API is used to read Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_down_accel(
+u8 *v_fifo_down_u8);
+ /*!
+ *	@brief This API is used to set Down sampling
+ *	for accel (2**downs_accel) from the register 0x45 bit 4 to 6
+ *
+ *
+ *
+ *
+ *  @param v_fifo_down_u8 :The value of accel fifo down
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_down_accel(
+u8 v_fifo_down_u8);
+/*!
+ *	@brief This API is used to read accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_fifo_filter_data(
+u8 *v_accel_fifo_filter_u8);
+/*!
+ *	@brief This API is used to set accel fifo filter data
+ *	from the register 0x45 bit 7
+ *
+ *
+ *
+ *  @param v_accel_fifo_filter_u8 :The value of accel filter data
+ *  value      |  accel_fifo_filter_data
+ * ------------|-------------------------
+ *    0x00     |  Unfiltered data
+ *    0x01     |  Filtered data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_fifo_filter_data(
+u8 v_accel_fifo_filter_u8);
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO WATER MARK ENABLE */
+/*************************************************/
+/*!
+ *	@brief This API is used to read the FIFO water mark level
+ *	from the register 0x46 bit 0 to 7; an interrupt is triggered
+ *	when the FIFO fill level reaches the water mark.
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_wm(
+u8 *v_fifo_wm_u8);
+/*!
+ *	@brief This API is used to set the FIFO water mark level
+ *	in the register 0x46 bit 0 to 7; an interrupt is triggered
+ *	when the FIFO fill level reaches the water mark.
+ *
+ *
+ *
+ *  @param  v_fifo_wm_u8 : The value of fifo water mark level
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_wm(
+u8 v_fifo_wm_u8);
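+/*!
+ *	Example: a minimal sketch (not from the reference driver) programming
+ *	a FIFO water mark of 0x40. The unit of the water mark value is not
+ *	documented in this header, so 0x40 is purely illustrative.
+ *
+ *	@code
+ *	if (smi130_set_fifo_wm(0x40) != 0) {
+ *		// bus communication failed
+ *	}
+ *	@endcode
+*/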
+/**************************************************/
+/**\name	 FUNCTION FOR FIFO CONFIGURATIONS */
+/*************************************************/
+/*!
+ *	@brief This API reads fifo sensor time
+ *	frame after the last valid data frame from the register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_time_enable(
+u8 *v_fifo_time_enable_u8);
+/*!
+ *	@brief This API set fifo sensor time
+ *	frame after the last valid data frame from the register 0x47 bit 1
+ *
+ *
+ *
+ *
+ *  @param v_fifo_time_enable_u8 : The value of sensor time
+ *  value      |  fifo sensor time
+ * ------------|-------------------------
+ *    0x00     |  do not return sensortime frame
+ *    0x01     |  return sensortime frame
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_time_enable(
+u8 v_fifo_time_enable_u8);
+/*!
+ *	@brief This API reads FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_tag_intr2_enable(
+u8 *v_fifo_tag_intr2_u8);
+/*!
+ *	@brief This API set FIFO tag interrupt2 enable status
+ *	from the register 0x47 bit 2
+ *
+ *  @param v_fifo_tag_intr2_u8 : The value of fifo tag interrupt
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_tag_intr2_enable(
+u8 v_fifo_tag_intr2_u8);
+/*!
+ *	@brief This API get FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_tag_intr1_enable(
+u8 *v_fifo_tag_intr1_u8);
+/*!
+ *	@brief This API set FIFO tag interrupt1 enable status
+ *	from the register 0x47 bit 3
+ *
+ *  @param v_fifo_tag_intr1_u8 :The value of fifo tag interrupt1
+ *	value    | fifo tag interrupt
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_tag_intr1_enable(
+u8 v_fifo_tag_intr1_u8);
+/*!
+ *	@brief This API reads FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_header_enable(
+u8 *v_fifo_header_u8);
+/*!
+ *	@brief This API set FIFO frame
+ *	header enable from the register 0x47 bit 4
+ *
+ *  @param v_fifo_header_u8 :The value of fifo header
+ *	value    | fifo header
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_header_enable(
+u8 v_fifo_header_u8);
+/*!
+ *	@brief This API is used to read stored
+ *	magnetometer data in FIFO (all 3 axes) from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_mag_enable(
+u8 *v_fifo_mag_u8);
+/*!
+ *	@brief This API is used to set stored
+ *	magnetometer data in FIFO (all 3 axes) from the register 0x47 bit 5
+ *
+ *  @param v_fifo_mag_u8 : The value of fifo mag enable
+ *	value    | fifo mag
+ * ----------|-------------------
+ *  0x00     |  no magnetometer data is stored
+ *  0x01     |  magnetometer data is stored
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_mag_enable(
+u8 v_fifo_mag_u8);
+/*!
+ *	@brief This API is used to read stored
+ *	accel data in FIFO (all 3 axes) from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_accel_enable(
+u8 *v_fifo_accel_u8);
+/*!
+ *	@brief This API is used to set stored
+ *	accel data in FIFO (all 3 axes) from the register 0x47 bit 6
+ *
+ *  @param v_fifo_accel_u8 : The value of fifo accel enable
+ *	value    | fifo accel
+ * ----------|-------------------
+ *  0x00     |  no accel data is stored
+ *  0x01     |  accel data is stored
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_accel_enable(
+u8 v_fifo_accel_u8);
+/*!
+ *	@brief This API is used to read stored
+ *	gyro data in FIFO (all 3 axes) from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_fifo_gyro_enable(
+u8 *v_fifo_gyro_u8);
+/*!
+ *	@brief This API is used to set stored
+ *	gyro data in FIFO (all 3 axes) from the register 0x47 bit 7
+ *
+ *
+ *  @param v_fifo_gyro_u8 : The value of fifo gyro enable
+ *	value    | fifo gyro
+ * ----------|-------------------
+ *  0x00     |  no gyro data is stored
+ *  0x01     |  gyro data is stored
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_fifo_gyro_enable(
+u8 v_fifo_gyro_u8);
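+/*!
+ *	Example: an illustrative sketch (not from the reference driver)
+ *	enabling header mode and storing accel and gyro frames in the FIFO,
+ *	using the 0x01/enable values from the tables above.
+ *
+ *	@code
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = 0;
+ *
+ *	com_rslt += smi130_set_fifo_header_enable(SMI130_ENABLE);
+ *	com_rslt += smi130_set_fifo_accel_enable(0x01);
+ *	com_rslt += smi130_set_fifo_gyro_enable(0x01);
+ *	if (com_rslt != 0) {
+ *		// bus communication failed
+ *	}
+ *	@endcode
+*/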
+/***************************************************************/
+/**\name	FUNCTION FOR MAG I2C ADDRESS SELECTION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_device_addr(
+u8 *v_i2c_device_addr_u8);
+/*!
+ *	@brief This API is used to set
+ *	I2C device address of auxiliary mag from the register 0x4B bit 1 to 7
+ *
+ *
+ *
+ *
+ *  @param v_i2c_device_addr_u8 : The value of mag I2C device address
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_i2c_device_addr(
+u8 v_i2c_device_addr_u8);
+/*!
+ *	@brief This API is used to read
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The data of mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_burst(
+u8 *v_mag_burst_u8);
+/*!
+ *	@brief This API is used to set
+ *	Burst data length (1,2,6,8 byte) from the register 0x4C bit 0 to 1
+ *
+ *
+ *
+ *
+ *  @param v_mag_burst_u8 : The data of mag burst read length
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_burst(
+u8 v_mag_burst_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR MAG OFFSET         */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read the
+ *	trigger-readout offset, in units of 2.5 ms, from the
+ *	register 0x4C bit 2 to 5. If set to zero, the offset is
+ *	maximum, i.e. after readout a trigger is issued immediately.
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_offset(
+u8 *v_mag_offset_u8);
+/*!
+ *	@brief This API is used to set
+ *	trigger-readout offset in units of 2.5 ms. If set to zero,
+ *	the offset is maximum, i.e. after readout a trigger
+ *	is issued immediately. from the register 0x4C bit 2 to 5
+ *
+ *
+ *
+ *
+ *  @param v_mag_offset_u8 : The value of mag offset
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_offset(
+u8 v_mag_offset_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR MAG MANUAL/AUTO MODE SELECTION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read the bit that
+ *	enables register access on MAG_IF[2] or MAG_IF[3] writes.
+ *	This implies that the DATA registers are not updated with
+ *	magnetometer values. Accessing magnetometer requires
+ *	the magnetometer in normal mode in PMU_STATUS.
+ *	from the register 0x4C bit 7
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_manual_enable(
+u8 *v_mag_manual_u8);
+/*!
+ *	@brief This API is used to set the bit that
+ *	enables register access on MAG_IF[2] or MAG_IF[3] writes.
+ *	This implies that the DATA registers are not updated with
+ *	magnetometer values. Accessing magnetometer requires
+ *	the magnetometer in normal mode in PMU_STATUS.
+ *	from the register 0x4C bit 7
+ *
+ *
+ *
+ *  @param v_mag_manual_u8 : The value of mag manual enable
+ *	value    | mag manual
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_manual_enable(
+u8 v_mag_manual_u8);
+/***************************************************************/
+/**\name	FUNCTIONS FOR MAG READ, WRITE AND WRITE DATA ADDRESS  */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read the
+ *	magnetometer read address from the register 0x4D bit 0 to 7
+ *	@brief It provides the read address of the auxiliary mag
+ *
+ *
+ *
+ *
+ *  @param  v_mag_read_addr_u8 : The value of address need to be read
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_read_addr(
+u8 *v_mag_read_addr_u8);
+/*!
+ *	@brief This API is used to set
+ *	magnetometer read address in the register 0x4D bit 0 to 7
+ *	@brief It sets the address of the auxiliary mag register to be read
+ *
+ *
+ *
+ *  @param v_mag_read_addr_u8:
+ *	The address of the auxiliary mag register to be read
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_read_addr(
+u8 v_mag_read_addr_u8);
+/*!
+ *	@brief This API is used to read
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief It holds the address of the auxiliary mag register to be written
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The data of auxiliary mag address to write data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_write_addr(
+u8 *v_mag_write_addr_u8);
+/*!
+ *	@brief This API is used to set
+ *	magnetometer write address from the register 0x4E bit 0 to 7
+ *	@brief It sets the address of the auxiliary mag register to be written
+ *
+ *
+ *
+ *  @param  v_mag_write_addr_u8:
+ *	The data of auxiliary mag address to write data
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_write_addr(
+u8 v_mag_write_addr_u8);
+/*!
+ *	@brief This API is used to read magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_mag_write_data(
+u8 *v_mag_write_data_u8);
+/*!
+ *	@brief This API is used to set magnetometer write data
+ *	from the register 0x4F bit 0 to 7
+ *	@brief This is the data that will be written to the mag
+ *
+ *
+ *
+ *  @param  v_mag_write_data_u8: The value of mag data
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_mag_write_data(
+u8 v_mag_write_data_u8);
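+/*!
+ *	Example: one plausible manual write sequence (an assumption, not taken
+ *	from the reference driver) using the mag interface functions above:
+ *	enable manual mode, load the data byte, then write the target register
+ *	address. Whether the address write is what actually triggers the
+ *	transfer is not documented in this header; both values are
+ *	illustrative only.
+ *
+ *	@code
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = 0;
+ *
+ *	com_rslt += smi130_set_mag_manual_enable(SMI130_ENABLE);
+ *	com_rslt += smi130_set_mag_write_data(0x01);
+ *	com_rslt += smi130_set_mag_write_addr(0x4B);
+ *	@endcode
+*/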
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+ANY-MOTION XYZ, DOUBLE AND SINGLE TAP, ORIENT AND FLAT         */
+/***************************************************************/
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value that selects the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_ANY_MOTION_X_ENABLE
+ *       1         | SMI130_ANY_MOTION_Y_ENABLE
+ *       2         | SMI130_ANY_MOTION_Z_ENABLE
+ *       3         | SMI130_DOUBLE_TAP_ENABLE
+ *       4         | SMI130_SINGLE_TAP_ENABLE
+ *       5         | SMI130_ORIENT_ENABLE
+ *       6         | SMI130_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_enable_0(
+u8 enable, u8 *v_intr_enable_zero_u8);
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable from the register 0x50 bit 0 to 7
+ *
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value that selects the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_ANY_MOTION_X_ENABLE
+ *       1         | SMI130_ANY_MOTION_Y_ENABLE
+ *       2         | SMI130_ANY_MOTION_Z_ENABLE
+ *       3         | SMI130_DOUBLE_TAP_ENABLE
+ *       4         | SMI130_SINGLE_TAP_ENABLE
+ *       5         | SMI130_ORIENT_ENABLE
+ *       6         | SMI130_FLAT_ENABLE
+ *
+ *	@param v_intr_enable_zero_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_enable_0(
+u8 enable, u8 v_intr_enable_zero_u8);
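+/*!
+ *	Example: a minimal sketch (not from the reference driver) enabling
+ *	the any-motion interrupt on all three axes plus single tap, using
+ *	the selector values listed in the table above.
+ *
+ *	@code
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt = 0;
+ *
+ *	com_rslt += smi130_set_intr_enable_0(SMI130_ANY_MOTION_X_ENABLE,
+ *		SMI130_ENABLE);
+ *	com_rslt += smi130_set_intr_enable_0(SMI130_ANY_MOTION_Y_ENABLE,
+ *		SMI130_ENABLE);
+ *	com_rslt += smi130_set_intr_enable_0(SMI130_ANY_MOTION_Z_ENABLE,
+ *		SMI130_ENABLE);
+ *	com_rslt += smi130_set_intr_enable_0(SMI130_SINGLE_TAP_ENABLE,
+ *		SMI130_ENABLE);
+ *	@endcode
+*/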
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+HIGH_G XYZ, LOW_G, DATA READY, FIFO FULL AND FIFO WATER MARK  */
+/***************************************************************/
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It reads the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo water mark enables.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value that selects the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_HIGH_G_X_ENABLE
+ *       1         | SMI130_HIGH_G_Y_ENABLE
+ *       2         | SMI130_HIGH_G_Z_ENABLE
+ *       3         | SMI130_LOW_G_ENABLE
+ *       4         | SMI130_DATA_RDY_ENABLE
+ *       5         | SMI130_FIFO_FULL_ENABLE
+ *       6         | SMI130_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_enable_1(
+u8 enable, u8 *v_intr_enable_1_u8);
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable byte1 from the register 0x51 bit 0 to 6
+ *	@brief It sets the high_g_x, high_g_y, high_g_z, low_g,
+ *	data ready, fifo full and fifo water mark enables.
+ *
+ *
+ *
+ *	@param v_enable_u8 : Value that selects the interrupt
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_HIGH_G_X_ENABLE
+ *       1         | SMI130_HIGH_G_Y_ENABLE
+ *       2         | SMI130_HIGH_G_Z_ENABLE
+ *       3         | SMI130_LOW_G_ENABLE
+ *       4         | SMI130_DATA_RDY_ENABLE
+ *       5         | SMI130_FIFO_FULL_ENABLE
+ *       6         | SMI130_FIFO_WM_ENABLE
+ *
+ *	@param v_intr_enable_1_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_enable_1(
+u8 enable, u8 v_intr_enable_1_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+NO MOTION XYZ  */
+/***************************************************************/
+/*!
+ *	@brief  This API is used to read
+ *	interrupt enable byte2 from the register 0x52 bit 0 to 3
+ *	@brief It reads the no motion x, y and z enables
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_NOMOTION_X_ENABLE
+ *       1         | SMI130_NOMOTION_Y_ENABLE
+ *       2         | SMI130_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_enable_2(
+u8 enable, u8 *v_intr_enable_2_u8);
+/*!
+ *	@brief  This API is used to set
+ *	interrupt enable byte2 from the register 0x52 bit 0 to 3
+ *	@brief It writes the no motion x, y and z enables
+ *
+ *
+ *
+ *	@param v_enable_u8: The value of interrupt enable
+ *   v_enable_u8   |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_NOMOTION_X_ENABLE
+ *       1         | SMI130_NOMOTION_Y_ENABLE
+ *       2         | SMI130_NOMOTION_Z_ENABLE
+ *
+ *	@param v_intr_enable_2_u8 : The interrupt enable value
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_enable_2(
+u8 enable, u8 v_intr_enable_2_u8);
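+/*
+ * Usage sketch (illustrative only; selectors from the table above): enable
+ * no-motion detection on all three axes through register 0x52.
+ *
+ *	smi130_set_intr_enable_2(SMI130_NOMOTION_X_ENABLE, SMI130_ENABLE);
+ *	smi130_set_intr_enable_2(SMI130_NOMOTION_Y_ENABLE, SMI130_ENABLE);
+ *	smi130_set_intr_enable_2(SMI130_NOMOTION_Z_ENABLE, SMI130_ENABLE);
+ */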
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT ENABLE OF
+  STEP DETECTOR */
+/***************************************************************/
+ /*!
+ *	@brief This API is used to read
+ *	the step detector interrupt enable from
+ *	the register 0x52 bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_step_detector_enable(
+u8 *v_step_intr_u8);
+ /*!
+ *	@brief This API is used to set
+ *	the step detector interrupt enable from
+ *	the register 0x52 bit 3
+ *
+ *
+ *
+ *
+ *	@param v_step_intr_u8 : The value of step detector interrupt enable
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_detector_enable(
+u8 v_step_intr_u8);
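+/*
+ * Usage sketch (illustrative only; it assumes the generic SMI130_ENABLE /
+ * SMI130_DISABLE values also apply to the step detector bit): switch the
+ * step detector interrupt on and read the bit back from register 0x52.
+ *
+ *	u8 v_step_intr_u8 = 0;
+ *
+ *	if (smi130_set_step_detector_enable(SMI130_ENABLE) == 0)
+ *		smi130_get_step_detector_enable(&v_step_intr_u8);
+ */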
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT CONTROL */
+/***************************************************************/
+/*!
+ *	@brief  Reads the trigger condition of the interrupt1
+ *	and interrupt2 pins from the register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_EDGE_CTRL
+ *       1         | SMI130_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_EDGE
+ *  0x00     |  SMI130_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_edge_ctrl(
+u8 v_channel_u8, u8 *v_intr_edge_ctrl_u8);
+/*!
+ *	@brief  Configure trigger condition of interrupt1
+ *	and interrupt2 pin from the register 0x53
+ *	@brief interrupt1 - bit 0
+ *	@brief interrupt2 - bit 4
+ *
+ *  @param v_channel_u8: The value of edge trigger selection
+ *   v_channel_u8  |   Edge trigger
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_EDGE_CTRL
+ *       1         | SMI130_INTR2_EDGE_CTRL
+ *
+ *	@param v_intr_edge_ctrl_u8 : The value of edge trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_EDGE
+ *  0x00     |  SMI130_LEVEL
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_edge_ctrl(
+u8 v_channel_u8, u8 v_intr_edge_ctrl_u8);
+/*!
+ *	@brief  API used to get the configured level condition of interrupt1
+ *	and interrupt2 pins from the register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_LEVEL
+ *       1         | SMI130_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_LEVEL_HIGH
+ *  0x00     |  SMI130_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_level(
+u8 v_channel_u8, u8 *v_intr_level_u8);
+/*!
+ *	@brief  API used to set the level condition of interrupt1
+ *	and interrupt2 pins from the register 0x53
+ *	@brief interrupt1 - bit 1
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of level condition selection
+ *   v_channel_u8  |   level selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_LEVEL
+ *       1         | SMI130_INTR2_LEVEL
+ *
+ *	@param v_intr_level_u8 : The value of level of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_LEVEL_HIGH
+ *  0x00     |  SMI130_LEVEL_LOW
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_level(
+u8 v_channel_u8, u8 v_intr_level_u8);
+/*!
+ *	@brief  API used to get the configured output type of interrupt1
+ *	and interrupt2 from the register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   output type selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_TYPE
+ *       1         | SMI130_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_OPEN_DRAIN
+ *  0x00     |  SMI130_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_output_type(
+u8 v_channel_u8, u8 *v_intr_output_type_u8);
+/*!
+ *	@brief  API used to set the output type of interrupt1
+ *	and interrupt2 from the register 0x53
+ *	@brief interrupt1 - bit 2
+ *	@brief interrupt2 - bit 6
+ *
+ *
+ *  @param v_channel_u8: The value of output type enable selection
+ *   v_channel_u8  |   output type selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_TYPE
+ *       1         | SMI130_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_intr_output_type_u8 :
+ *	The value of output type of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_OPEN_DRAIN
+ *  0x00     |  SMI130_PUSH_PULL
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_output_type(
+u8 v_channel_u8, u8 v_intr_output_type_u8);
+ /*!
+ *	@brief API used to get the Output enable for interrupt1
+ *	and interrupt2 pins from the register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   output enable selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_TYPE
+ *       1         | SMI130_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_output_enable(
+u8 v_channel_u8, u8 *v_output_enable_u8);
+ /*!
+ *	@brief API used to set the Output enable for interrupt1
+ *	and interrupt2 pins from the register 0x53
+ *	@brief interrupt1 - bit 3
+ *	@brief interrupt2 - bit 7
+ *
+ *  @param v_channel_u8: The value of output enable selection
+ *   v_channel_u8  |   output enable selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_OUTPUT_TYPE
+ *       1         | SMI130_INTR2_OUTPUT_TYPE
+ *
+ *	@param v_output_enable_u8 :
+ *	The value of output enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_output_enable(
+u8 v_channel_u8, u8 v_output_enable_u8);
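+/*
+ * Usage sketch (illustrative only; channel and value names come from the
+ * tables above, including the SMI130_INTR1_OUTPUT_TYPE selector that the
+ * output enable table also lists): configure the INT1 pin in register 0x53
+ * as a push-pull output signalling an active-high level.
+ *
+ *	smi130_set_intr_edge_ctrl(SMI130_INTR1_EDGE_CTRL, SMI130_LEVEL);
+ *	smi130_set_intr_level(SMI130_INTR1_LEVEL, SMI130_LEVEL_HIGH);
+ *	smi130_set_intr_output_type(SMI130_INTR1_OUTPUT_TYPE,
+ *			SMI130_PUSH_PULL);
+ *	smi130_set_output_enable(SMI130_INTR1_OUTPUT_TYPE, SMI130_OUTPUT);
+ */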
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT LATCH INTERRUPT  */
+/***************************************************************/
+/*!
+*	@brief This API is used to get the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    SMI130_LATCH_DUR_NONE              |      0x00
+*    SMI130_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    SMI130_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    SMI130_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    SMI130_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    SMI130_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    SMI130_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    SMI130_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    SMI130_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    SMI130_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    SMI130_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    SMI130_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    SMI130_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    SMI130_LATCH_DUR_1_28_SEC          |      0x0D
+*    SMI130_LATCH_DUR_2_56_SEC          |      0x0E
+*    SMI130_LATCHED                     |      0x0F
+*
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_latch_intr(
+u8 *v_latch_intr_u8);
+/*!
+*	@brief This API is used to set the latch duration
+*	from the register 0x54 bit 0 to 3
+*	@brief This latch selection is not applicable for data ready,
+*	orientation and flat interrupts.
+*
+*
+*
+*  @param v_latch_intr_u8 : The value of latch duration
+*	Latch Duration                      |     value
+* --------------------------------------|------------------
+*    SMI130_LATCH_DUR_NONE              |      0x00
+*    SMI130_LATCH_DUR_312_5_MICRO_SEC   |      0x01
+*    SMI130_LATCH_DUR_625_MICRO_SEC     |      0x02
+*    SMI130_LATCH_DUR_1_25_MILLI_SEC    |      0x03
+*    SMI130_LATCH_DUR_2_5_MILLI_SEC     |      0x04
+*    SMI130_LATCH_DUR_5_MILLI_SEC       |      0x05
+*    SMI130_LATCH_DUR_10_MILLI_SEC      |      0x06
+*    SMI130_LATCH_DUR_20_MILLI_SEC      |      0x07
+*    SMI130_LATCH_DUR_40_MILLI_SEC      |      0x08
+*    SMI130_LATCH_DUR_80_MILLI_SEC      |      0x09
+*    SMI130_LATCH_DUR_160_MILLI_SEC     |      0x0A
+*    SMI130_LATCH_DUR_320_MILLI_SEC     |      0x0B
+*    SMI130_LATCH_DUR_640_MILLI_SEC     |      0x0C
+*    SMI130_LATCH_DUR_1_28_SEC          |      0x0D
+*    SMI130_LATCH_DUR_2_56_SEC          |      0x0E
+*    SMI130_LATCHED                     |      0x0F
+*
+*
+*
+*	@return results of bus communication function
+*	@retval 0 -> Success
+*	@retval -1 -> Error
+*
+*
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_latch_intr(
+u8 v_latch_intr_u8);
+/*!
+ *	@brief API used to get input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_INPUT_ENABLE
+ *       1         | SMI130_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_input_enable(
+u8 v_channel_u8, u8 *v_input_en_u8);
+/*!
+ *	@brief API used to set input enable for interrupt1
+ *	and interrupt2 pin from the register 0x54
+ *	@brief interrupt1 - bit 4
+ *	@brief interrupt2 - bit 5
+ *
+ *  @param v_channel_u8: The value of input enable selection
+ *   v_channel_u8  |   input selection
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_INPUT_ENABLE
+ *       1         | SMI130_INTR2_INPUT_ENABLE
+ *
+ *	@param v_input_en_u8 :
+ *	The value of input enable of interrupt enable
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x01     |  SMI130_INPUT
+ *  0x00     |  SMI130_OUTPUT
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_input_enable(
+u8 v_channel_u8, u8 v_input_en_u8);
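+/*
+ * Usage sketch (illustrative only; duration and channel names from the
+ * tables above): latch interrupts for 40 ms in register 0x54 and keep the
+ * INT2 pin usable as an input.
+ *
+ *	smi130_set_latch_intr(SMI130_LATCH_DUR_40_MILLI_SEC);
+ *	smi130_set_input_enable(SMI130_INTR2_INPUT_ENABLE, SMI130_INPUT);
+ */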
+/***************************************************************/
+/**\name	FUNCTION FOR INTERRUPT1 AND INTERRUPT2 MAPPING */
+/***************************************************************/
+ /*!
+ *	@brief reads the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_LOW_G
+ *       1         | SMI130_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g(
+u8 v_channel_u8, u8 *v_intr_low_g_u8);
+ /*!
+ *	@brief set the Low g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 0 in the register 0x55
+ *	@brief interrupt2 bit 0 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of low_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_LOW_G
+ *       1         | SMI130_INTR2_MAP_LOW_G
+ *
+ *	@param v_intr_low_g_u8 : The value of low_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g(
+u8 v_channel_u8, u8 v_intr_low_g_u8);
+/*!
+ *	@brief Reads the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_HIGH_G
+ *       1         | SMI130_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g(
+u8 v_channel_u8, u8 *v_intr_high_g_u8);
+/*!
+ *	@brief Write the HIGH g interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 1 in the register 0x55
+ *	@brief interrupt2 bit 1 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of high_g selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_HIGH_G
+ *       1         | SMI130_INTR2_MAP_HIGH_G
+ *
+ *	@param v_intr_high_g_u8 : The value of high_g enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g(
+u8 v_channel_u8, u8 v_intr_high_g_u8);
+/*!
+ *	@brief Reads the Any motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ANY_MOTION
+ *       1         | SMI130_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_any_motion(
+u8 v_channel_u8, u8 *v_intr_any_motion_u8);
+/*!
+ *	@brief Write the Any motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 2 in the register 0x55
+ *	@brief interrupt2 bit 2 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of any motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ANY_MOTION
+ *       1         | SMI130_INTR2_MAP_ANY_MOTION
+ *
+ *	@param v_intr_any_motion_u8 : The value of any motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_any_motion(
+u8 v_channel_u8, u8 v_intr_any_motion_u8);
+/*!
+ *	@brief Reads the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_NOMO
+ *       1         | SMI130_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_nomotion(
+u8 v_channel_u8, u8 *v_intr_nomotion_u8);
+/*!
+ *	@brief Write the No motion interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 3 in the register 0x55
+ *	@brief interrupt2 bit 3 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of no motion selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_NOMO
+ *       1         | SMI130_INTR2_MAP_NOMO
+ *
+ *	@param v_intr_nomotion_u8 : The value of no motion enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_nomotion(
+u8 v_channel_u8, u8 v_intr_nomotion_u8);
+/*!
+ *	@brief Reads the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DOUBLE_TAP
+ *       1         | SMI130_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_double_tap(
+u8 v_channel_u8, u8 *v_intr_double_tap_u8);
+/*!
+ *	@brief Write the Double Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 4 in the register 0x55
+ *	@brief interrupt2 bit 4 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of double tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DOUBLE_TAP
+ *       1         | SMI130_INTR2_MAP_DOUBLE_TAP
+ *
+ *	@param v_intr_double_tap_u8 : The value of double tap enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_double_tap(
+u8 v_channel_u8, u8 v_intr_double_tap_u8);
+/*!
+ *	@brief Reads the Single Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_SINGLE_TAP
+ *       1         | SMI130_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap  enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_single_tap(
+u8 v_channel_u8, u8 *v_intr_single_tap_u8);
+/*!
+ *	@brief Write the Single Tap interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 5 in the register 0x55
+ *	@brief interrupt2 bit 5 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of single tap interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_SINGLE_TAP
+ *       1         | SMI130_INTR2_MAP_SINGLE_TAP
+ *
+ *	@param v_intr_single_tap_u8 : The value of single tap  enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_single_tap(
+u8 v_channel_u8, u8 v_intr_single_tap_u8);
+/*!
+ *	@brief Reads the Orient interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient_mbl interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ORIENT
+ *       1         | SMI130_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_mbl_u8 : The value of orient_mbl enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl(
+u8 v_channel_u8, u8 *v_intr_orient_mbl_u8);
+/*!
+ *	@brief Write the Orient interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 6 in the register 0x55
+ *	@brief interrupt2 bit 6 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of orient_mbl interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_ORIENT
+ *       1         | SMI130_INTR2_MAP_ORIENT
+ *
+ *	@param v_intr_orient_mbl_u8 : The value of orient_mbl enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl(
+u8 v_channel_u8, u8 v_intr_orient_mbl_u8);
+ /*!
+ *	@brief Reads the Flat interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FLAT
+ *       1         | SMI130_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat(
+u8 v_channel_u8, u8 *v_intr_flat_u8);
+ /*!
+ *	@brief Write the Flat interrupt
+ *	mapped to interrupt1
+ *	and interrupt2 from the register 0x55 and 0x57
+ *	@brief interrupt1 bit 7 in the register 0x55
+ *	@brief interrupt2 bit 7 in the register 0x57
+ *
+ *
+ *	@param v_channel_u8: The value of flat interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FLAT
+ *       1         | SMI130_INTR2_MAP_FLAT
+ *
+ *	@param v_intr_flat_u8 : The value of flat enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat(
+u8 v_channel_u8, u8 v_intr_flat_u8);
+/*!
+ *	@brief Reads PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_PMUTRIG
+ *       1         | SMI130_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_pmu_trig(
+u8 v_channel_u8, u8 *v_intr_pmu_trig_u8);
+/*!
+ *	@brief Write PMU trigger interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 0 and 4
+ *	@brief interrupt1 bit 0 in the register 0x56
+ *	@brief interrupt2 bit 4 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of pmu trigger selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_PMUTRIG
+ *       1         | SMI130_INTR2_MAP_PMUTRIG
+ *
+ *	@param v_intr_pmu_trig_u8 : The value of pmu trigger enable
+ *	value    | trigger enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_pmu_trig(
+u8 v_channel_u8, u8 v_intr_pmu_trig_u8);
+/*!
+ *	@brief Reads FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_FULL
+ *       1         | SMI130_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_fifo_full(
+u8 v_channel_u8, u8 *v_intr_fifo_full_u8);
+/*!
+ *	@brief Write FIFO Full interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 5 and 1
+ *	@brief interrupt1 bit 5 in the register 0x56
+ *	@brief interrupt2 bit 1 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo full interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_FULL
+ *       1         | SMI130_INTR2_MAP_FIFO_FULL
+ *
+ *	@param v_intr_fifo_full_u8 : The value of fifo full interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_fifo_full(
+u8 v_channel_u8, u8 v_intr_fifo_full_u8);
+/*!
+ *	@brief Reads FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_WM
+ *       1         | SMI130_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_fifo_wm(
+u8 v_channel_u8, u8 *v_intr_fifo_wm_u8);
+/*!
+ *	@brief Write FIFO Watermark interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56 bit 6 and 2
+ *	@brief interrupt1 bit 6 in the register 0x56
+ *	@brief interrupt2 bit 2 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of fifo Watermark interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_FIFO_WM
+ *       1         | SMI130_INTR2_MAP_FIFO_WM
+ *
+ *	@param v_intr_fifo_wm_u8 : The value of fifo Watermark interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_fifo_wm(
+u8 v_channel_u8, u8 v_intr_fifo_wm_u8);
+/*!
+ *	@brief Reads Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DATA_RDY
+ *       1         | SMI130_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_data_rdy(
+u8 v_channel_u8, u8 *v_intr_data_rdy_u8);
+/*!
+ *	@brief Write Data Ready interrupt mapped to interrupt1
+ *	and interrupt2 from the register 0x56
+ *	@brief interrupt1 bit 7 in the register 0x56
+ *	@brief interrupt2 bit 3 in the register 0x56
+ *
+ *
+ *	@param v_channel_u8: The value of data ready interrupt selection
+ *   v_channel_u8  |   interrupt
+ *  ---------------|---------------
+ *       0         | SMI130_INTR1_MAP_DATA_RDY
+ *       1         | SMI130_INTR2_MAP_DATA_RDY
+ *
+ *	@param v_intr_data_rdy_u8 : The value of data ready interrupt enable
+ *	value    | interrupt enable
+ * ----------|-------------------
+ *  0x01     |  SMI130_ENABLE
+ *  0x00     |  SMI130_DISABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_data_rdy(
+u8 v_channel_u8, u8 v_intr_data_rdy_u8);
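+/*
+ * Usage sketch (illustrative only; channel name from the table above, 0
+ * return treated as the documented success code): route the data-ready
+ * interrupt to the INT1 pin (register 0x56 bit 7) and verify the mapping.
+ *
+ *	u8 v_intr_data_rdy_u8 = SMI130_DISABLE;
+ *
+ *	if (smi130_set_intr_data_rdy(SMI130_INTR1_MAP_DATA_RDY,
+ *			SMI130_ENABLE) == 0)
+ *		smi130_get_intr_data_rdy(SMI130_INTR1_MAP_DATA_RDY,
+ *				&v_intr_data_rdy_u8);
+ */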
+/***************************************************************/
+/**\name	FUNCTION FOR TAP SOURCE CONFIGURATION          */
+/***************************************************************/
+ /*!
+ *	@brief This API reads data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_source(
+u8 *v_tap_source_u8);
+ /*!
+ *	@brief This API writes the data source for the interrupt
+ *	engine for the single and double tap interrupts from the register
+ *	0x58 bit 3
+ *
+ *
+ *  @param v_tap_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_source(
+u8 v_tap_source_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G AND HIGH_G SOURCE CONFIGURATION */
+/***************************************************************/
+ /*!
+ *	@brief This API reads the data source for the
+ *	interrupt engine for the low and high g interrupts
+ *	from the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_high_source(
+u8 *v_low_high_source_u8);
+ /*!
+ *	@brief This API writes the data source for the
+ *	interrupt engine for the low and high g interrupts
+ *	from the register 0x58 bit 7
+ *
+ *  @param v_low_high_source_u8 : The value of the tap source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_high_source(
+u8 v_low_high_source_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR MOTION SOURCE CONFIGURATION          */
+/***************************************************************/
+ /*!
+ *	@brief This API reads the data source for the
+ *	interrupt engine for the nomotion and anymotion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_motion_source(
+u8 *v_motion_source_u8);
+ /*!
+ *	@brief This API writes the data source for the
+ *	interrupt engine for the nomotion and anymotion interrupts
+ *	from the register 0x59 bit 7
+ *
+ *  @param v_motion_source_u8 :
+ *	The value of the any/no motion interrupt source
+ *	value    | Description
+ * ----------|-------------------
+ *  0x01     |  UNFILTER_DATA
+ *  0x00     |  FILTER_DATA
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_motion_source(
+u8 v_motion_source_u8);
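+/*
+ * Usage sketch (illustrative only; the raw values follow the
+ * FILTER_DATA/UNFILTER_DATA tables above, 0x00 meaning filtered data):
+ * feed the tap, low/high-g and any/no-motion interrupt engines with
+ * pre-filtered data.
+ *
+ *	smi130_set_intr_tap_source(0x00);
+ *	smi130_set_intr_low_high_source(0x00);
+ *	smi130_set_intr_motion_source(0x00);
+ */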
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G DURATION CONFIGURATION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note The low_g duration sets the interrupt trigger delay according to
+ *	(v_low_g_durn_u8 * 2.5) ms, in a range from 2.5 ms to 640 ms;
+ *	the default delay is 20 ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_durn(
+u8 *v_low_durn_u8);
+ /*!
+ *	@brief This API is used to write the low_g duration from register
+ *	0x5A bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_durn_u8 : The value of low_g duration
+ *
+ *	@note The low_g duration sets the interrupt trigger delay according to
+ *	(v_low_g_durn_u8 * 2.5) ms, in a range from 2.5 ms to 640 ms;
+ *	the default delay is 20 ms.
+ *	@note When low_g data source of interrupt is unfiltered
+ *	the sensor must not be in low power mode
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_durn(
+u8 v_low_durn_u8);
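+/*
+ * Usage sketch (illustrative only): program a 100 ms low-g delay. With the
+ * 2.5 ms per LSB scaling noted above, 100 ms corresponds to the register
+ * value 40.
+ *
+ *	smi130_set_intr_low_g_durn(40);
+ */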
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G THRESH CONFIGURATION          */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note Low_g interrupt trigger threshold according to
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	3.91 mg for v_low_g_thres_u8 = 0
+ *	The threshold range is from 3.91 mg to 2000 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_thres(
+u8 *v_low_g_thres_u8);
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the low-g interrupt from the register 0x5B bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_low_g_thres_u8 : The value of low_g threshold
+ *
+ *	@note Low_g interrupt trigger threshold according to
+ *	(v_low_g_thres_u8 * 7.81)mg for v_low_g_thres_u8 > 0
+ *	3.91 mg for v_low_g_thres_u8 = 0
+ *	The threshold range is from 3.91 mg to 2000 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_thres(
+u8 v_low_g_thres_u8);
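+/*
+ * Usage sketch (illustrative only): set the low-g threshold close to
+ * 375 mg. With the 7.81 mg per LSB scaling noted above, 48 * 7.81 mg is
+ * roughly 375 mg.
+ *
+ *	smi130_set_intr_low_g_thres(48);
+ */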
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G HYSTERESIS CONFIGURATION     */
+/***************************************************************/
+ /*!
+ *	@brief This API Reads Low-g interrupt hysteresis
+ *	from the register 0x5C bit 0 to 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_hyst(
+u8 *v_low_hyst_u8);
+ /*!
+ *	@brief This API writes the Low-g interrupt hysteresis
+ *	from the register 0x5C bit 0 to 1
+ *
+ *  @param v_low_hyst_u8 :The value of low_g hysteresis
+ *
+ *	@note Low_g hysteresis calculated by v_low_hyst_u8*125 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_hyst(
+u8 v_low_hyst_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR LOW_G MODE CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API reads Low-g interrupt mode
+ *	from the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_low_g_mode(
+u8 *v_low_g_mode_u8);
+/*!
+ *	@brief This API writes the Low-g interrupt mode
+ *	from the register 0x5C bit 2
+ *
+ *  @param v_low_g_mode_u8 : The value of low_g mode
+ *	Value    |  Description
+ * ----------|-----------------
+ *	   0     | single-axis
+ *     1     | axis-summing
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_low_g_mode(
+u8 v_low_g_mode_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR HIGH_G HYST CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API reads High-g interrupt hysteresis
+ *	from the register 0x5C bit 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g_hyst(
+u8 *v_high_g_hyst_u8);
+/*!
+ *	@brief This API writes the High-g interrupt hysteresis
+ *	from the register 0x5C bit 6 and 7
+ *
+ *  @param v_high_g_hyst_u8 : The value of high hysteresis
+ *
+ *	@note High_g hysteresis changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g hysteresis
+ *  ----------------|---------------------
+ *      2g          |  high_hy*125 mg
+ *      4g          |  high_hy*250 mg
+ *      8g          |  high_hy*500 mg
+ *      16g         |  high_hy*1000 mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g_hyst(
+u8 v_high_g_hyst_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR HIGH_G DURATION CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	v_high_g_durn_u8 * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g_durn(
+u8 *v_high_g_durn_u8);
+/*!
+ *	@brief This API is used to write Delay
+ *	time definition for the high-g interrupt from the register
+ *	0x5D bit 0 to 7
+ *
+ *
+ *
+ *  @param  v_high_g_durn_u8 :  The value of high duration
+ *
+ *	@note High_g interrupt delay triggered according to
+ *	v_high_g_durn_u8 * 2.5ms in a range from 2.5ms to 640ms
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g_durn(
+u8 v_high_g_durn_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR HIGH_G THRESHOLD CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read Threshold
+ *	definition for the high-g interrupt from the register 0x5E bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : Pointer holding the value of Threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_high_g_thres(
+u8 *v_high_g_thres_u8);
+/*!
+ *	@brief This API is used to write Threshold
+ *	definition for the high-g interrupt from the register 0x5E bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param  v_high_g_thres_u8 : The value of the high_g threshold
+ *	@note High_g threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  v_high_g_thres_u8*7.81 mg
+ *      4g          |  v_high_g_thres_u8*15.63 mg
+ *      8g          |  v_high_g_thres_u8*31.25 mg
+ *      16g         |  v_high_g_thres_u8*62.5 mg
+ *	@note when v_high_g_thres_u8 = 0
+ *   accel_range    | high_g threshold
+ *  ----------------|---------------------
+ *      2g          |  3.91 mg
+ *      4g          |  7.81 mg
+ *      8g          |  15.63 mg
+ *      16g         |  31.25 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_high_g_thres(
+u8 v_high_g_thres_u8);
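+/*
+ * Usage sketch (illustrative only): with the accel range set to 2g the
+ * high-g threshold scales by 7.81 mg per LSB (see the table above), so a
+ * raw value of 128 corresponds to roughly 1 g.
+ *
+ *	smi130_set_intr_high_g_thres(128);
+ */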
+/***************************************************************/
+/**\name	FUNCTION FOR ANY MOTION DURATION CONFIGURATION     */
+/***************************************************************/
+/*!
+ *	@brief This API reads any motion duration
+ *	from the register 0x5F bit 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration can be calculated by "v_any_motion_durn_u8 + 1"
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_any_motion_durn(
+u8 *v_any_motion_durn_u8);
+/*!
+ *	@brief This API writes the any motion duration
+ *	from the register 0x5F bit 0 and 1
+ *
+ *  @param v_any_motion_durn_u8 : The value of any motion duration
+ *
+ *	@note Any motion duration can be calculated by "v_any_motion_durn_u8 + 1"
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_any_motion_durn(
+u8 v_any_motion_durn_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR SLOW NO MOTION DURATION CONFIGURATION  */
+/***************************************************************/
+ /*!
+ *	@brief This API reads the Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	@note v_slow_no_motion_u8(5:4)=0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4)=0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5)='1' ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_slow_no_motion_durn(
+u8 *v_slow_no_motion_u8);
+ /*!
+ *	@brief This API writes the Slow/no-motion
+ *	interrupt trigger delay duration from the register 0x5F bit 2 to 7
+ *
+ *  @param v_slow_no_motion_u8 :The value of slow no motion duration
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *	@note
+ *	@note v_slow_no_motion_u8(5:4)=0b00 ->
+ *	[v_slow_no_motion_u8(3:0) + 1] * 1.28s (1.28s-20.48s)
+ *	@note v_slow_no_motion_u8(5:4)=0b01 ->
+ *	[v_slow_no_motion_u8(3:0) + 5] * 5.12s (25.6s-102.4s)
+ *	@note v_slow_no_motion_u8(5)='1' ->
+ *	[v_slow_no_motion_u8(4:0) + 11] * 10.24s (112.64s-430.08s)
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_slow_no_motion_durn(
+u8 v_slow_no_motion_u8);
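+/*
+ * Usage sketch (illustrative only): request a slow/no-motion delay of
+ * about 5.12 s. Per the encoding above, bits 5:4 = 0b00 select the
+ * [value(3:0) + 1] * 1.28 s range, so value(3:0) = 3 gives 4 * 1.28 s.
+ *
+ *	smi130_set_intr_slow_no_motion_durn(0x03);
+ */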
+/***************************************************************/
+/**\name	FUNCTION FOR ANY MOTION THRESHOLD CONFIGURATION  */
+/***************************************************************/
+/*!
+ *	@brief This API is used to read threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_any_motion_thres(
+u8 *v_any_motion_thres_u8);
+/*!
+ *	@brief This API is used to write threshold
+ *	definition for the any-motion interrupt
+ *	from the register 0x60 bit 0 to 7
+ *
+ *
+ *  @param  v_any_motion_thres_u8 : The value of any motion threshold
+ *
+ *	@note any motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_any_motion_thres_u8*3.91 mg
+ *      4g          |  v_any_motion_thres_u8*7.81 mg
+ *      8g          |  v_any_motion_thres_u8*15.63 mg
+ *      16g         |  v_any_motion_thres_u8*31.25 mg
+ *	@note when v_any_motion_thres_u8 = 0
+ *   accel_range    | any motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_any_motion_thres(
+u8 v_any_motion_thres_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR SLO/NO MOTION THRESHOLD CONFIGURATION  */
+/***************************************************************/
+ /*!
+ *	@brief This API is used to read threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_slow_no_motion_thres(
+u8 *v_slow_no_motion_thres_u8);
+ /*!
+ *	@brief This API is used to write threshold
+ *	for the slow/no-motion interrupt
+ *	from the register 0x61 bit 0 to 7
+ *
+ *
+ *
+ *
+ *  @param v_slow_no_motion_thres_u8 : The value of slow no motion threshold
+ *	@note slow no motion threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  v_slow_no_motion_thres_u8*3.91 mg
+ *      4g          |  v_slow_no_motion_thres_u8*7.81 mg
+ *      8g          |  v_slow_no_motion_thres_u8*15.63 mg
+ *      16g         |  v_slow_no_motion_thres_u8*31.25 mg
+ *	@note when v_slow_no_motion_thres_u8 = 0
+ *   accel_range    | slow no motion threshold
+ *  ----------------|---------------------
+ *      2g          |  1.95 mg
+ *      4g          |  3.91 mg
+ *      8g          |  7.81 mg
+ *      16g         |  15.63 mg
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_slow_no_motion_thres(
+u8 v_slow_no_motion_thres_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR SLO/NO MOTION SELECT CONFIGURATION  */
+/***************************************************************/
+ /*!
+ *	@brief This API is used to read
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_slow_no_motion_select(
+u8 *v_intr_slow_no_motion_select_u8);
+ /*!
+ *	@brief This API is used to write
+ *	the slow/no-motion selection from the register 0x62 bit 0
+ *
+ *
+ *
+ *
+ *  @param  v_intr_slow_no_motion_select_u8 :
+ *	The value of slow/no-motion select
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  SLOW_MOTION
+ *  0x01     |  NO_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_slow_no_motion_select(
+u8 v_intr_slow_no_motion_select_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR SIGNIFICANT MOTION SELECT CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API is used to select
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_significant_motion_select(
+u8 *int_sig_mot_sel);
+ /*!
+ *	@brief This API is used to write, select
+ *	the significant or any motion interrupt from the register 0x62 bit 1
+ *
+ *
+ *
+ *
+ *  @param  v_intr_significant_motion_select_u8 :
+ *	the value of significant or any motion interrupt selection
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  ANY_MOTION
+ *  0x01     |  SIGNIFICANT_MOTION
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_significant_motion_select(
+u8 int_sig_mot_sel);
+ /*!
+ *	@brief This API is used to read
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_significant_motion_skip(
+u8 *v_int_sig_mot_skip_u8);
+ /*!
+ *	@brief This API is used to write
+ *	the significant skip time from the register 0x62 bit  2 and 3
+ *
+ *
+ *
+ *
+ *  @param  v_int_sig_mot_skip_u8 : the value of significant skip time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  skip time 1.5 seconds
+ *  0x01     |  skip time 3 seconds
+ *  0x02     |  skip time 6 seconds
+ *  0x03     |  skip time 12 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_significant_motion_skip(
+u8 v_int_sig_mot_skip_u8);
+ /*!
+ *	@brief This API is used to read
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 seconds
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_significant_motion_proof(
+u8 *int_sig_mot_proof);
+ /*!
+ *	@brief This API is used to write
+ *	the significant proof time from the register 0x62 bit  4 and 5
+ *
+ *
+ *
+ *
+ *  @param  v_significant_motion_proof_u8 :
+ *	the value of significant proof time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     |  proof time 0.25 seconds
+ *  0x01     |  proof time 0.5 seconds
+ *  0x02     |  proof time 1 seconds
+ *  0x03     |  proof time 2 seconds
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_significant_motion_proof(
+u8 int_sig_mot_proof);
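+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised, that selects
+ *	significant-motion detection with a 3 second skip time and a
+ *	0.5 second proof time, using the values from the tables above.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_set_intr_significant_motion_select(0x01);
+ *	com_rslt += smi130_set_intr_significant_motion_skip(0x01);
+ *	com_rslt += smi130_set_intr_significant_motion_proof(0x01);
+*/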
+/***************************************************************/
+/**\name	FUNCTION FOR TAP DURATION CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API is used to get the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_DURN_50MS
+ *  0x01     | SMI130_TAP_DURN_100MS
+ *  0x02     | SMI130_TAP_DURN_150MS
+ *  0x03     | SMI130_TAP_DURN_200MS
+ *  0x04     | SMI130_TAP_DURN_250MS
+ *  0x05     | SMI130_TAP_DURN_375MS
+ *  0x07     | SMI130_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_durn(
+u8 *v_tap_durn_u8);
+/*!
+ *	@brief This API is used to write the tap duration
+ *	from the register 0x63 bit 0 to 2
+ *
+ *
+ *
+ *  @param v_tap_durn_u8 : The value of tap duration
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_DURN_50MS
+ *  0x01     | SMI130_TAP_DURN_100MS
+ *  0x02     | SMI130_TAP_DURN_150MS
+ *  0x03     | SMI130_TAP_DURN_200MS
+ *  0x04     | SMI130_TAP_DURN_250MS
+ *  0x05     | SMI130_TAP_DURN_375MS
+ *  0x07     | SMI130_TAP_DURN_700MS
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_durn(
+u8 v_tap_durn_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR TAP SHOCK CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_SHOCK_50MS
+ *  0x01     | SMI130_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_shock(
+u8 *v_tap_shock_u8);
+ /*!
+ *	@brief This API write the
+ *	tap shock duration from the register 0x63 bit 6
+ *
+ *  @param v_tap_shock_u8 :The value of tap shock
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_SHOCK_50MS
+ *  0x01     | SMI130_TAP_SHOCK_75MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_shock(
+u8 v_tap_shock_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR TAP QUIET CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_QUIET_30MS
+ *  0x01     | SMI130_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_quiet(
+u8 *v_tap_quiet_u8);
+/*!
+ *	@brief This API write
+ *	tap quiet duration from the register 0x63 bit 7
+ *
+ *
+ *  @param v_tap_quiet_u8 : The value of tap quiet
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | SMI130_TAP_QUIET_30MS
+ *  0x01     | SMI130_TAP_QUIET_20MS
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_quiet(
+u8 v_tap_quiet_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR TAP THRESHOLD CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_tap_thres(
+u8 *v_tap_thres_u8);
+ /*!
+ *	@brief This API write Threshold of the
+ *	single/double tap interrupt from the register 0x64 bit 0 to 4
+ *
+ *
+ *	@param v_tap_thres_u8 : The value of single/double tap threshold
+ *
+ *	@note single/double tap threshold changes according to accel g range
+ *	accel g range can be set by the function ""
+ *   accel_range    | single/double tap threshold
+ *  ----------------|---------------------
+ *      2g          |  ((v_tap_thres_u8 + 1) * 62.5)mg
+ *      4g          |  ((v_tap_thres_u8 + 1) * 125)mg
+ *      8g          |  ((v_tap_thres_u8 + 1) * 250)mg
+ *      16g         |  ((v_tap_thres_u8 + 1) * 500)mg
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_tap_thres(
+u8 v_tap_thres_u8);
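+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised, of a
+ *	single/double tap configuration: 100 ms duration, 50 ms shock,
+ *	30 ms quiet and a threshold code of 4 (~312.5 mg at the 2g range).
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_set_intr_tap_durn(SMI130_TAP_DURN_100MS);
+ *	com_rslt += smi130_set_intr_tap_shock(0x00);
+ *	com_rslt += smi130_set_intr_tap_quiet(0x00);
+ *	com_rslt += smi130_set_intr_tap_thres(0x04);
+*/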
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT MODE CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mbl_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_mode(
+u8 *v_orient_mbl_mode_u8);
+ /*!
+ *	@brief This API write the threshold for orientation interrupt
+ *	from the register 0x65 bit 0 and 1
+ *
+ *  @param v_orient_mbl_mode_u8 : The value of threshold for orientation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | symmetrical
+ *  0x01     | high-asymmetrical
+ *  0x02     | low-asymmetrical
+ *  0x03     | symmetrical
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_mode(
+u8 v_orient_mbl_mode_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT BLOCKING CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read the orient_mbl blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_mbl_blocking_u8 : The value of orient_mbl blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient_mbl is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_blocking(
+u8 *v_orient_mbl_blocking_u8);
+/*!
+ *	@brief This API write the orient_mbl blocking mode
+ *	that is used for the generation of the orientation interrupt.
+ *	from the register 0x65 bit 2 and 3
+ *
+ *  @param v_orient_mbl_blocking_u8 : The value of orient_mbl blocking mode
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | No blocking
+ *  0x01     | Theta blocking or acceleration in any axis > 1.5g
+ *  0x02     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.2g or acceleration in any axis > 1.5g
+ *  0x03     | Theta blocking or acceleration slope in any axis >
+ *   -       | 0.4g or acceleration in any axis >
+ *   -       | 1.5g and value of orient_mbl is not stable
+ *   -       | for at least 100 ms
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_blocking(
+u8 v_orient_mbl_blocking_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT HYSTERESIS CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read Orient interrupt
+ *	hysteresis, from the register 0x65 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_mbl_hyst_u8 : The value of orient_mbl hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_hyst(
+u8 *v_orient_mbl_hyst_u8);
+/*!
+ *	@brief This API write Orient interrupt
+ *	hysteresis, from the register 0x65 bit 4 to 7
+ *
+ *
+ *
+ *  @param v_orient_mbl_hyst_u8 : The value of orient_mbl hysteresis
+ *
+ *	@note 1 LSB corresponds to 62.5 mg,
+ *	irrespective of the selected accel range
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_hyst(
+u8 v_orient_mbl_hyst_u8);
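+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised, of an
+ *	orientation interrupt setup: symmetrical mode, theta blocking
+ *	(0x01) and a hysteresis of 2 LSB (125 mg at 62.5 mg per LSB).
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_set_intr_orient_mbl_mode(0x00);
+ *	com_rslt += smi130_set_intr_orient_mbl_blocking(0x01);
+ *	com_rslt += smi130_set_intr_orient_mbl_hyst(0x02);
+*/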
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT THETA CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read Orient
+ *	blocking angle (0 to 44.8) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_mbl_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_theta(
+u8 *v_orient_mbl_theta_u8);
+ /*!
+ *	@brief This API write Orient
+ *	blocking angle (0 to 44.8) from the register 0x66 bit 0 to 5
+ *
+ *  @param v_orient_mbl_theta_u8 : The value of Orient blocking angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_theta(
+u8 v_orient_mbl_theta_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT OUTPUT ENABLE CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read orient_mbl change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_mbl_ud_u8 : The value of orient_mbl change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_ud_enable(
+u8 *v_orient_mbl_ud_u8);
+/*!
+ *	@brief This API write orient_mbl change
+ *	of up/down bit from the register 0x66 bit 6
+ *
+ *  @param v_orient_mbl_ud_u8 : The value of orient_mbl change of up/down
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | Is ignored
+ *  0x01     | Generates orientation interrupt
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_ud_enable(
+u8 v_orient_mbl_ud_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR ORIENT AXIS ENABLE CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_mbl_axes_u8 : The value of orient_mbl axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_mbl_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_mbl_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_orient_mbl_axes_enable(
+u8 *v_orient_mbl_axes_u8);
+ /*!
+ *	@brief This API write orientation axes changes
+ *	from the register 0x66 bit 7
+ *
+ *  @param v_orient_mbl_axes_u8 : The value of orient_mbl axes assignment
+ *	value    |       Behaviour    | Name
+ * ----------|--------------------|------
+ *  0x00     | x = x, y = y, z = z|orient_mbl_ax_noex
+ *  0x01     | x = y, y = z, z = x|orient_mbl_ax_ex
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_orient_mbl_axes_enable(
+u8 v_orient_mbl_axes_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR FLAT THETA CONFIGURATION*/
+/***************************************************************/
+ /*!
+ *	@brief This API read Flat angle (0 to 44.8) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat_theta(
+u8 *v_flat_theta_u8);
+ /*!
+ *	@brief This API write Flat angle (0 to 44.8) for flat interrupt
+ *	from the register 0x67 bit 0 to 5
+ *
+ *  @param v_flat_theta_u8 : The value of flat angle
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat_theta(
+u8 v_flat_theta_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR FLAT HOLD CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read Flat interrupt hold time;
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat_hold(
+u8 *v_flat_hold_u8);
+/*!
+ *	@brief This API write Flat interrupt hold time;
+ *	from the register 0x68 bit 4 and 5
+ *
+ *  @param v_flat_hold_u8 : The value of flat hold time
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | 0ms
+ *  0x01     | 512ms
+ *  0x02     | 1024ms
+ *  0x03     | 2048ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat_hold(
+u8 v_flat_hold_u8);
+/***************************************************************/
+/**\name	FUNCTION FOR FLAT HYSTERESIS CONFIGURATION*/
+/***************************************************************/
+/*!
+ *	@brief This API read flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_intr_flat_hyst(
+u8 *v_flat_hyst_u8);
+/*!
+ *	@brief This API write flat interrupt hysteresis
+ *	from the register 0x68 bit 0 to 3
+ *
+ *  @param v_flat_hyst_u8 : The value of flat hysteresis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_intr_flat_hyst(
+u8 v_flat_hyst_u8);
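+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised, of a
+ *	flat-detection setup; the flat angle code 0x08 is an arbitrary
+ *	illustration, combined with a 512 ms hold time and hysteresis 1.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_set_intr_flat_theta(0x08);
+ *	com_rslt += smi130_set_intr_flat_hold(0x01);
+ *	com_rslt += smi130_set_intr_flat_hyst(0x01);
+*/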
+/***************************************************************/
+/**\name	FUNCTION FAST OFFSET COMPENSATION FOR ACCEL */
+/***************************************************************/
+ /*!
+ *	@brief This API read accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_accel_z(
+u8 *v_foc_accel_z_u8);
+ /*!
+ *	@brief This API write accel offset compensation
+ *	target value for z-axis from the register 0x69 bit 0 and 1
+ *
+ *  @param v_foc_accel_z_u8 : the value of accel offset compensation z axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_accel_z(
+u8 v_foc_accel_z_u8);
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_accel_y(
+u8 *v_foc_accel_y_u8);
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for y-axis
+ *	from the register 0x69 bit 2 and 3
+ *
+ *  @param v_foc_accel_y_u8 : the value of accel offset compensation y axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_accel_y(
+u8 v_foc_accel_y_u8);
+/*!
+ *	@brief This API read accel offset compensation
+ *	target value for x-axis is
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_accel_x(
+u8 *v_foc_accel_x_u8);
+/*!
+ *	@brief This API write accel offset compensation
+ *	target value for x-axis is
+ *	from the register 0x69 bit 4 and 5
+ *
+ *  @param v_foc_accel_x_u8 : the value of accel offset compensation x axis
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_accel_x(
+u8 v_foc_accel_x_u8);
+/***************************************************************/
+/**\name	FUNCTION FAST OFFSET COMPENSATION FOR GYRO */
+/***************************************************************/
+/*!
+ *	@brief This API write gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *	@param v_gyro_off_x_s16 : The value of gyro fast offset x axis data
+ *	@param v_gyro_off_y_s16 : The value of gyro fast offset y axis data
+ *	@param v_gyro_off_z_s16 : The value of gyro fast offset z axis data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_foc_gyro_enable(
+u8 v_foc_gyro_u8, s16 *v_gyro_off_x_s16,
+s16 *v_gyro_off_y_s16, s16 *v_gyro_off_z_s16);
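+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised and kept
+ *	stationary while fast offset compensation runs; the per-axis
+ *	gyro offsets are returned through the pointer arguments.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *	s16 v_gyro_off_x_s16 = 0;
+ *	s16 v_gyro_off_y_s16 = 0;
+ *	s16 v_gyro_off_z_s16 = 0;
+ *
+ *	com_rslt = smi130_set_foc_gyro_enable(0x01,
+ *	&v_gyro_off_x_s16, &v_gyro_off_y_s16, &v_gyro_off_z_s16);
+*/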
+/***************************************************/
+/**\name	FUNCTION FOR NVM*/
+/***************************************************/
+ /*!
+ *	@brief This API read NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_nvm_prog_enable(
+u8 *v_nvm_prog_u8);
+ /*!
+ *	@brief This API write NVM program enable
+ *	from the register 0x6A bit 1
+ *
+ *  @param v_nvm_prog_u8 : The value of NVM program enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_nvm_prog_enable(
+u8 v_nvm_prog_u8);
+/***************************************************/
+/**\name	FUNCTION FOR SPI MODE*/
+/***************************************************/
+/*!
+ * @brief This API read to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_get_spi3(
+u8 *v_spi3_u8);
+/*!
+ * @brief This API write to configure SPI
+ * Interface Mode for primary and OIS interface
+ * from the register 0x6B bit 0
+ *
+ *  @param v_spi3_u8 : The value of SPI mode selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  SPI 4-wire mode
+ *   1     |  SPI 3-wire mode
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_spi3(
+u8 v_spi3_u8);
+/***************************************************/
+/**\name	FUNCTION FOR FOC GYRO */
+/***************************************************/
+/*!
+ *	@brief This API read gyro fast offset enable
+ *	from the register 0x69 bit 6
+ *
+ *  @param v_foc_gyro_u8 : The value of gyro fast offset enable
+ *  value    |  Description
+ * ----------|-------------
+ *    0      | fast offset compensation disabled
+ *    1      |  fast offset compensation enabled
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_foc_gyro_enable(
+u8 *v_foc_gyro_u8);
+/***************************************************/
+/**\name	FUNCTION FOR I2C WATCHDOG TIMER */
+/***************************************************/
+/*!
+ *	@brief This API read I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog timeout after 1 ms
+ *   1     |  I2C watchdog timeout after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_wdt_select(
+u8 *v_i2c_wdt_u8);
+/*!
+ *	@brief This API write I2C Watchdog timer
+ *	from the register 0x70 bit 1
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watch dog timer
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  I2C watchdog timeout after 1 ms
+ *   1     |  I2C watchdog timeout after 50 ms
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE
+smi130_set_i2c_wdt_select(u8 v_i2c_wdt_u8);
+/*!
+ *	@brief This API read I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_i2c_wdt_enable(
+u8 *v_i2c_wdt_u8);
+/*!
+ *	@brief This API write I2C watchdog enable
+ *	from the register 0x70 bit 2
+ *
+ *  @param v_i2c_wdt_u8 : The value of I2C watchdog enable
+ *  Value  |  Description
+ * --------|-------------
+ *   0     |  DISABLE
+ *   1     |  ENABLE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_i2c_wdt_enable(
+u8 v_i2c_wdt_u8);
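+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised and I2C is
+ *	the primary interface, that selects the 50 ms watchdog timeout
+ *	and then enables the I2C watchdog.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_set_i2c_wdt_select(0x01);
+ *	com_rslt += smi130_set_i2c_wdt_enable(0x01);
+*/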
+/***************************************************/
+/**\name	FUNCTION FOR IF MODE*/
+/***************************************************/
+/*!
+ * @brief This API read I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_if_mode(
+u8 *v_if_mode_u8);
+/*!
+ * @brief This API write I2C interface configuration (if) mode
+ * from the register 0x6B bit 4 and 5
+ *
+ *  @param  v_if_mode_u8 : The value of interface configuration mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  |  Primary interface:autoconfig / secondary interface:off
+ *   0x01  |  Primary interface:I2C / secondary interface:OIS
+ *   0x02  |  Primary interface:autoconfig/secondary interface:Magnetometer
+ *   0x03  |   Reserved
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_if_mode(
+u8 v_if_mode_u8);
+/***************************************************/
+/**\name	FUNCTION FOR GYRO SLEEP TRIGGER INTERRUPT CONFIGURATION*/
+/***************************************************/
+/*!
+ *	@brief This API read gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | anymotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | anymotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | anymotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_sleep_trigger(
+u8 *v_gyro_sleep_trigger_u8);
+/*!
+ *	@brief This API write gyro sleep trigger
+ *	from the register 0x6C bit 0 to 2
+ *
+ *  @param v_gyro_sleep_trigger_u8 : The value of gyro sleep trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | nomotion: no / Not INT1 pin: no / INT2 pin: no
+ *   0x01  | nomotion: no / Not INT1 pin: no / INT2 pin: yes
+ *   0x02  | nomotion: no / Not INT1 pin: yes / INT2 pin: no
+ *   0x03  | nomotion: no / Not INT1 pin: yes / INT2 pin: yes
+ *   0x04  | nomotion: yes / Not INT1 pin: no / INT2 pin: no
+ *   0x05  | anymotion: yes / Not INT1 pin: no / INT2 pin: yes
+ *   0x06  | anymotion: yes / Not INT1 pin: yes / INT2 pin: no
+ *   0x07  | anymotion: yes / Not INT1 pin: yes / INT2 pin: yes
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_sleep_trigger(
+u8 v_gyro_sleep_trigger_u8);
+/*!
+ *	@brief This API read gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_wakeup_trigger(
+u8 *v_gyro_wakeup_trigger_u8);
+/*!
+ *	@brief This API write gyro wakeup trigger
+ *	from the register 0x6C bit 3 and 4
+ *
+ *  @param v_gyro_wakeup_trigger_u8 : The value of gyro wakeup trigger
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | anymotion: no / INT1 pin: no
+ *   0x01  | anymotion: no / INT1 pin: yes
+ *   0x02  | anymotion: yes / INT1 pin: no
+ *   0x03  | anymotion: yes / INT1 pin: yes
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_wakeup_trigger(
+u8 v_gyro_wakeup_trigger_u8);
+/*!
+ *	@brief This API read Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_sleep_state(
+u8 *v_gyro_sleep_state_u8);
+/*!
+ *	@brief This API write Target state for gyro sleep mode
+ *	from the register 0x6C bit 5
+ *
+ *  @param v_gyro_sleep_state_u8 : The value of gyro sleep mode
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | Sleep transition to fast wake up state
+ *   0x01  | Sleep transition to suspend state
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_sleep_state(
+u8 v_gyro_sleep_state_u8);
+/*!
+ *	@brief This API read gyro wakeup interrupt
+ *	from the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_wakeup_intr(
+u8 *v_gyro_wakeup_intr_u8);
+/*!
+ *	@brief This API write gyro wakeup interrupt
+ *	from the register 0x6C bit 6
+ *
+ *  @param v_gyro_wakeup_intr_u8 : The value of gyro wakeup interrupt
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | DISABLE
+ *   0x01  | ENABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_wakeup_intr(
+u8 v_gyro_wakeup_intr_u8);
+/***************************************************/
+/**\name	FUNCTION FOR ACCEL SELF TEST */
+/***************************************************/
+/*!
+ * @brief This API read accel select axis to be self-test
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_selftest_axis(
+u8 *acc_selftest_axis);
+/*!
+ * @brief This API write accel select axis to be self-test
+ *
+ *  @param v_accel_selftest_axis_u8 :
+ *	The value of accel self test axis selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | disabled
+ *   0x01  | x-axis
+ *   0x02  | y-axis
+ *   0x03  | z-axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_selftest_axis(
+u8 acc_selftest_axis);
+/*!
+ *	@brief This API read accel self test axis sign
+ *	from the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_selftest_sign(
+u8 *acc_selftest_sign);
+/*!
+ *	@brief This API write accel self test axis sign
+ *	from the register 0x6D bit 2
+ *
+ *  @param v_accel_selftest_sign_u8: The value of accel self test axis sign
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | negative
+ *   0x01  | positive
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_selftest_sign(
+u8 acc_selftest_sign);
+/*!
+ *	@brief This API read accel self test amplitude
+ *	from the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_selftest_amp(
+u8 *acc_selftest_amp);
+/*!
+ *	@brief This API write accel self test amplitude
+ *	from the register 0x6D bit 3
+ *        select amplitude of the selftest deflection:
+ *
+ *  @param v_accel_selftest_amp_u8 : The value of accel self test amplitude
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | LOW
+ *   0x01  | HIGH
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_selftest_amp(
+u8 acc_selftest_amp);
+/***************************************************/
+/**\name	FUNCTION FOR GYRO SELF TEST */
+/***************************************************/
+/*!
+ *	@brief This API read gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_selftest_start(
+u8 *v_gyro_selftest_start_u8);
+/*!
+ *	@brief This API write gyro self test trigger
+ *
+ *	@param v_gyro_selftest_start_u8: The value of gyro self test start
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_selftest_start(
+u8 v_gyro_selftest_start_u8);
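+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised, that arms the
+ *	accel self-test on the x-axis with positive sign and high amplitude
+ *	and then starts the gyro self-test; result evaluation is separate.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_set_accel_selftest_axis(0x01);
+ *	com_rslt += smi130_set_accel_selftest_sign(0x01);
+ *	com_rslt += smi130_set_accel_selftest_amp(0x01);
+ *	com_rslt += smi130_set_gyro_selftest_start(0x01);
+*/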
+/***************************************************/
+/**\name	FUNCTION FOR SPI/I2C ENABLE */
+/***************************************************/
+ /*!
+ * @brief This API read primary interface selection I2C or SPI
+ *	from the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_spi_enable(
+u8 *v_spi_enable_u8);
+ /*!
+ * @brief This API write primary interface selection I2C or SPI
+ *	from the register 0x70 bit 0
+ *
+ *  @param v_spi_enable_u8: The value of Interface selection
+ *  Value  |  Description
+ * --------|-------------
+ *   0x00  | I2C Enable
+ *   0x01  | I2C DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_spi_enable(
+u8 v_spi_enable_u8);
+ /*!
+ *	@brief This API read the spare zero
+ *	from register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_spare0_trim
+(u8 *v_spare0_trim_u8);
+ /*!
+ *	@brief This API write the spare zero
+ *	from register 0x70 bit 3
+ *
+ *
+ *  @param v_spare0_trim_u8: The value of spare zero
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_spare0_trim
+(u8 v_spare0_trim_u8);
+/***************************************************/
+/**\name	FUNCTION FOR NVM COUNTER */
+/***************************************************/
+ /*!
+ *	@brief This API read the NVM counter
+ *	from register 0x70 bit 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_nvm_counter(
+u8 *v_nvm_counter_u8);
+ /*!
+ *	@brief This API write the NVM counter
+ *	from register 0x70 bit 4 to 7
+ *
+ *
+ *  @param v_nvm_counter_u8: The value of NVM counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_nvm_counter(
+u8 v_nvm_counter_u8);
+/***************************************************/
+/**\name	FUNCTION FOR ACCEL MANUAL OFFSET COMPENSATION */
+/***************************************************/
+/*!
+ *	@brief This API read accel manual offset compensation of x axis
+ *	from the register 0x71 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_compensation_xaxis(
+s8 *v_accel_off_x_s8);
+/*!
+ *	@brief This API write accel manual offset compensation of x axis
+ *	from the register 0x71 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_x_s8:
+ *	The value of accel manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_compensation_xaxis(
+s8 v_accel_off_x_s8);
+/*!
+ *	@brief This API read accel manual offset compensation of y axis
+ *	from the register 0x72 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_compensation_yaxis(
+s8 *v_accel_off_y_s8);
+/*!
+ *	@brief This API write accel manual offset compensation of y axis
+ *	from the register 0x72 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_y_s8:
+ *	The value of accel manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_compensation_yaxis(
+s8 v_accel_off_y_s8);
+/*!
+ *	@brief This API read accel manual offset compensation of z axis
+ *	from the register 0x73 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_compensation_zaxis(
+s8 *v_accel_off_z_s8);
+/*!
+ *	@brief This API write accel manual offset compensation of z axis
+ *	from the register 0x73 bit 0 to 7
+ *
+ *
+ *
+ *  @param v_accel_off_z_s8:
+ *	The value of accel manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_compensation_zaxis(
+s8 v_accel_off_z_s8);
+/***************************************************/
+/**\name	FUNCTION FOR GYRO MANUAL OFFSET COMPENSATION */
+/***************************************************/
+/*!
+ *	@brief This API read gyro manual offset compensation of x axis
+ *	from the register 0x74 bit 0 to 7 and 0x77 bit 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_compensation_xaxis(
+s16 *v_gyro_off_x_s16);
+/*!
+ *	@brief This API write gyro manual offset compensation of x axis
+ *	from the register 0x74 bit 0 to 7 and 0x77 bit 0 and 1
+ *
+ *
+ *
+ *  @param v_gyro_off_x_s16:
+ *	The value of gyro manual offset compensation of x axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_compensation_xaxis(
+s16 v_gyro_off_x_s16);
+/*!
+ *	@brief This API read gyro manual offset compensation of y axis
+ *	from the register 0x75 bit 0 to 7 and 0x77 bit 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_compensation_yaxis(
+s16 *v_gyro_off_y_s16);
+/*!
+ *	@brief This API write gyro manual offset compensation of y axis
+ *	from the register 0x75 bit 0 to 7 and 0x77 bit 2 and 3
+ *
+ *
+ *
+ *  @param v_gyro_off_y_s16:
+ *	The value of gyro manual offset compensation of y axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_compensation_yaxis(
+s16 v_gyro_off_y_s16);
+/*!
+ *	@brief This API read gyro manual offset compensation of z axis
+ *	from the register 0x76 bit 0 to 7 and 0x77 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_compensation_zaxis(
+s16 *v_gyro_off_z_s16);
+/*!
+ *	@brief This API write gyro manual offset compensation of z axis
+ *	from the register 0x76 bit 0 to 7 and 0x77 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_gyro_off_z_s16:
+ *	The value of gyro manual offset compensation of z axis
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_compensation_zaxis(
+s16 v_gyro_off_z_s16);
+/*!
+ *	@brief This API writes accel fast offset compensation
+ *	from the register 0x69 bit 0 to 5
+ *	@brief This API writes each axis individually
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_u8: The value of accel offset compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_axis_u8: The value of accel offset axis selection
+ *	value    | axis
+ * ----------|-------------------
+ *  0        | FOC_X_AXIS
+ *  1        | FOC_Y_AXIS
+ *  2        | FOC_Z_AXIS
+ *
+ *	@param v_accel_offset_s8: The accel offset value
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_foc_trigger(u8 axis,
+u8 foc_acc, s8 *accel_offset);
+/*!
+ *	@brief This API write fast accel offset compensation
+ *	it writes all axis together.To the register 0x69 bit 0 to 5
+ *	FOC_X_AXIS - bit 4 and 5
+ *	FOC_Y_AXIS - bit 2 and 3
+ *	FOC_Z_AXIS - bit 0 and 1
+ *
+ *  @param  v_foc_accel_x_u8: The value of accel offset x compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_y_u8: The value of accel offset y compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_foc_accel_z_u8: The value of accel offset z compensation
+ *	value    | Behaviour
+ * ----------|-------------------
+ *  0x00     | disable
+ *  0x01     | +1g
+ *  0x02     | -1g
+ *  0x03     | 0g
+ *
+ *  @param  v_accel_off_x_s8: The value of accel offset x axis
+ *  @param  v_accel_off_y_s8: The value of accel offset y axis
+ *  @param  v_accel_off_z_s8: The value of accel offset z axis
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_accel_foc_trigger_xyz(u8 v_foc_accel_x_u8,
+u8 v_foc_accel_y_u8, u8 v_foc_accel_z_u8,
+s8 *acc_off_x, s8 *acc_off_y, s8 *acc_off_z);
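+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised and lying flat
+ *	and still, of accel fast offset compensation with x and y targeting
+ *	0g and z targeting +1g; computed offsets are returned via pointers.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *	s8 v_accel_off_x_s8 = 0;
+ *	s8 v_accel_off_y_s8 = 0;
+ *	s8 v_accel_off_z_s8 = 0;
+ *
+ *	com_rslt = smi130_accel_foc_trigger_xyz(0x03, 0x03, 0x01,
+ *	&v_accel_off_x_s8, &v_accel_off_y_s8, &v_accel_off_z_s8);
+*/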
+/***************************************************/
+/**\name	FUNCTION FOR ACCEL AND GYRO OFFSET ENABLE */
+/***************************************************/
+/*!
+ *	@brief This API read the accel offset enable bit
+ *	from the register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_accel_offset_enable(
+u8 *acc_off_en);
+/*!
+ *	@brief This API write the accel offset enable bit
+ *	from the register 0x77 bit 6
+ *
+ *
+ *
+ *  @param v_accel_off_enable_u8: The value of accel offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_accel_offset_enable(
+u8 acc_off_en);
+/*!
+ *	@brief This API read the gyro offset enable bit
+ *	from the register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_gyro_offset_enable(
+u8 *v_gyro_off_enable_u8);
+/*!
+ *	@brief This API write the gyro offset enable bit
+ *	from the register 0x77 bit 7
+ *
+ *
+ *
+ *  @param v_gyro_off_enable_u8: The value of gyro offset enable
+ *  value    |  Description
+ * ----------|--------------
+ *   0x01    | ENABLE
+ *   0x00    | DISABLE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_gyro_offset_enable(
+u8 v_gyro_off_enable_u8);
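+/*!
+ *	@note Usage sketch (illustrative only, not part of the driver API):
+ *	a minimal sketch, assuming the device is initialised, that applies
+ *	previously determined manual offsets and enables offset compensation
+ *	for both sensors; the offset values used here are placeholders.
+ *
+ *	SMI130_RETURN_FUNCTION_TYPE com_rslt;
+ *
+ *	com_rslt = smi130_set_accel_offset_compensation_xaxis(2);
+ *	com_rslt += smi130_set_gyro_offset_compensation_xaxis(-5);
+ *	com_rslt += smi130_set_accel_offset_enable(0x01);
+ *	com_rslt += smi130_set_gyro_offset_enable(0x01);
+*/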
+/***************************************************/
+/**\name	FUNCTION FOR STEP COUNTER INTERRUPT */
+/***************************************************/
+/*!
+ *	@brief This API reads step counter value
+ *	from the registers 0x78 and 0x79
+ *
+ *
+ *
+ *
+ *  @param v_step_cnt_s16 : The value of step counter
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_read_step_count(u16 *v_step_cnt_s16);
+ /*!
+ *	@brief This API Reads
+ *	step counter configuration
+ *	from the register 0x7A bit 0 to 7
+ *	and from the register 0x7B bit 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16 : The value of step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_step_config(
+u16 *v_step_config_u16);
+ /*!
+ *	@brief This API write
+ *	step counter configuration
+ *	from the register 0x7A bit 0 to 7
+ *	and from the register 0x7B bit 0 to 2 and 4 to 7
+ *
+ *
+ *  @param v_step_config_u16   :
+ *	The value of step configuration
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_config(
+u16 v_step_config_u16);
+ /*!
+ *	@brief This API read enable step counter
+ *	from the register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_step_counter_enable(
+u8 *v_step_counter_u8);
+ /*!
+ *	@brief This API write enable step counter
+ *	from the register 0x7B bit 3
+ *
+ *
+ *  @param v_step_counter_u8 : The value of step counter enable
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_counter_enable(
+u8 v_step_counter_u8);
+ /*!
+ *	@brief This API sets the step counter mode
+ *
+ *
+ *  @param  v_step_mode_u8 : The value of step counter mode
+ *  value    |   mode
+ * ----------|-----------
+ *   0       | SMI130_STEP_NORMAL_MODE
+ *   1       | SMI130_STEP_SENSITIVE_MODE
+ *   2       | SMI130_STEP_ROBUST_MODE
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_step_mode(u8 v_step_mode_u8);
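+/*
+ * Illustrative usage sketch (compile-guarded): enable the step counter,
+ * select normal mode (value 0 in the mode table above) and read the current
+ * count.  The SMI130_STEP_* mode macros listed above are assumed to be
+ * defined earlier in this header.
+ */
+#if 0
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_read_steps(u16 *steps)
+{
+	SMI130_RETURN_FUNCTION_TYPE rc;
+
+	rc = smi130_set_step_counter_enable(0x01);
+	if (rc != 0)
+		return rc;
+	rc = smi130_set_step_mode(0);	/* SMI130_STEP_NORMAL_MODE */
+	if (rc != 0)
+		return rc;
+	return smi130_read_step_count(steps);
+}
+#endif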
+/*!
+ *	@brief This API is used to map the significant motion
+ *	interrupt
+ *
+ *
+ *  @param  v_significant_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  SMI130_MAP_INTR1
+ *   1       |  SMI130_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_map_significant_motion_intr(
+u8 v_significant_u8);
+/*!
+ *	@brief This API is used to map the step detector
+ *	interrupt
+ *
+ *
+ *  @param  v_step_detector_u8 : The value of interrupt selection
+ *  value    |  interrupt
+ * ----------|-----------
+ *   0       |  SMI130_MAP_INTR1
+ *   1       |  SMI130_MAP_INTR2
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_map_step_detector_intr(
+u8 v_step_detector_u8);
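+/*
+ * Illustrative usage sketch (compile-guarded): route the significant motion
+ * interrupt to INT1 and the step detector interrupt to INT2, using the
+ * selection values from the tables above.
+ */
+#if 0
+static void smi130_example_map_motion_interrupts(void)
+{
+	smi130_map_significant_motion_intr(0);	/* SMI130_MAP_INTR1 */
+	smi130_map_step_detector_intr(1);	/* SMI130_MAP_INTR2 */
+}
+#endif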
+ /*!
+ *	@brief This API is used to clear the step counter
+ *
+ *
+ *  @param  : None
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_clear_step_counter(void);
+/***************************************************/
+/**\name	FUNCTION FOR STEP COMMAND REGISTER WRITE */
+/***************************************************/
+ /*!
+ *	@brief This API writes value to the register 0x7E bit 0 to 7
+ *
+ *
+ *  @param  v_command_reg_u8 : The value to write command register
+ *  value   |  Description
+ * ---------|--------------------------------------------------------
+ *	0x00	|	Reserved
+ *  0x03	|	Starts fast offset calibration for the accel and gyro
+ *	0x10	|	Sets the PMU mode for the Accelerometer to suspend
+ *	0x11	|	Sets the PMU mode for the Accelerometer to normal
+ *	0x12	|	Sets the PMU mode for the Accelerometer Lowpower
+ *  0x14	|	Sets the PMU mode for the Gyroscope to suspend
+ *	0x15	|	Sets the PMU mode for the Gyroscope to normal
+ *	0x16	|	Reserved
+ *	0x17	|	Sets the PMU mode for the Gyroscope to fast start-up
+ *  0x18	|	Sets the PMU mode for the Magnetometer to suspend
+ *	0x19	|	Sets the PMU mode for the Magnetometer to normal
+ *	0x1A	|	Sets the PMU mode for the Magnetometer to Lowpower
+ *	0xB0	|	Clears all data in the FIFO
+ *  0xB1	|	Resets the interrupt engine
+ *	0xB2	|	step_cnt_clr Clears the step counter
+ *	0xB6	|	Triggers a reset
+ *	0x37	|	See extmode_en_last
+ *	0x9A	|	See extmode_en_last
+ *	0xC0	|	Enable the extended mode
+ *  0xC4	|	Erase NVM cell
+ *	0xC8	|	Load NVM cell
+ *	0xF0	|	Reset acceleration data path
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_command_register(
+u8 v_command_reg_u8);
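+/*
+ * Illustrative usage sketch (compile-guarded): bring the accelerometer and
+ * gyroscope into normal power mode through the command register values
+ * listed in the table above.  Any delay required between commands is left
+ * to the platform integration and is only noted in a comment here.
+ */
+#if 0
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_power_up(void)
+{
+	SMI130_RETURN_FUNCTION_TYPE rc;
+
+	rc = smi130_set_command_register(0x11);	/* accel PMU -> normal */
+	if (rc != 0)
+		return rc;
+	/* wait for the PMU transition here (platform specific delay) */
+	return smi130_set_command_register(0x15);	/* gyro PMU -> normal */
+}
+#endif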
+/***************************************************/
+/**\name	FUNCTION FOR PAGE ENABLE */
+/***************************************************/
+ /*!
+ *	@brief This API reads the target page from the register 0x7F bit 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_target_page(
+u8 *v_target_page_u8);
+ /*!
+ *	@brief This API writes the target page to the register 0x7F bit 4 and 5
+ *
+ *  @param v_target_page_u8: The value of target page
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  User data/configure page
+ *   1      |  Chip level trim/test page
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_target_page(
+u8 v_target_page_u8);
+ /*!
+ *	@brief This API reads the page enable bit from the register 0x7F bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_paging_enable(
+u8 *v_page_enable_u8);
+ /*!
+ *	@brief This API writes the page enable bit to the register 0x7F bit 7
+ *
+ *
+ *
+ *  @param v_page_enable_u8: The value of page enable
+ *  value   |  page
+ * ---------|-----------
+ *   0      |  DISABLE
+ *   1      |  ENABLE
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_paging_enable(
+u8 v_page_enable_u8);
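+/*
+ * Illustrative usage sketch (compile-guarded): temporarily switch to the
+ * chip-level trim/test page (1) and back to the user page (0).  That paging
+ * must be enabled before selecting the target page is an assumption drawn
+ * from the register descriptions above.
+ */
+#if 0
+static void smi130_example_select_trim_page(void)
+{
+	smi130_set_paging_enable(0x01);	/* ENABLE paging */
+	smi130_set_target_page(0x01);	/* chip level trim/test page */
+	/* ... access page 1 registers here ... */
+	smi130_set_target_page(0x00);	/* back to user data page */
+	smi130_set_paging_enable(0x00);	/* DISABLE paging */
+}
+#endif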
+ /*!
+ *	@brief This API reads the
+ *	pull up configuration from the register 0x85 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_get_pullup_configuration(
+u8 *v_control_pullup_u8);
+ /*!
+ *	@brief This API writes the
+ *	pull up configuration to the register 0x85 bit 4 and 5
+ *
+ *
+ *
+ *  @param v_control_pullup_u8: The value of pull up register
+ *
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_pullup_configuration(
+u8 v_control_pullup_u8);
+/***************************************************/
+/**\name	FUNCTION FOR BMM150 */
+/***************************************************/
+ /*!
+ *	@brief This function is used to initialize the BMM150 sensor
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_interface_init(void);
+ /*!
+ *	@brief This function is used to enable the mag power
+ *	control bit
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_wakeup(void);
+ /*!
+ *	@brief This function is used to read the trim values of the magnetometer
+ *
+ *	@note
+ *	Before reading the mag trimming values
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_bmm150_mag_trim_mbl(void);
+ /*!
+ *	@brief This function is used to read the compensated mag data
+ *	Before reading the compensated mag data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_compensate_xyz(
+struct smi130_mag_xyz_s32_t *mag_comp_xyz);
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_compensate_xyz_raw(
+struct smi130_mag_xyz_s32_t *mag_comp_xyz, struct smi130_mag_xyzr_t mag_xyzr);
+
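+/*
+ * Illustrative prerequisite sketch (compile-guarded) for the magnetometer
+ * helpers in this header: the notes repeatedly require that the mag
+ * interface is enabled (interface mode 0x02, queried via smi130_get_if_mode())
+ * and that the secondary interface is not in SUSPEND mode.  The pointer
+ * signatures of the two query helpers and the SUSPEND encoding used below
+ * are assumptions; only smi130_set_command_register() is declared with a
+ * known prototype above.
+ */
+#if 0
+static void smi130_example_mag_prerequisites(void)
+{
+	u8 if_mode = 0;
+	u8 mag_pmu = 0;
+
+	smi130_get_if_mode(&if_mode);		/* expect 0x02 (mag enabled) */
+	smi130_get_mag_pmu_status(&mag_pmu);	/* must not report SUSPEND */
+	if (mag_pmu == 0)			/* assumed SUSPEND encoding */
+		smi130_set_command_register(0x19);	/* mag PMU -> NORMAL */
+}
+#endif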
+/*!
+ *	@brief This API is used to get the compensated BMM150-X data
+ *	the output of X as s32
+ *	Before reading the compensated X data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *
+ *  @param  v_mag_data_x_s16 : The value of mag raw X data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 smi130_bmm150_mag_compensate_X(s16 v_mag_data_x_s16, u16 v_data_r_u16);
+/*!
+ *	@brief This API is used to get the compensated BMM150-Y data
+ *	the output of Y as s32
+ *	Before reading the compensated Y data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *
+ *  @param  v_mag_data_y_s16 : The value of mag raw Y data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Y data value output as s32
+ */
+s32 smi130_bmm150_mag_compensate_Y(s16 v_mag_data_y_s16, u16 v_data_r_u16);
+/*!
+ *	@brief This API is used to get the compensated BMM150-Z data
+ *	the output of Z as s32
+ *	Before reading the compensated Z data
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *
+ *  @param  v_mag_data_z_s16 : The value of mag raw Z data
+ *  @param  v_data_r_u16 : The value of mag R data
+ *
+ *	@return results of compensated Z data value output as s32
+ */
+s32 smi130_bmm150_mag_compensate_Z(s16 v_mag_data_z_s16, u16 v_data_r_u16);
+/*!
+ *	@brief This API is used to set the pre-set modes of the BMM150
+ *	The pre-set mode setting depends on the data rate and the xy and z repetitions
+ *
+ *	@note
+ *	Before set the mag preset mode
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param  v_mode_u8: The value of pre-set mode selection value
+ *  value    |  pre_set mode
+ * ----------|------------
+ *   1       | SMI130_MAG_PRESETMODE_LOWPOWER
+ *   2       | SMI130_MAG_PRESETMODE_REGULAR
+ *   3       | SMI130_MAG_PRESETMODE_HIGHACCURACY
+ *   4       | SMI130_MAG_PRESETMODE_ENHANCED
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_set_bmm150_mag_presetmode(u8 mode);
+/*!
+ *	@brief This function is used to set the magnetometer
+ *	power mode.
+ *	@note
+ *	Before set the mag power mode
+ *	make sure the following two points are addressed
+ *	@note
+ *	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note
+ *	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *	@param v_mag_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | FORCE_MODE
+ *   1       | SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bmm150_mag_set_power_mode(u8 mag_pow_mode);
+ /*!
+ *	@brief This function is used to set the magnetometer
+ *	and secondary interface power mode.
+ *	@note
+ *	Before setting the mag power mode
+ *	make sure the following point is addressed:
+ *		Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of mag power mode
+ *  value    |  mode
+ * ----------|------------
+ *   0       | SMI130_MAG_FORCE_MODE
+ *   1       | SMI130_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_bmm150_mag_and_secondary_if_power_mode(
+u8 v_mag_sec_if_pow_mode_u8);
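+/*
+ * Illustrative usage sketch (compile-guarded): a minimal BMM150 bring-up
+ * sequence built from the helpers declared above: initialize the mag
+ * interface, read the trim data, pick the regular preset mode (value 2 in
+ * the preset table) and fetch one compensated sample.  The members of
+ * struct smi130_mag_xyz_s32_t are not accessed here, so no assumption is
+ * made about its layout.
+ */
+#if 0
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_bmm150_sample(
+struct smi130_mag_xyz_s32_t *xyz)
+{
+	SMI130_RETURN_FUNCTION_TYPE rc;
+
+	rc = smi130_bmm150_mag_interface_init();
+	if (rc != 0)
+		return rc;
+	rc = smi130_read_bmm150_mag_trim_mbl();
+	if (rc != 0)
+		return rc;
+	rc = smi130_set_bmm150_mag_presetmode(2);	/* REGULAR preset */
+	if (rc != 0)
+		return rc;
+	return smi130_bmm150_mag_compensate_xyz(xyz);
+}
+#endif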
+/***************************************************/
+/**\name	FUNCTIONS FOR AKM09911 AND AKM09912*/
+/***************************************************/
+ /*!
+ *	@brief This function is used to initialize
+ *	the AKM09911 and AKM09912 sensors
+ *
+ *
+ *	@param v_akm_i2c_address_u8: The value of device address
+ *	AKM sensor   |  Slave address
+ * --------------|---------------------
+ *  AKM09911     |  AKM09911_I2C_ADDR_1
+ *     -         |  and AKM09911_I2C_ADDR_2
+ *  AKM09912     |  AKM09912_I2C_ADDR_1
+ *     -         |  AKM09912_I2C_ADDR_2
+ *     -         |  AKM09912_I2C_ADDR_3
+ *     -         |  AKM09912_I2C_ADDR_4
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm_mag_interface_init(
+u8 v_akm_i2c_address_u8);
+ /*!
+ *	@brief This function is used to read the sensitivity data of
+ *	the AKM09911 and AKM09912
+ *
+ *	@note Before reading the mag sensitivity values
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_read_bosch_akm_sensitivity_data(void);
+/*!
+ *	@brief This API is used to get the compensated X data
+ *	of the AKM09911, the output of X as s32
+ *	@note	Before reading the compensated X data
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09911_compensate_X(s16 v_bosch_akm_x_s16);
+/*!
+ *	@brief This API is used to get the compensated Y data
+ *	of the AKM09911, the output of Y as s32
+ *	@note	Before reading the compensated Y data
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09911_compensate_Y(s16 v_bosch_akm_y_s16);
+/*!
+ *	@brief This API is used to get the compensated Z data
+ *	of the AKM09911, the output of Z as s32
+ *	@note	Before reading the compensated Z data
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09911_compensate_Z(s16 v_bosch_akm_z_s16);
+/*!
+ *	@brief This API is used to get the compensated X data
+ *	of the AKM09912, the output of X as s32
+ *	@note	Before reading the compensated X data
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_x_s16 : The value of X data
+ *
+ *	@return results of compensated X data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09912_compensate_X(s16 v_bosch_akm_x_s16);
+/*!
+ *	@brief This API is used to get the compensated Y data
+ *	of the AKM09912, the output of Y as s32
+ *	@note	Before reading the compensated Y data
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_y_s16 : The value of Y data
+ *
+ *	@return results of compensated Y data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09912_compensate_Y(s16 v_bosch_akm_y_s16);
+/*!
+ *	@brief This API is used to get the compensated Z data
+ *	of the AKM09912, the output of Z as s32
+ *	@note	Before reading the compensated Z data
+ *			make sure the following two points are addressed
+ *	@note 1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note 2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *
+ *  @param v_bosch_akm_z_s16 : The value of Z data
+ *
+ *	@return results of compensated Z data value output as s32
+ *
+ */
+s32 smi130_bosch_akm09912_compensate_Z(s16 v_bosch_akm_z_s16);
+ /*!
+ *	@brief This function is used to read the compensated value of
+ *	the AKM09911
+ *	@note Before reading the compensated mag data
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm09911_compensate_xyz(
+struct smi130_mag_xyz_s32_t *bosch_akm_xyz);
+ /*!
+ *	@brief This function is used to read the compensated value of
+ *	the AKM09912
+ *	@note Before reading the compensated mag data
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm09912_compensate_xyz(
+struct smi130_mag_xyz_s32_t *bosch_akm_xyz);
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm09912_compensate_xyz_raw(
+struct smi130_mag_xyz_s32_t *bosch_akm_xyz);
+/*!
+ *	@brief This function is used to set the AKM09911 and AKM09912
+ *	power mode.
+ *	@note Before setting the AKM power mode
+ *	make sure the following two points are addressed
+ *	@note	1.	Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *	@note	2.	And also confirm the secondary-interface power mode
+ *		is not in the SUSPEND mode.
+ *		by using the function smi130_get_mag_pmu_status().
+ *		If the secondary-interface power mode is in SUSPEND mode
+ *		set the value of 0x19(NORMAL mode)by using the
+ *		smi130_set_command_register(0x19) function.
+ *
+ *	@param v_akm_pow_mode_u8 : The value of akm power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  AKM_POWER_DOWN_MODE
+ *    1     |  AKM_SINGLE_MEAS_MODE
+ *    2     |  FUSE_ROM_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_akm_set_powermode(u8 v_akm_pow_mode_u8);
+ /*!
+ *	@brief This function is used to set the magnetometer
+ *	power mode of the AKM09911 and AKM09912
+ *	@note Before setting the mag power mode
+ *	make sure the following point is addressed:
+ *		Make sure the mag interface is enabled or not,
+ *		by using the smi130_get_if_mode() function.
+ *		If mag interface is not enabled set the value of 0x02
+ *		to the function smi130_get_if_mode(0x02)
+ *
+ *	@param v_mag_sec_if_pow_mode_u8 : The value of secondary if power mode
+ *  value   |    Description
+ * ---------|--------------------
+ *    0     |  SMI130_MAG_FORCE_MODE
+ *    1     |  SMI130_MAG_SUSPEND_MODE
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_set_bosch_akm_and_secondary_if_powermode(
+u8 v_mag_sec_if_pow_mode_u8);
+/***************************************************/
+/**\name	FUNCTIONS FOR YAMAHA-YAS532 */
+/***************************************************/
+/*!
+ *	@brief This function is used to initialize the YAMAHA-YAS532 mag interface
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas532_mag_interface_init(
+void);
+/*!
+ *	@brief This function is used to set the YAS532 initial values
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_set_initial_values(void);
+/*!
+ *	@brief This function is used for the YAS532 offset correction
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_magnetic_measure_set_offset(
+void);
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS532 calibration data
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas532_calib_values(void);
+/*!
+ *	@brief This function is used to convert the
+ *	YAS532 xy1y2 readings to linear data
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_xy1y2_to_linear(
+u16 *v_xy1y2_u16, s32 *xy1y2_linear);
+/*!
+ *	@brief This function is used to read the YAS532 sensor data
+ *	@param	v_acquisition_command_u8: used to set the data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *	@param	v_busy_u8 : used to get the busy flag for the sensor data read
+ *	@param	v_temp_u16 : used to get the temperature data
+ *	@param	v_xy1y2_u16 : used to get the sensor xy1y2 data
+ *	@param	v_overflow_u8 : used to get the overflow data
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_normal_measurement_data(
+u8 v_acquisition_command_u8, u8 *v_busy_u8,
+u16 *v_temp_u16, u16 *v_xy1y2_u16, u8 *v_overflow_u8);
+/*!
+ *	@brief This function is used to read the YAS532 xyz measurement data
+ *	@param	v_acquisition_command_u8	:	the value of CMDR
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ * @param xyz_data : the vector xyz output
+ * @param v_overflow_s8 : the value of overflow
+ * @param v_temp_correction_u8 : the value of temperature correction enable
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_measurement_xyz_data(
+struct yas532_vector *xyz_data, u8 *v_overflow_s8, u8 v_temp_correction_u8,
+u8 v_acquisition_command_u8);
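+/*
+ * Illustrative usage sketch (compile-guarded): initialize the YAS532
+ * interface and take one temperature-corrected measurement in normal
+ * acquisition mode (command 0x01 in the table above).  The members of
+ * struct yas532_vector are not accessed here, so no assumption is made
+ * about its layout.
+ */
+#if 0
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_yas532_sample(
+struct yas532_vector *xyz)
+{
+	SMI130_RETURN_FUNCTION_TYPE rc;
+	u8 overflow = 0;
+
+	rc = smi130_bosch_yamaha_yas532_mag_interface_init();
+	if (rc != 0)
+		return rc;
+	return smi130_bosch_yas532_measurement_xyz_data(xyz, &overflow,
+			1 /* temp correction on */, 0x01 /* normal mode */);
+}
+#endif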
+/*!
+ *	@brief This function is used to write the YAS532 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_acquisition_command_register(
+u8 v_command_reg_data_u8);
+/*!
+ *	@brief This function is used to write the offset of the YAS532
+ *
+ *	@param	p_offset_s8	: The value of offset to write
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas532_set_offset(
+const s8 *p_offset_s8);
+/*!
+ *	@brief This function is used to initialize the YAMAHA-YAS537
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+*/
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_mag_interface_init(
+void);
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS537 calibration data
+ *
+ *
+ *	@param v_rcoil_u8 : The value of r coil
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_calib_values(
+u8 v_rcoil_u8);
+/*!
+ *	@brief This function is used to write the YAS537 data acquisition
+ *	command register
+ *	@param	v_command_reg_data_u8	:	the value of data acquisition
+ *	acquisition_command  |   operation
+ *  ---------------------|-------------------------
+ *         0x17          | turn on the acquisition coil
+ *         -             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Deferred acquisition mode
+ *        0x07           | turn on the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as minus(-))
+ *         _             | Normal acquisition mode
+ *        0x11           | turn OFF the acquisition coil
+ *         _             | set direction of the coil
+ *         _             | (x and y as plus(+))
+ *         _             | Deferred acquisition mode
+ *       0x01            | turn OFF the acquisition coil
+ *        _              | set direction of the coil
+ *        _              | (x and y as plus(+))
+ *        _              | Normal acquisition mode
+ *
+ *
+ *
+  *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yas537_acquisition_command_register(
+u8 v_command_reg_data_u8);
+
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS537 xy1y2 data
+ *
+ *	@param v_coil_stat_u8: The value of R coil status
+ *	@param v_busy_u8: The value of busy status
+ *	@param v_temperature_u16: The value of temperature
+ *	@param xy1y2: The value of raw xy1y2 data
+ *	@param v_ouflow_u8: The value of overflow
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_read_xy1y2_data(
+u8 *v_coil_stat_u8, u8 *v_busy_u8,
+u16 *v_temperature_u16, u16 *xy1y2, u8 *v_ouflow_u8);
+/*!
+ *	@brief This function is used to read the
+ *	YAMAHA YAS537 xyz measurement data
+ *
+ *	@param v_ouflow_u8: The value of overflow
+ *
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_bosch_yamaha_yas537_measure_xyz_data(
+u8 *v_ouflow_u8, struct yas_vector *vector_xyz);
+
+/***************************************************/
+/**\name	FUNCTIONS FOR FIFO DATA READ */
+/***************************************************/
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in header-less mode
+ *
+ *
+ *
+ *	@note Configure the below functions for FIFO header less mode
+ *	@note 1. smi130_set_fifo_down_gyro
+ *	@note 2. smi130_set_gyro_fifo_filter_data
+ *	@note 3. smi130_set_fifo_down_accel
+ *	@note 4. smi130_set_accel_fifo_filter_dat
+ *	@note 5. smi130_set_fifo_mag_enable
+ *	@note 6. smi130_set_fifo_accel_enable
+ *	@note 7. smi130_set_fifo_gyro_enable
+ *	@note For interrupt configuration
+ *	@note 1. smi130_set_intr_fifo_full
+ *	@note 2. smi130_set_intr_fifo_wm
+ *	@note 3. smi130_set_fifo_tag_intr2_enable
+ *	@note 4. smi130_set_fifo_tag_intr1_enable
+ *
+ *	@note The function reads the whole 1024-byte FIFO
+ *	and processes the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_read_fifo_headerless_mode(
+void);
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in header-less mode with a user-defined length
+ *
+ *
+ *	@param v_fifo_user_length_u16: The value of length of fifo read data
+ *
+ *	@note Configure the below functions for FIFO header less mode
+ *	@note 1. smi130_set_fifo_down_gyro
+ *	@note 2. smi130_set_gyro_fifo_filter_data
+ *	@note 3. smi130_set_fifo_down_accel
+ *	@note 4. smi130_set_accel_fifo_filter_dat
+ *	@note 5. smi130_set_fifo_mag_enable
+ *	@note 6. smi130_set_fifo_accel_enable
+ *	@note 7. smi130_set_fifo_gyro_enable
+ *	@note For interrupt configuration
+ *	@note 1. smi130_set_intr_fifo_full
+ *	@note 2. smi130_set_intr_fifo_wm
+ *	@note 3. smi130_set_fifo_tag_intr2_enable
+ *	@note 4. smi130_set_fifo_tag_intr1_enable
+ *
+ *	@note The function reads the whole 1024-byte FIFO
+ *	and processes the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE
+smi130_read_fifo_headerless_mode_user_defined_length(
+u16 v_fifo_user_length_u16);
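+/*
+ * Illustrative usage sketch (compile-guarded): the FIFO configuration
+ * helpers named in the notes above are assumed here to take a single u8
+ * enable flag; only the user-defined-length read has a prototype in this
+ * header.  Interrupt configuration is left out of this sketch.
+ */
+#if 0
+static SMI130_RETURN_FUNCTION_TYPE smi130_example_fifo_headerless_read(void)
+{
+	smi130_set_fifo_accel_enable(0x01);	/* accel frames into the FIFO */
+	smi130_set_fifo_gyro_enable(0x01);	/* gyro frames into the FIFO */
+	return smi130_read_fifo_headerless_mode_user_defined_length(1024);
+}
+#endif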
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in header mode
+ *
+ *
+ *	@note Configure the below functions for FIFO header mode
+ *	@note 1. smi130_set_fifo_down_gyro()
+ *	@note 2. smi130_set_gyro_fifo_filter_data()
+ *	@note 3. smi130_set_fifo_down_accel()
+ *	@note 4. smi130_set_accel_fifo_filter_dat()
+ *	@note 5. smi130_set_fifo_mag_enable()
+ *	@note 6. smi130_set_fifo_accel_enable()
+ *	@note 7. smi130_set_fifo_gyro_enable()
+ *	@note 8. smi130_set_fifo_header_enable()
+ *	@note For interrupt configuration
+ *	@note 1. smi130_set_intr_fifo_full()
+ *	@note 2. smi130_set_intr_fifo_wm()
+ *	@note 3. smi130_set_fifo_tag_intr2_enable()
+ *	@note 4. smi130_set_fifo_tag_intr1_enable()
+ *
+ *	@note The function reads the whole 1024-byte FIFO
+ *	and processes the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_read_fifo_header_data(
+void);
+/*!
+ *	@brief This function is used to read the
+ *	FIFO data in header mode with a user-defined length
+ *
+ *
+ *	@note Configure the below functions for FIFO header mode
+ *	@note 1. smi130_set_fifo_down_gyro()
+ *	@note 2. smi130_set_gyro_fifo_filter_data()
+ *	@note 3. smi130_set_fifo_down_accel()
+ *	@note 4. smi130_set_accel_fifo_filter_dat()
+ *	@note 5. smi130_set_fifo_mag_enable()
+ *	@note 6. smi130_set_fifo_accel_enable()
+ *	@note 7. smi130_set_fifo_gyro_enable()
+ *	@note 8. smi130_set_fifo_header_enable()
+ *	@note For interrupt configuration
+ *	@note 1. smi130_set_intr_fifo_full()
+ *	@note 2. smi130_set_intr_fifo_wm()
+ *	@note 3. smi130_set_fifo_tag_intr2_enable()
+ *	@note 4. smi130_set_fifo_tag_intr1_enable()
+ *
+ *	@note The function reads the whole 1024-byte FIFO
+ *	and processes the data
+ *
+ *	@return results of bus communication function
+ *	@retval 0 -> Success
+ *	@retval -1 -> Error
+ *
+ *
+ */
+SMI130_RETURN_FUNCTION_TYPE smi130_read_fifo_header_data_user_defined_length(
+u16 v_fifo_user_length_u16);
+/*!
+ *	@brief This function is used to get a pointer to the
+ *	smi130_t structure
+ *
+ *  @return pointer to the smi130_t structure
+ *
+ *
+*/
+struct smi130_t *smi130_get_ptr(void);
+
+#endif
+
diff --git a/drivers/input/sensors/smi130/smi130_acc.c b/drivers/input/sensors/smi130/smi130_acc.c
new file mode 100644
index 0000000..4828b39
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_acc.c
@@ -0,0 +1,7507 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename smi130_acc.c
+ * @date    2015/11/17 10:32
+ * @Modification Date 2018/08/28 18:20
+ * @id       "836294d"
+ * @version  2.1.2
+ *
+ * @brief
+ * This file contains all function implementations for the SMI_ACC2X2 in linux
+*/
+
+#ifdef CONFIG_SIG_MOTION
+#undef CONFIG_HAS_EARLYSUSPEND
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/irq.h>
+#include <linux/math64.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#else
+#include <unistd.h>
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "boschclass.h"
+#include "bs_log.h"
+#define DRIVER_VERSION "0.0.53.0"
+#define ACC_NAME  "ACC"
+#define SMI_ACC2X2_ENABLE_INT1 1
+#define CONFIG_SMI_ACC_ENABLE_NEWDATA_INT 1
+
+#define SENSOR_NAME                 "smi130_acc"
+#define SMI130_ACC_USE_BASIC_I2C_FUNC        1
+
+#define MSC_TIME                6
+#define ABSMIN                      -512
+#define ABSMAX                      512
+#define SLOPE_THRESHOLD_VALUE       32
+#define SLOPE_DURATION_VALUE        1
+#define INTERRUPT_LATCH_MODE        13
+#define INTERRUPT_ENABLE            1
+#define INTERRUPT_DISABLE           0
+#define MAP_SLOPE_INTERRUPT         2
+#define SLOPE_X_INDEX               5
+#define SLOPE_Y_INDEX               6
+#define SLOPE_Z_INDEX               7
+#define SMI_ACC2X2_MAX_DELAY            200
+#define SMI_ACC2X2_RANGE_SET            3  /* +/- 2G */
+#define SMI_ACC2X2_BW_SET               12 /* 125HZ  */
+
+#define LOW_G_INTERRUPT             REL_Z
+#define HIGH_G_INTERRUPT            REL_HWHEEL
+#define SLOP_INTERRUPT              REL_DIAL
+#define DOUBLE_TAP_INTERRUPT        REL_WHEEL
+#define SINGLE_TAP_INTERRUPT        REL_MISC
+#define ORIENT_INTERRUPT            ABS_PRESSURE
+#define FLAT_INTERRUPT              ABS_DISTANCE
+#define SLOW_NO_MOTION_INTERRUPT    REL_Y
+
+#define HIGH_G_INTERRUPT_X_HAPPENED                 1
+#define HIGH_G_INTERRUPT_Y_HAPPENED                 2
+#define HIGH_G_INTERRUPT_Z_HAPPENED                 3
+#define HIGH_G_INTERRUPT_X_NEGATIVE_HAPPENED        4
+#define HIGH_G_INTERRUPT_Y_NEGATIVE_HAPPENED        5
+#define HIGH_G_INTERRUPT_Z_NEGATIVE_HAPPENED        6
+#define SLOPE_INTERRUPT_X_HAPPENED                  7
+#define SLOPE_INTERRUPT_Y_HAPPENED                  8
+#define SLOPE_INTERRUPT_Z_HAPPENED                  9
+#define SLOPE_INTERRUPT_X_NEGATIVE_HAPPENED         10
+#define SLOPE_INTERRUPT_Y_NEGATIVE_HAPPENED         11
+#define SLOPE_INTERRUPT_Z_NEGATIVE_HAPPENED         12
+#define DOUBLE_TAP_INTERRUPT_HAPPENED               13
+#define SINGLE_TAP_INTERRUPT_HAPPENED               14
+#define UPWARD_PORTRAIT_UP_INTERRUPT_HAPPENED       15
+#define UPWARD_PORTRAIT_DOWN_INTERRUPT_HAPPENED     16
+#define UPWARD_LANDSCAPE_LEFT_INTERRUPT_HAPPENED    17
+#define UPWARD_LANDSCAPE_RIGHT_INTERRUPT_HAPPENED   18
+#define DOWNWARD_PORTRAIT_UP_INTERRUPT_HAPPENED     19
+#define DOWNWARD_PORTRAIT_DOWN_INTERRUPT_HAPPENED   20
+#define DOWNWARD_LANDSCAPE_LEFT_INTERRUPT_HAPPENED  21
+#define DOWNWARD_LANDSCAPE_RIGHT_INTERRUPT_HAPPENED 22
+#define FLAT_INTERRUPT_TURE_HAPPENED                23
+#define FLAT_INTERRUPT_FALSE_HAPPENED               24
+#define LOW_G_INTERRUPT_HAPPENED                    25
+#define SLOW_NO_MOTION_INTERRUPT_HAPPENED           26
+
+#define PAD_LOWG                    0
+#define PAD_HIGHG                   1
+#define PAD_SLOP                    2
+#define PAD_DOUBLE_TAP              3
+#define PAD_SINGLE_TAP              4
+#define PAD_ORIENT                  5
+#define PAD_FLAT                    6
+#define PAD_SLOW_NO_MOTION          7
+
+#define SMI_ACC2X2_EEP_OFFSET                       0x16
+#define SMI_ACC2X2_IMAGE_BASE                       0x38
+#define SMI_ACC2X2_IMAGE_LEN                        22
+
+#define SMI_ACC2X2_CHIP_ID_REG                      0x00
+#define SMI_ACC2X2_VERSION_REG                      0x01
+#define SMI_ACC2X2_X_AXIS_LSB_REG                   0x02
+#define SMI_ACC2X2_X_AXIS_MSB_REG                   0x03
+#define SMI_ACC2X2_Y_AXIS_LSB_REG                   0x04
+#define SMI_ACC2X2_Y_AXIS_MSB_REG                   0x05
+#define SMI_ACC2X2_Z_AXIS_LSB_REG                   0x06
+#define SMI_ACC2X2_Z_AXIS_MSB_REG                   0x07
+#define SMI_ACC2X2_TEMPERATURE_REG                  0x08
+#define SMI_ACC2X2_STATUS1_REG                      0x09
+#define SMI_ACC2X2_STATUS2_REG                      0x0A
+#define SMI_ACC2X2_STATUS_TAP_SLOPE_REG             0x0B
+#define SMI_ACC2X2_STATUS_ORIENT_HIGH_REG           0x0C
+#define SMI_ACC2X2_STATUS_FIFO_REG                  0x0E
+#define SMI_ACC2X2_RANGE_SEL_REG                    0x0F
+#define SMI_ACC2X2_BW_SEL_REG                       0x10
+#define SMI_ACC2X2_MODE_CTRL_REG                    0x11
+#define SMI_ACC2X2_LOW_NOISE_CTRL_REG               0x12
+#define SMI_ACC2X2_DATA_CTRL_REG                    0x13
+#define SMI_ACC2X2_RESET_REG                        0x14
+#define SMI_ACC2X2_INT_ENABLE1_REG                  0x16
+#define SMI_ACC2X2_INT_ENABLE2_REG                  0x17
+#define SMI_ACC2X2_INT_SLO_NO_MOT_REG               0x18
+#define SMI_ACC2X2_INT1_PAD_SEL_REG                 0x19
+#define SMI_ACC2X2_INT_DATA_SEL_REG                 0x1A
+#define SMI_ACC2X2_INT2_PAD_SEL_REG                 0x1B
+#define SMI_ACC2X2_INT_SRC_REG                      0x1E
+#define SMI_ACC2X2_INT_SET_REG                      0x20
+#define SMI_ACC2X2_INT_CTRL_REG                     0x21
+#define SMI_ACC2X2_LOW_DURN_REG                     0x22
+#define SMI_ACC2X2_LOW_THRES_REG                    0x23
+#define SMI_ACC2X2_LOW_HIGH_HYST_REG                0x24
+#define SMI_ACC2X2_HIGH_DURN_REG                    0x25
+#define SMI_ACC2X2_HIGH_THRES_REG                   0x26
+#define SMI_ACC2X2_SLOPE_DURN_REG                   0x27
+#define SMI_ACC2X2_SLOPE_THRES_REG                  0x28
+#define SMI_ACC2X2_SLO_NO_MOT_THRES_REG             0x29
+#define SMI_ACC2X2_TAP_PARAM_REG                    0x2A
+#define SMI_ACC2X2_TAP_THRES_REG                    0x2B
+#define SMI_ACC2X2_ORIENT_PARAM_REG                 0x2C
+#define SMI_ACC2X2_THETA_BLOCK_REG                  0x2D
+#define SMI_ACC2X2_THETA_FLAT_REG                   0x2E
+#define SMI_ACC2X2_FLAT_HOLD_TIME_REG               0x2F
+#define SMI_ACC2X2_FIFO_WML_TRIG                    0x30
+#define SMI_ACC2X2_SELF_TEST_REG                    0x32
+#define SMI_ACC2X2_EEPROM_CTRL_REG                  0x33
+#define SMI_ACC2X2_SERIAL_CTRL_REG                  0x34
+#define SMI_ACC2X2_EXTMODE_CTRL_REG                 0x35
+#define SMI_ACC2X2_OFFSET_CTRL_REG                  0x36
+#define SMI_ACC2X2_OFFSET_PARAMS_REG                0x37
+#define SMI_ACC2X2_OFFSET_X_AXIS_REG                0x38
+#define SMI_ACC2X2_OFFSET_Y_AXIS_REG                0x39
+#define SMI_ACC2X2_OFFSET_Z_AXIS_REG                0x3A
+#define SMI_ACC2X2_GP0_REG                          0x3B
+#define SMI_ACC2X2_GP1_REG                          0x3C
+#define SMI_ACC2X2_FIFO_MODE_REG                    0x3E
+#define SMI_ACC2X2_FIFO_DATA_OUTPUT_REG             0x3F
+
+#define SMI_ACC2X2_CHIP_ID__POS             0
+#define SMI_ACC2X2_CHIP_ID__MSK             0xFF
+#define SMI_ACC2X2_CHIP_ID__LEN             8
+#define SMI_ACC2X2_CHIP_ID__REG             SMI_ACC2X2_CHIP_ID_REG
+
+#define SMI_ACC2X2_VERSION__POS          0
+#define SMI_ACC2X2_VERSION__LEN          8
+#define SMI_ACC2X2_VERSION__MSK          0xFF
+#define SMI_ACC2X2_VERSION__REG          SMI_ACC2X2_VERSION_REG
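+
+/*
+ * Illustrative sketch (compile-guarded): the __POS/__MSK pairs defined in
+ * this file are meant to be consumed by bit-slice helpers of the following
+ * shape.  The macro names below are hypothetical; the driver presumably
+ * defines its own accessors later in this file.
+ */
+#if 0
+#define EXAMPLE_GET_BITSLICE(regvar, bitname) \
+	(((regvar) & bitname##__MSK) >> bitname##__POS)
+
+#define EXAMPLE_SET_BITSLICE(regvar, bitname, val) \
+	(((regvar) & ~bitname##__MSK) | \
+	(((val) << bitname##__POS) & bitname##__MSK))
+
+/* e.g. chip_id = EXAMPLE_GET_BITSLICE(reg_val, SMI_ACC2X2_CHIP_ID); */
+#endif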
+
+#define SMI130_ACC_SLO_NO_MOT_DUR__POS   2
+#define SMI130_ACC_SLO_NO_MOT_DUR__LEN   6
+#define SMI130_ACC_SLO_NO_MOT_DUR__MSK   0xFC
+#define SMI130_ACC_SLO_NO_MOT_DUR__REG   SMI_ACC2X2_SLOPE_DURN_REG
+
+#define SMI_ACC2X2_NEW_DATA_X__POS          0
+#define SMI_ACC2X2_NEW_DATA_X__LEN          1
+#define SMI_ACC2X2_NEW_DATA_X__MSK          0x01
+#define SMI_ACC2X2_NEW_DATA_X__REG          SMI_ACC2X2_X_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_X14_LSB__POS           2
+#define SMI_ACC2X2_ACC_X14_LSB__LEN           6
+#define SMI_ACC2X2_ACC_X14_LSB__MSK           0xFC
+#define SMI_ACC2X2_ACC_X14_LSB__REG           SMI_ACC2X2_X_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_X12_LSB__POS           4
+#define SMI_ACC2X2_ACC_X12_LSB__LEN           4
+#define SMI_ACC2X2_ACC_X12_LSB__MSK           0xF0
+#define SMI_ACC2X2_ACC_X12_LSB__REG           SMI_ACC2X2_X_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_X10_LSB__POS           6
+#define SMI_ACC2X2_ACC_X10_LSB__LEN           2
+#define SMI_ACC2X2_ACC_X10_LSB__MSK           0xC0
+#define SMI_ACC2X2_ACC_X10_LSB__REG           SMI_ACC2X2_X_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_X8_LSB__POS           0
+#define SMI_ACC2X2_ACC_X8_LSB__LEN           0
+#define SMI_ACC2X2_ACC_X8_LSB__MSK           0x00
+#define SMI_ACC2X2_ACC_X8_LSB__REG           SMI_ACC2X2_X_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_X_MSB__POS           0
+#define SMI_ACC2X2_ACC_X_MSB__LEN           8
+#define SMI_ACC2X2_ACC_X_MSB__MSK           0xFF
+#define SMI_ACC2X2_ACC_X_MSB__REG           SMI_ACC2X2_X_AXIS_MSB_REG
+
+#define SMI_ACC2X2_NEW_DATA_Y__POS          0
+#define SMI_ACC2X2_NEW_DATA_Y__LEN          1
+#define SMI_ACC2X2_NEW_DATA_Y__MSK          0x01
+#define SMI_ACC2X2_NEW_DATA_Y__REG          SMI_ACC2X2_Y_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Y14_LSB__POS           2
+#define SMI_ACC2X2_ACC_Y14_LSB__LEN           6
+#define SMI_ACC2X2_ACC_Y14_LSB__MSK           0xFC
+#define SMI_ACC2X2_ACC_Y14_LSB__REG           SMI_ACC2X2_Y_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Y12_LSB__POS           4
+#define SMI_ACC2X2_ACC_Y12_LSB__LEN           4
+#define SMI_ACC2X2_ACC_Y12_LSB__MSK           0xF0
+#define SMI_ACC2X2_ACC_Y12_LSB__REG           SMI_ACC2X2_Y_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Y10_LSB__POS           6
+#define SMI_ACC2X2_ACC_Y10_LSB__LEN           2
+#define SMI_ACC2X2_ACC_Y10_LSB__MSK           0xC0
+#define SMI_ACC2X2_ACC_Y10_LSB__REG           SMI_ACC2X2_Y_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Y8_LSB__POS           0
+#define SMI_ACC2X2_ACC_Y8_LSB__LEN           0
+#define SMI_ACC2X2_ACC_Y8_LSB__MSK           0x00
+#define SMI_ACC2X2_ACC_Y8_LSB__REG           SMI_ACC2X2_Y_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Y_MSB__POS           0
+#define SMI_ACC2X2_ACC_Y_MSB__LEN           8
+#define SMI_ACC2X2_ACC_Y_MSB__MSK           0xFF
+#define SMI_ACC2X2_ACC_Y_MSB__REG           SMI_ACC2X2_Y_AXIS_MSB_REG
+
+#define SMI_ACC2X2_NEW_DATA_Z__POS          0
+#define SMI_ACC2X2_NEW_DATA_Z__LEN          1
+#define SMI_ACC2X2_NEW_DATA_Z__MSK          0x01
+#define SMI_ACC2X2_NEW_DATA_Z__REG          SMI_ACC2X2_Z_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Z14_LSB__POS           2
+#define SMI_ACC2X2_ACC_Z14_LSB__LEN           6
+#define SMI_ACC2X2_ACC_Z14_LSB__MSK           0xFC
+#define SMI_ACC2X2_ACC_Z14_LSB__REG           SMI_ACC2X2_Z_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Z12_LSB__POS           4
+#define SMI_ACC2X2_ACC_Z12_LSB__LEN           4
+#define SMI_ACC2X2_ACC_Z12_LSB__MSK           0xF0
+#define SMI_ACC2X2_ACC_Z12_LSB__REG           SMI_ACC2X2_Z_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Z10_LSB__POS           6
+#define SMI_ACC2X2_ACC_Z10_LSB__LEN           2
+#define SMI_ACC2X2_ACC_Z10_LSB__MSK           0xC0
+#define SMI_ACC2X2_ACC_Z10_LSB__REG           SMI_ACC2X2_Z_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Z8_LSB__POS           0
+#define SMI_ACC2X2_ACC_Z8_LSB__LEN           0
+#define SMI_ACC2X2_ACC_Z8_LSB__MSK           0x00
+#define SMI_ACC2X2_ACC_Z8_LSB__REG           SMI_ACC2X2_Z_AXIS_LSB_REG
+
+#define SMI_ACC2X2_ACC_Z_MSB__POS           0
+#define SMI_ACC2X2_ACC_Z_MSB__LEN           8
+#define SMI_ACC2X2_ACC_Z_MSB__MSK           0xFF
+#define SMI_ACC2X2_ACC_Z_MSB__REG           SMI_ACC2X2_Z_AXIS_MSB_REG
+
+#define SMI_ACC2X2_TEMPERATURE__POS         0
+#define SMI_ACC2X2_TEMPERATURE__LEN         8
+#define SMI_ACC2X2_TEMPERATURE__MSK         0xFF
+#define SMI_ACC2X2_TEMPERATURE__REG         SMI_ACC2X2_TEMP_RD_REG
+
+#define SMI_ACC2X2_LOWG_INT_S__POS          0
+#define SMI_ACC2X2_LOWG_INT_S__LEN          1
+#define SMI_ACC2X2_LOWG_INT_S__MSK          0x01
+#define SMI_ACC2X2_LOWG_INT_S__REG          SMI_ACC2X2_STATUS1_REG
+
+#define SMI_ACC2X2_HIGHG_INT_S__POS          1
+#define SMI_ACC2X2_HIGHG_INT_S__LEN          1
+#define SMI_ACC2X2_HIGHG_INT_S__MSK          0x02
+#define SMI_ACC2X2_HIGHG_INT_S__REG          SMI_ACC2X2_STATUS1_REG
+
+#define SMI_ACC2X2_SLOPE_INT_S__POS          2
+#define SMI_ACC2X2_SLOPE_INT_S__LEN          1
+#define SMI_ACC2X2_SLOPE_INT_S__MSK          0x04
+#define SMI_ACC2X2_SLOPE_INT_S__REG          SMI_ACC2X2_STATUS1_REG
+
+
+#define SMI_ACC2X2_SLO_NO_MOT_INT_S__POS          3
+#define SMI_ACC2X2_SLO_NO_MOT_INT_S__LEN          1
+#define SMI_ACC2X2_SLO_NO_MOT_INT_S__MSK          0x08
+#define SMI_ACC2X2_SLO_NO_MOT_INT_S__REG          SMI_ACC2X2_STATUS1_REG
+
+#define SMI_ACC2X2_DOUBLE_TAP_INT_S__POS     4
+#define SMI_ACC2X2_DOUBLE_TAP_INT_S__LEN     1
+#define SMI_ACC2X2_DOUBLE_TAP_INT_S__MSK     0x10
+#define SMI_ACC2X2_DOUBLE_TAP_INT_S__REG     SMI_ACC2X2_STATUS1_REG
+
+#define SMI_ACC2X2_SINGLE_TAP_INT_S__POS     5
+#define SMI_ACC2X2_SINGLE_TAP_INT_S__LEN     1
+#define SMI_ACC2X2_SINGLE_TAP_INT_S__MSK     0x20
+#define SMI_ACC2X2_SINGLE_TAP_INT_S__REG     SMI_ACC2X2_STATUS1_REG
+
+#define SMI_ACC2X2_ORIENT_INT_S__POS         6
+#define SMI_ACC2X2_ORIENT_INT_S__LEN         1
+#define SMI_ACC2X2_ORIENT_INT_S__MSK         0x40
+#define SMI_ACC2X2_ORIENT_INT_S__REG         SMI_ACC2X2_STATUS1_REG
+
+#define SMI_ACC2X2_FLAT_INT_S__POS           7
+#define SMI_ACC2X2_FLAT_INT_S__LEN           1
+#define SMI_ACC2X2_FLAT_INT_S__MSK           0x80
+#define SMI_ACC2X2_FLAT_INT_S__REG           SMI_ACC2X2_STATUS1_REG
+
+#define SMI_ACC2X2_FIFO_FULL_INT_S__POS           5
+#define SMI_ACC2X2_FIFO_FULL_INT_S__LEN           1
+#define SMI_ACC2X2_FIFO_FULL_INT_S__MSK           0x20
+#define SMI_ACC2X2_FIFO_FULL_INT_S__REG           SMI_ACC2X2_STATUS2_REG
+
+#define SMI_ACC2X2_FIFO_WM_INT_S__POS           6
+#define SMI_ACC2X2_FIFO_WM_INT_S__LEN           1
+#define SMI_ACC2X2_FIFO_WM_INT_S__MSK           0x40
+#define SMI_ACC2X2_FIFO_WM_INT_S__REG           SMI_ACC2X2_STATUS2_REG
+
+#define SMI_ACC2X2_DATA_INT_S__POS           7
+#define SMI_ACC2X2_DATA_INT_S__LEN           1
+#define SMI_ACC2X2_DATA_INT_S__MSK           0x80
+#define SMI_ACC2X2_DATA_INT_S__REG           SMI_ACC2X2_STATUS2_REG
+
+#define SMI_ACC2X2_SLOPE_FIRST_X__POS        0
+#define SMI_ACC2X2_SLOPE_FIRST_X__LEN        1
+#define SMI_ACC2X2_SLOPE_FIRST_X__MSK        0x01
+#define SMI_ACC2X2_SLOPE_FIRST_X__REG        SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_SLOPE_FIRST_Y__POS        1
+#define SMI_ACC2X2_SLOPE_FIRST_Y__LEN        1
+#define SMI_ACC2X2_SLOPE_FIRST_Y__MSK        0x02
+#define SMI_ACC2X2_SLOPE_FIRST_Y__REG        SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_SLOPE_FIRST_Z__POS        2
+#define SMI_ACC2X2_SLOPE_FIRST_Z__LEN        1
+#define SMI_ACC2X2_SLOPE_FIRST_Z__MSK        0x04
+#define SMI_ACC2X2_SLOPE_FIRST_Z__REG        SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_SLOPE_SIGN_S__POS         3
+#define SMI_ACC2X2_SLOPE_SIGN_S__LEN         1
+#define SMI_ACC2X2_SLOPE_SIGN_S__MSK         0x08
+#define SMI_ACC2X2_SLOPE_SIGN_S__REG         SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_TAP_FIRST_X__POS        4
+#define SMI_ACC2X2_TAP_FIRST_X__LEN        1
+#define SMI_ACC2X2_TAP_FIRST_X__MSK        0x10
+#define SMI_ACC2X2_TAP_FIRST_X__REG        SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_TAP_FIRST_Y__POS        5
+#define SMI_ACC2X2_TAP_FIRST_Y__LEN        1
+#define SMI_ACC2X2_TAP_FIRST_Y__MSK        0x20
+#define SMI_ACC2X2_TAP_FIRST_Y__REG        SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_TAP_FIRST_Z__POS        6
+#define SMI_ACC2X2_TAP_FIRST_Z__LEN        1
+#define SMI_ACC2X2_TAP_FIRST_Z__MSK        0x40
+#define SMI_ACC2X2_TAP_FIRST_Z__REG        SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_TAP_SIGN_S__POS         7
+#define SMI_ACC2X2_TAP_SIGN_S__LEN         1
+#define SMI_ACC2X2_TAP_SIGN_S__MSK         0x80
+#define SMI_ACC2X2_TAP_SIGN_S__REG         SMI_ACC2X2_STATUS_TAP_SLOPE_REG
+
+#define SMI_ACC2X2_HIGHG_FIRST_X__POS        0
+#define SMI_ACC2X2_HIGHG_FIRST_X__LEN        1
+#define SMI_ACC2X2_HIGHG_FIRST_X__MSK        0x01
+#define SMI_ACC2X2_HIGHG_FIRST_X__REG        SMI_ACC2X2_STATUS_ORIENT_HIGH_REG
+
+#define SMI_ACC2X2_HIGHG_FIRST_Y__POS        1
+#define SMI_ACC2X2_HIGHG_FIRST_Y__LEN        1
+#define SMI_ACC2X2_HIGHG_FIRST_Y__MSK        0x02
+#define SMI_ACC2X2_HIGHG_FIRST_Y__REG        SMI_ACC2X2_STATUS_ORIENT_HIGH_REG
+
+#define SMI_ACC2X2_HIGHG_FIRST_Z__POS        2
+#define SMI_ACC2X2_HIGHG_FIRST_Z__LEN        1
+#define SMI_ACC2X2_HIGHG_FIRST_Z__MSK        0x04
+#define SMI_ACC2X2_HIGHG_FIRST_Z__REG        SMI_ACC2X2_STATUS_ORIENT_HIGH_REG
+
+#define SMI_ACC2X2_HIGHG_SIGN_S__POS         3
+#define SMI_ACC2X2_HIGHG_SIGN_S__LEN         1
+#define SMI_ACC2X2_HIGHG_SIGN_S__MSK         0x08
+#define SMI_ACC2X2_HIGHG_SIGN_S__REG         SMI_ACC2X2_STATUS_ORIENT_HIGH_REG
+
+#define SMI_ACC2X2_ORIENT_S__POS             4
+#define SMI_ACC2X2_ORIENT_S__LEN             3
+#define SMI_ACC2X2_ORIENT_S__MSK             0x70
+#define SMI_ACC2X2_ORIENT_S__REG             SMI_ACC2X2_STATUS_ORIENT_HIGH_REG
+
+#define SMI_ACC2X2_FLAT_S__POS               7
+#define SMI_ACC2X2_FLAT_S__LEN               1
+#define SMI_ACC2X2_FLAT_S__MSK               0x80
+#define SMI_ACC2X2_FLAT_S__REG               SMI_ACC2X2_STATUS_ORIENT_HIGH_REG
+
+#define SMI_ACC2X2_FIFO_FRAME_COUNTER_S__POS             0
+#define SMI_ACC2X2_FIFO_FRAME_COUNTER_S__LEN             7
+#define SMI_ACC2X2_FIFO_FRAME_COUNTER_S__MSK             0x7F
+#define SMI_ACC2X2_FIFO_FRAME_COUNTER_S__REG             SMI_ACC2X2_STATUS_FIFO_REG
+
+#define SMI_ACC2X2_FIFO_OVERRUN_S__POS             7
+#define SMI_ACC2X2_FIFO_OVERRUN_S__LEN             1
+#define SMI_ACC2X2_FIFO_OVERRUN_S__MSK             0x80
+#define SMI_ACC2X2_FIFO_OVERRUN_S__REG             SMI_ACC2X2_STATUS_FIFO_REG
+
+#define SMI_ACC2X2_RANGE_SEL__POS             0
+#define SMI_ACC2X2_RANGE_SEL__LEN             4
+#define SMI_ACC2X2_RANGE_SEL__MSK             0x0F
+#define SMI_ACC2X2_RANGE_SEL__REG             SMI_ACC2X2_RANGE_SEL_REG
+
+#define SMI_ACC2X2_BANDWIDTH__POS             0
+#define SMI_ACC2X2_BANDWIDTH__LEN             5
+#define SMI_ACC2X2_BANDWIDTH__MSK             0x1F
+#define SMI_ACC2X2_BANDWIDTH__REG             SMI_ACC2X2_BW_SEL_REG
+
+#define SMI_ACC2X2_SLEEP_DUR__POS             1
+#define SMI_ACC2X2_SLEEP_DUR__LEN             4
+#define SMI_ACC2X2_SLEEP_DUR__MSK             0x1E
+#define SMI_ACC2X2_SLEEP_DUR__REG             SMI_ACC2X2_MODE_CTRL_REG
+
+#define SMI_ACC2X2_MODE_CTRL__POS             5
+#define SMI_ACC2X2_MODE_CTRL__LEN             3
+#define SMI_ACC2X2_MODE_CTRL__MSK             0xE0
+#define SMI_ACC2X2_MODE_CTRL__REG             SMI_ACC2X2_MODE_CTRL_REG
+
+#define SMI_ACC2X2_DEEP_SUSPEND__POS          5
+#define SMI_ACC2X2_DEEP_SUSPEND__LEN          1
+#define SMI_ACC2X2_DEEP_SUSPEND__MSK          0x20
+#define SMI_ACC2X2_DEEP_SUSPEND__REG          SMI_ACC2X2_MODE_CTRL_REG
+
+#define SMI_ACC2X2_EN_LOW_POWER__POS          6
+#define SMI_ACC2X2_EN_LOW_POWER__LEN          1
+#define SMI_ACC2X2_EN_LOW_POWER__MSK          0x40
+#define SMI_ACC2X2_EN_LOW_POWER__REG          SMI_ACC2X2_MODE_CTRL_REG
+
+#define SMI_ACC2X2_EN_SUSPEND__POS            7
+#define SMI_ACC2X2_EN_SUSPEND__LEN            1
+#define SMI_ACC2X2_EN_SUSPEND__MSK            0x80
+#define SMI_ACC2X2_EN_SUSPEND__REG            SMI_ACC2X2_MODE_CTRL_REG
+
+#define SMI_ACC2X2_SLEEP_TIMER__POS          5
+#define SMI_ACC2X2_SLEEP_TIMER__LEN          1
+#define SMI_ACC2X2_SLEEP_TIMER__MSK          0x20
+#define SMI_ACC2X2_SLEEP_TIMER__REG          SMI_ACC2X2_LOW_NOISE_CTRL_REG
+
+#define SMI_ACC2X2_LOW_POWER_MODE__POS          6
+#define SMI_ACC2X2_LOW_POWER_MODE__LEN          1
+#define SMI_ACC2X2_LOW_POWER_MODE__MSK          0x40
+#define SMI_ACC2X2_LOW_POWER_MODE__REG          SMI_ACC2X2_LOW_NOISE_CTRL_REG
+
+#define SMI_ACC2X2_EN_LOW_NOISE__POS          7
+#define SMI_ACC2X2_EN_LOW_NOISE__LEN          1
+#define SMI_ACC2X2_EN_LOW_NOISE__MSK          0x80
+#define SMI_ACC2X2_EN_LOW_NOISE__REG          SMI_ACC2X2_LOW_NOISE_CTRL_REG
+
+#define SMI_ACC2X2_DIS_SHADOW_PROC__POS       6
+#define SMI_ACC2X2_DIS_SHADOW_PROC__LEN       1
+#define SMI_ACC2X2_DIS_SHADOW_PROC__MSK       0x40
+#define SMI_ACC2X2_DIS_SHADOW_PROC__REG       SMI_ACC2X2_DATA_CTRL_REG
+
+#define SMI_ACC2X2_EN_DATA_HIGH_BW__POS         7
+#define SMI_ACC2X2_EN_DATA_HIGH_BW__LEN         1
+#define SMI_ACC2X2_EN_DATA_HIGH_BW__MSK         0x80
+#define SMI_ACC2X2_EN_DATA_HIGH_BW__REG         SMI_ACC2X2_DATA_CTRL_REG
+
+#define SMI_ACC2X2_EN_SOFT_RESET__POS         0
+#define SMI_ACC2X2_EN_SOFT_RESET__LEN         8
+#define SMI_ACC2X2_EN_SOFT_RESET__MSK         0xFF
+#define SMI_ACC2X2_EN_SOFT_RESET__REG         SMI_ACC2X2_RESET_REG
+
+#define SMI_ACC2X2_EN_SOFT_RESET_VALUE        0xB6
+
+#define SMI_ACC2X2_EN_SLOPE_X_INT__POS         0
+#define SMI_ACC2X2_EN_SLOPE_X_INT__LEN         1
+#define SMI_ACC2X2_EN_SLOPE_X_INT__MSK         0x01
+#define SMI_ACC2X2_EN_SLOPE_X_INT__REG         SMI_ACC2X2_INT_ENABLE1_REG
+
+#define SMI_ACC2X2_EN_SLOPE_Y_INT__POS         1
+#define SMI_ACC2X2_EN_SLOPE_Y_INT__LEN         1
+#define SMI_ACC2X2_EN_SLOPE_Y_INT__MSK         0x02
+#define SMI_ACC2X2_EN_SLOPE_Y_INT__REG         SMI_ACC2X2_INT_ENABLE1_REG
+
+#define SMI_ACC2X2_EN_SLOPE_Z_INT__POS         2
+#define SMI_ACC2X2_EN_SLOPE_Z_INT__LEN         1
+#define SMI_ACC2X2_EN_SLOPE_Z_INT__MSK         0x04
+#define SMI_ACC2X2_EN_SLOPE_Z_INT__REG         SMI_ACC2X2_INT_ENABLE1_REG
+
+#define SMI_ACC2X2_EN_DOUBLE_TAP_INT__POS      4
+#define SMI_ACC2X2_EN_DOUBLE_TAP_INT__LEN      1
+#define SMI_ACC2X2_EN_DOUBLE_TAP_INT__MSK      0x10
+#define SMI_ACC2X2_EN_DOUBLE_TAP_INT__REG      SMI_ACC2X2_INT_ENABLE1_REG
+
+#define SMI_ACC2X2_EN_SINGLE_TAP_INT__POS      5
+#define SMI_ACC2X2_EN_SINGLE_TAP_INT__LEN      1
+#define SMI_ACC2X2_EN_SINGLE_TAP_INT__MSK      0x20
+#define SMI_ACC2X2_EN_SINGLE_TAP_INT__REG      SMI_ACC2X2_INT_ENABLE1_REG
+
+#define SMI_ACC2X2_EN_ORIENT_INT__POS          6
+#define SMI_ACC2X2_EN_ORIENT_INT__LEN          1
+#define SMI_ACC2X2_EN_ORIENT_INT__MSK          0x40
+#define SMI_ACC2X2_EN_ORIENT_INT__REG          SMI_ACC2X2_INT_ENABLE1_REG
+
+#define SMI_ACC2X2_EN_FLAT_INT__POS            7
+#define SMI_ACC2X2_EN_FLAT_INT__LEN            1
+#define SMI_ACC2X2_EN_FLAT_INT__MSK            0x80
+#define SMI_ACC2X2_EN_FLAT_INT__REG            SMI_ACC2X2_INT_ENABLE1_REG
+
+#define SMI_ACC2X2_EN_HIGHG_X_INT__POS         0
+#define SMI_ACC2X2_EN_HIGHG_X_INT__LEN         1
+#define SMI_ACC2X2_EN_HIGHG_X_INT__MSK         0x01
+#define SMI_ACC2X2_EN_HIGHG_X_INT__REG         SMI_ACC2X2_INT_ENABLE2_REG
+
+#define SMI_ACC2X2_EN_HIGHG_Y_INT__POS         1
+#define SMI_ACC2X2_EN_HIGHG_Y_INT__LEN         1
+#define SMI_ACC2X2_EN_HIGHG_Y_INT__MSK         0x02
+#define SMI_ACC2X2_EN_HIGHG_Y_INT__REG         SMI_ACC2X2_INT_ENABLE2_REG
+
+#define SMI_ACC2X2_EN_HIGHG_Z_INT__POS         2
+#define SMI_ACC2X2_EN_HIGHG_Z_INT__LEN         1
+#define SMI_ACC2X2_EN_HIGHG_Z_INT__MSK         0x04
+#define SMI_ACC2X2_EN_HIGHG_Z_INT__REG         SMI_ACC2X2_INT_ENABLE2_REG
+
+#define SMI_ACC2X2_EN_LOWG_INT__POS            3
+#define SMI_ACC2X2_EN_LOWG_INT__LEN            1
+#define SMI_ACC2X2_EN_LOWG_INT__MSK            0x08
+#define SMI_ACC2X2_EN_LOWG_INT__REG            SMI_ACC2X2_INT_ENABLE2_REG
+
+#define SMI_ACC2X2_EN_NEW_DATA_INT__POS        4
+#define SMI_ACC2X2_EN_NEW_DATA_INT__LEN        1
+#define SMI_ACC2X2_EN_NEW_DATA_INT__MSK        0x10
+#define SMI_ACC2X2_EN_NEW_DATA_INT__REG        SMI_ACC2X2_INT_ENABLE2_REG
+
+#define SMI_ACC2X2_INT_FFULL_EN_INT__POS        5
+#define SMI_ACC2X2_INT_FFULL_EN_INT__LEN        1
+#define SMI_ACC2X2_INT_FFULL_EN_INT__MSK        0x20
+#define SMI_ACC2X2_INT_FFULL_EN_INT__REG        SMI_ACC2X2_INT_ENABLE2_REG
+
+#define SMI_ACC2X2_INT_FWM_EN_INT__POS        6
+#define SMI_ACC2X2_INT_FWM_EN_INT__LEN        1
+#define SMI_ACC2X2_INT_FWM_EN_INT__MSK        0x40
+#define SMI_ACC2X2_INT_FWM_EN_INT__REG        SMI_ACC2X2_INT_ENABLE2_REG
+
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_X_INT__POS        0
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_X_INT__LEN        1
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_X_INT__MSK        0x01
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_X_INT__REG        SMI_ACC2X2_INT_SLO_NO_MOT_REG
+
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Y_INT__POS        1
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Y_INT__LEN        1
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Y_INT__MSK        0x02
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Y_INT__REG        SMI_ACC2X2_INT_SLO_NO_MOT_REG
+
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Z_INT__POS        2
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Z_INT__LEN        1
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Z_INT__MSK        0x04
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_Z_INT__REG        SMI_ACC2X2_INT_SLO_NO_MOT_REG
+
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_SEL_INT__POS        3
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_SEL_INT__LEN        1
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_SEL_INT__MSK        0x08
+#define SMI_ACC2X2_INT_SLO_NO_MOT_EN_SEL_INT__REG        SMI_ACC2X2_INT_SLO_NO_MOT_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_LOWG__POS        0
+#define SMI_ACC2X2_EN_INT1_PAD_LOWG__LEN        1
+#define SMI_ACC2X2_EN_INT1_PAD_LOWG__MSK        0x01
+#define SMI_ACC2X2_EN_INT1_PAD_LOWG__REG        SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_HIGHG__POS       1
+#define SMI_ACC2X2_EN_INT1_PAD_HIGHG__LEN       1
+#define SMI_ACC2X2_EN_INT1_PAD_HIGHG__MSK       0x02
+#define SMI_ACC2X2_EN_INT1_PAD_HIGHG__REG       SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_SLOPE__POS       2
+#define SMI_ACC2X2_EN_INT1_PAD_SLOPE__LEN       1
+#define SMI_ACC2X2_EN_INT1_PAD_SLOPE__MSK       0x04
+#define SMI_ACC2X2_EN_INT1_PAD_SLOPE__REG       SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_SLO_NO_MOT__POS        3
+#define SMI_ACC2X2_EN_INT1_PAD_SLO_NO_MOT__LEN        1
+#define SMI_ACC2X2_EN_INT1_PAD_SLO_NO_MOT__MSK        0x08
+#define SMI_ACC2X2_EN_INT1_PAD_SLO_NO_MOT__REG        SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_DB_TAP__POS      4
+#define SMI_ACC2X2_EN_INT1_PAD_DB_TAP__LEN      1
+#define SMI_ACC2X2_EN_INT1_PAD_DB_TAP__MSK      0x10
+#define SMI_ACC2X2_EN_INT1_PAD_DB_TAP__REG      SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_SNG_TAP__POS     5
+#define SMI_ACC2X2_EN_INT1_PAD_SNG_TAP__LEN     1
+#define SMI_ACC2X2_EN_INT1_PAD_SNG_TAP__MSK     0x20
+#define SMI_ACC2X2_EN_INT1_PAD_SNG_TAP__REG     SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_ORIENT__POS      6
+#define SMI_ACC2X2_EN_INT1_PAD_ORIENT__LEN      1
+#define SMI_ACC2X2_EN_INT1_PAD_ORIENT__MSK      0x40
+#define SMI_ACC2X2_EN_INT1_PAD_ORIENT__REG      SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_FLAT__POS        7
+#define SMI_ACC2X2_EN_INT1_PAD_FLAT__LEN        1
+#define SMI_ACC2X2_EN_INT1_PAD_FLAT__MSK        0x80
+#define SMI_ACC2X2_EN_INT1_PAD_FLAT__REG        SMI_ACC2X2_INT1_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_LOWG__POS        0
+#define SMI_ACC2X2_EN_INT2_PAD_LOWG__LEN        1
+#define SMI_ACC2X2_EN_INT2_PAD_LOWG__MSK        0x01
+#define SMI_ACC2X2_EN_INT2_PAD_LOWG__REG        SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_HIGHG__POS       1
+#define SMI_ACC2X2_EN_INT2_PAD_HIGHG__LEN       1
+#define SMI_ACC2X2_EN_INT2_PAD_HIGHG__MSK       0x02
+#define SMI_ACC2X2_EN_INT2_PAD_HIGHG__REG       SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_SLOPE__POS       2
+#define SMI_ACC2X2_EN_INT2_PAD_SLOPE__LEN       1
+#define SMI_ACC2X2_EN_INT2_PAD_SLOPE__MSK       0x04
+#define SMI_ACC2X2_EN_INT2_PAD_SLOPE__REG       SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_SLO_NO_MOT__POS        3
+#define SMI_ACC2X2_EN_INT2_PAD_SLO_NO_MOT__LEN        1
+#define SMI_ACC2X2_EN_INT2_PAD_SLO_NO_MOT__MSK        0x08
+#define SMI_ACC2X2_EN_INT2_PAD_SLO_NO_MOT__REG        SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_DB_TAP__POS      4
+#define SMI_ACC2X2_EN_INT2_PAD_DB_TAP__LEN      1
+#define SMI_ACC2X2_EN_INT2_PAD_DB_TAP__MSK      0x10
+#define SMI_ACC2X2_EN_INT2_PAD_DB_TAP__REG      SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_SNG_TAP__POS     5
+#define SMI_ACC2X2_EN_INT2_PAD_SNG_TAP__LEN     1
+#define SMI_ACC2X2_EN_INT2_PAD_SNG_TAP__MSK     0x20
+#define SMI_ACC2X2_EN_INT2_PAD_SNG_TAP__REG     SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_ORIENT__POS      6
+#define SMI_ACC2X2_EN_INT2_PAD_ORIENT__LEN      1
+#define SMI_ACC2X2_EN_INT2_PAD_ORIENT__MSK      0x40
+#define SMI_ACC2X2_EN_INT2_PAD_ORIENT__REG      SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_FLAT__POS        7
+#define SMI_ACC2X2_EN_INT2_PAD_FLAT__LEN        1
+#define SMI_ACC2X2_EN_INT2_PAD_FLAT__MSK        0x80
+#define SMI_ACC2X2_EN_INT2_PAD_FLAT__REG        SMI_ACC2X2_INT2_PAD_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_NEWDATA__POS     0
+#define SMI_ACC2X2_EN_INT1_PAD_NEWDATA__LEN     1
+#define SMI_ACC2X2_EN_INT1_PAD_NEWDATA__MSK     0x01
+#define SMI_ACC2X2_EN_INT1_PAD_NEWDATA__REG     SMI_ACC2X2_INT_DATA_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_FWM__POS     1
+#define SMI_ACC2X2_EN_INT1_PAD_FWM__LEN     1
+#define SMI_ACC2X2_EN_INT1_PAD_FWM__MSK     0x02
+#define SMI_ACC2X2_EN_INT1_PAD_FWM__REG     SMI_ACC2X2_INT_DATA_SEL_REG
+
+#define SMI_ACC2X2_EN_INT1_PAD_FFULL__POS     2
+#define SMI_ACC2X2_EN_INT1_PAD_FFULL__LEN     1
+#define SMI_ACC2X2_EN_INT1_PAD_FFULL__MSK     0x04
+#define SMI_ACC2X2_EN_INT1_PAD_FFULL__REG     SMI_ACC2X2_INT_DATA_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_FFULL__POS     5
+#define SMI_ACC2X2_EN_INT2_PAD_FFULL__LEN     1
+#define SMI_ACC2X2_EN_INT2_PAD_FFULL__MSK     0x20
+#define SMI_ACC2X2_EN_INT2_PAD_FFULL__REG     SMI_ACC2X2_INT_DATA_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_FWM__POS     6
+#define SMI_ACC2X2_EN_INT2_PAD_FWM__LEN     1
+#define SMI_ACC2X2_EN_INT2_PAD_FWM__MSK     0x40
+#define SMI_ACC2X2_EN_INT2_PAD_FWM__REG     SMI_ACC2X2_INT_DATA_SEL_REG
+
+#define SMI_ACC2X2_EN_INT2_PAD_NEWDATA__POS     7
+#define SMI_ACC2X2_EN_INT2_PAD_NEWDATA__LEN     1
+#define SMI_ACC2X2_EN_INT2_PAD_NEWDATA__MSK     0x80
+#define SMI_ACC2X2_EN_INT2_PAD_NEWDATA__REG     SMI_ACC2X2_INT_DATA_SEL_REG
+
+#define SMI_ACC2X2_UNFILT_INT_SRC_LOWG__POS        0
+#define SMI_ACC2X2_UNFILT_INT_SRC_LOWG__LEN        1
+#define SMI_ACC2X2_UNFILT_INT_SRC_LOWG__MSK        0x01
+#define SMI_ACC2X2_UNFILT_INT_SRC_LOWG__REG        SMI_ACC2X2_INT_SRC_REG
+
+#define SMI_ACC2X2_UNFILT_INT_SRC_HIGHG__POS       1
+#define SMI_ACC2X2_UNFILT_INT_SRC_HIGHG__LEN       1
+#define SMI_ACC2X2_UNFILT_INT_SRC_HIGHG__MSK       0x02
+#define SMI_ACC2X2_UNFILT_INT_SRC_HIGHG__REG       SMI_ACC2X2_INT_SRC_REG
+
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLOPE__POS       2
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLOPE__LEN       1
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLOPE__MSK       0x04
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLOPE__REG       SMI_ACC2X2_INT_SRC_REG
+
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLO_NO_MOT__POS        3
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLO_NO_MOT__LEN        1
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLO_NO_MOT__MSK        0x08
+#define SMI_ACC2X2_UNFILT_INT_SRC_SLO_NO_MOT__REG        SMI_ACC2X2_INT_SRC_REG
+
+#define SMI_ACC2X2_UNFILT_INT_SRC_TAP__POS         4
+#define SMI_ACC2X2_UNFILT_INT_SRC_TAP__LEN         1
+#define SMI_ACC2X2_UNFILT_INT_SRC_TAP__MSK         0x10
+#define SMI_ACC2X2_UNFILT_INT_SRC_TAP__REG         SMI_ACC2X2_INT_SRC_REG
+
+#define SMI_ACC2X2_UNFILT_INT_SRC_DATA__POS        5
+#define SMI_ACC2X2_UNFILT_INT_SRC_DATA__LEN        1
+#define SMI_ACC2X2_UNFILT_INT_SRC_DATA__MSK        0x20
+#define SMI_ACC2X2_UNFILT_INT_SRC_DATA__REG        SMI_ACC2X2_INT_SRC_REG
+
+#define SMI_ACC2X2_INT1_PAD_ACTIVE_LEVEL__POS       0
+#define SMI_ACC2X2_INT1_PAD_ACTIVE_LEVEL__LEN       1
+#define SMI_ACC2X2_INT1_PAD_ACTIVE_LEVEL__MSK       0x01
+#define SMI_ACC2X2_INT1_PAD_ACTIVE_LEVEL__REG       SMI_ACC2X2_INT_SET_REG
+
+#define SMI_ACC2X2_INT2_PAD_ACTIVE_LEVEL__POS       2
+#define SMI_ACC2X2_INT2_PAD_ACTIVE_LEVEL__LEN       1
+#define SMI_ACC2X2_INT2_PAD_ACTIVE_LEVEL__MSK       0x04
+#define SMI_ACC2X2_INT2_PAD_ACTIVE_LEVEL__REG       SMI_ACC2X2_INT_SET_REG
+
+#define SMI_ACC2X2_INT1_PAD_OUTPUT_TYPE__POS        1
+#define SMI_ACC2X2_INT1_PAD_OUTPUT_TYPE__LEN        1
+#define SMI_ACC2X2_INT1_PAD_OUTPUT_TYPE__MSK        0x02
+#define SMI_ACC2X2_INT1_PAD_OUTPUT_TYPE__REG        SMI_ACC2X2_INT_SET_REG
+
+#define SMI_ACC2X2_INT2_PAD_OUTPUT_TYPE__POS        3
+#define SMI_ACC2X2_INT2_PAD_OUTPUT_TYPE__LEN        1
+#define SMI_ACC2X2_INT2_PAD_OUTPUT_TYPE__MSK        0x08
+#define SMI_ACC2X2_INT2_PAD_OUTPUT_TYPE__REG        SMI_ACC2X2_INT_SET_REG
+
+#define SMI_ACC2X2_INT_MODE_SEL__POS                0
+#define SMI_ACC2X2_INT_MODE_SEL__LEN                4
+#define SMI_ACC2X2_INT_MODE_SEL__MSK                0x0F
+#define SMI_ACC2X2_INT_MODE_SEL__REG                SMI_ACC2X2_INT_CTRL_REG
+
+#define SMI_ACC2X2_RESET_INT__POS           7
+#define SMI_ACC2X2_RESET_INT__LEN           1
+#define SMI_ACC2X2_RESET_INT__MSK           0x80
+#define SMI_ACC2X2_RESET_INT__REG           SMI_ACC2X2_INT_CTRL_REG
+
+#define SMI_ACC2X2_LOWG_DUR__POS                    0
+#define SMI_ACC2X2_LOWG_DUR__LEN                    8
+#define SMI_ACC2X2_LOWG_DUR__MSK                    0xFF
+#define SMI_ACC2X2_LOWG_DUR__REG                    SMI_ACC2X2_LOW_DURN_REG
+
+#define SMI_ACC2X2_LOWG_THRES__POS                  0
+#define SMI_ACC2X2_LOWG_THRES__LEN                  8
+#define SMI_ACC2X2_LOWG_THRES__MSK                  0xFF
+#define SMI_ACC2X2_LOWG_THRES__REG                  SMI_ACC2X2_LOW_THRES_REG
+
+#define SMI_ACC2X2_LOWG_HYST__POS                   0
+#define SMI_ACC2X2_LOWG_HYST__LEN                   2
+#define SMI_ACC2X2_LOWG_HYST__MSK                   0x03
+#define SMI_ACC2X2_LOWG_HYST__REG                   SMI_ACC2X2_LOW_HIGH_HYST_REG
+
+#define SMI_ACC2X2_LOWG_INT_MODE__POS               2
+#define SMI_ACC2X2_LOWG_INT_MODE__LEN               1
+#define SMI_ACC2X2_LOWG_INT_MODE__MSK               0x04
+#define SMI_ACC2X2_LOWG_INT_MODE__REG               SMI_ACC2X2_LOW_HIGH_HYST_REG
+
+#define SMI_ACC2X2_HIGHG_DUR__POS                    0
+#define SMI_ACC2X2_HIGHG_DUR__LEN                    8
+#define SMI_ACC2X2_HIGHG_DUR__MSK                    0xFF
+#define SMI_ACC2X2_HIGHG_DUR__REG                    SMI_ACC2X2_HIGH_DURN_REG
+
+#define SMI_ACC2X2_HIGHG_THRES__POS                  0
+#define SMI_ACC2X2_HIGHG_THRES__LEN                  8
+#define SMI_ACC2X2_HIGHG_THRES__MSK                  0xFF
+#define SMI_ACC2X2_HIGHG_THRES__REG                  SMI_ACC2X2_HIGH_THRES_REG
+
+#define SMI_ACC2X2_HIGHG_HYST__POS                  6
+#define SMI_ACC2X2_HIGHG_HYST__LEN                  2
+#define SMI_ACC2X2_HIGHG_HYST__MSK                  0xC0
+#define SMI_ACC2X2_HIGHG_HYST__REG                  SMI_ACC2X2_LOW_HIGH_HYST_REG
+
+#define SMI_ACC2X2_SLOPE_DUR__POS                    0
+#define SMI_ACC2X2_SLOPE_DUR__LEN                    2
+#define SMI_ACC2X2_SLOPE_DUR__MSK                    0x03
+#define SMI_ACC2X2_SLOPE_DUR__REG                    SMI_ACC2X2_SLOPE_DURN_REG
+
+#define SMI_ACC2X2_SLO_NO_MOT_DUR__POS                    2
+#define SMI_ACC2X2_SLO_NO_MOT_DUR__LEN                    6
+#define SMI_ACC2X2_SLO_NO_MOT_DUR__MSK                    0xFC
+#define SMI_ACC2X2_SLO_NO_MOT_DUR__REG                    SMI_ACC2X2_SLOPE_DURN_REG
+
+#define SMI_ACC2X2_SLOPE_THRES__POS                  0
+#define SMI_ACC2X2_SLOPE_THRES__LEN                  8
+#define SMI_ACC2X2_SLOPE_THRES__MSK                  0xFF
+#define SMI_ACC2X2_SLOPE_THRES__REG                  SMI_ACC2X2_SLOPE_THRES_REG
+
+#define SMI_ACC2X2_SLO_NO_MOT_THRES__POS                  0
+#define SMI_ACC2X2_SLO_NO_MOT_THRES__LEN                  8
+#define SMI_ACC2X2_SLO_NO_MOT_THRES__MSK                  0xFF
+#define SMI_ACC2X2_SLO_NO_MOT_THRES__REG           SMI_ACC2X2_SLO_NO_MOT_THRES_REG
+
+#define SMI_ACC2X2_TAP_DUR__POS                    0
+#define SMI_ACC2X2_TAP_DUR__LEN                    3
+#define SMI_ACC2X2_TAP_DUR__MSK                    0x07
+#define SMI_ACC2X2_TAP_DUR__REG                    SMI_ACC2X2_TAP_PARAM_REG
+
+#define SMI_ACC2X2_TAP_SHOCK_DURN__POS             6
+#define SMI_ACC2X2_TAP_SHOCK_DURN__LEN             1
+#define SMI_ACC2X2_TAP_SHOCK_DURN__MSK             0x40
+#define SMI_ACC2X2_TAP_SHOCK_DURN__REG             SMI_ACC2X2_TAP_PARAM_REG
+
+#define SMI_ACC2X2_ADV_TAP_INT__POS                5
+#define SMI_ACC2X2_ADV_TAP_INT__LEN                1
+#define SMI_ACC2X2_ADV_TAP_INT__MSK                0x20
+#define SMI_ACC2X2_ADV_TAP_INT__REG                SMI_ACC2X2_TAP_PARAM_REG
+
+#define SMI_ACC2X2_TAP_QUIET_DURN__POS             7
+#define SMI_ACC2X2_TAP_QUIET_DURN__LEN             1
+#define SMI_ACC2X2_TAP_QUIET_DURN__MSK             0x80
+#define SMI_ACC2X2_TAP_QUIET_DURN__REG             SMI_ACC2X2_TAP_PARAM_REG
+
+#define SMI_ACC2X2_TAP_THRES__POS                  0
+#define SMI_ACC2X2_TAP_THRES__LEN                  5
+#define SMI_ACC2X2_TAP_THRES__MSK                  0x1F
+#define SMI_ACC2X2_TAP_THRES__REG                  SMI_ACC2X2_TAP_THRES_REG
+
+#define SMI_ACC2X2_TAP_SAMPLES__POS                6
+#define SMI_ACC2X2_TAP_SAMPLES__LEN                2
+#define SMI_ACC2X2_TAP_SAMPLES__MSK                0xC0
+#define SMI_ACC2X2_TAP_SAMPLES__REG                SMI_ACC2X2_TAP_THRES_REG
+
+#define SMI_ACC2X2_ORIENT_MODE__POS                  0
+#define SMI_ACC2X2_ORIENT_MODE__LEN                  2
+#define SMI_ACC2X2_ORIENT_MODE__MSK                  0x03
+#define SMI_ACC2X2_ORIENT_MODE__REG                  SMI_ACC2X2_ORIENT_PARAM_REG
+
+#define SMI_ACC2X2_ORIENT_BLOCK__POS                 2
+#define SMI_ACC2X2_ORIENT_BLOCK__LEN                 2
+#define SMI_ACC2X2_ORIENT_BLOCK__MSK                 0x0C
+#define SMI_ACC2X2_ORIENT_BLOCK__REG                 SMI_ACC2X2_ORIENT_PARAM_REG
+
+#define SMI_ACC2X2_ORIENT_HYST__POS                  4
+#define SMI_ACC2X2_ORIENT_HYST__LEN                  3
+#define SMI_ACC2X2_ORIENT_HYST__MSK                  0x70
+#define SMI_ACC2X2_ORIENT_HYST__REG                  SMI_ACC2X2_ORIENT_PARAM_REG
+
+#define SMI_ACC2X2_ORIENT_AXIS__POS                  7
+#define SMI_ACC2X2_ORIENT_AXIS__LEN                  1
+#define SMI_ACC2X2_ORIENT_AXIS__MSK                  0x80
+#define SMI_ACC2X2_ORIENT_AXIS__REG                  SMI_ACC2X2_THETA_BLOCK_REG
+
+#define SMI_ACC2X2_ORIENT_UD_EN__POS                  6
+#define SMI_ACC2X2_ORIENT_UD_EN__LEN                  1
+#define SMI_ACC2X2_ORIENT_UD_EN__MSK                  0x40
+#define SMI_ACC2X2_ORIENT_UD_EN__REG                  SMI_ACC2X2_THETA_BLOCK_REG
+
+#define SMI_ACC2X2_THETA_BLOCK__POS                  0
+#define SMI_ACC2X2_THETA_BLOCK__LEN                  6
+#define SMI_ACC2X2_THETA_BLOCK__MSK                  0x3F
+#define SMI_ACC2X2_THETA_BLOCK__REG                  SMI_ACC2X2_THETA_BLOCK_REG
+
+#define SMI_ACC2X2_THETA_FLAT__POS                  0
+#define SMI_ACC2X2_THETA_FLAT__LEN                  6
+#define SMI_ACC2X2_THETA_FLAT__MSK                  0x3F
+#define SMI_ACC2X2_THETA_FLAT__REG                  SMI_ACC2X2_THETA_FLAT_REG
+
+#define SMI_ACC2X2_FLAT_HOLD_TIME__POS              4
+#define SMI_ACC2X2_FLAT_HOLD_TIME__LEN              2
+#define SMI_ACC2X2_FLAT_HOLD_TIME__MSK              0x30
+#define SMI_ACC2X2_FLAT_HOLD_TIME__REG              SMI_ACC2X2_FLAT_HOLD_TIME_REG
+
+#define SMI_ACC2X2_FLAT_HYS__POS                   0
+#define SMI_ACC2X2_FLAT_HYS__LEN                   3
+#define SMI_ACC2X2_FLAT_HYS__MSK                   0x07
+#define SMI_ACC2X2_FLAT_HYS__REG                   SMI_ACC2X2_FLAT_HOLD_TIME_REG
+
+#define SMI_ACC2X2_FIFO_WML_TRIG_RETAIN__POS                   0
+#define SMI_ACC2X2_FIFO_WML_TRIG_RETAIN__LEN                   6
+#define SMI_ACC2X2_FIFO_WML_TRIG_RETAIN__MSK                   0x3F
+#define SMI_ACC2X2_FIFO_WML_TRIG_RETAIN__REG                   SMI_ACC2X2_FIFO_WML_TRIG
+
+#define SMI_ACC2X2_EN_SELF_TEST__POS                0
+#define SMI_ACC2X2_EN_SELF_TEST__LEN                2
+#define SMI_ACC2X2_EN_SELF_TEST__MSK                0x03
+#define SMI_ACC2X2_EN_SELF_TEST__REG                SMI_ACC2X2_SELF_TEST_REG
+
+#define SMI_ACC2X2_NEG_SELF_TEST__POS               2
+#define SMI_ACC2X2_NEG_SELF_TEST__LEN               1
+#define SMI_ACC2X2_NEG_SELF_TEST__MSK               0x04
+#define SMI_ACC2X2_NEG_SELF_TEST__REG               SMI_ACC2X2_SELF_TEST_REG
+
+#define SMI_ACC2X2_SELF_TEST_AMP__POS               4
+#define SMI_ACC2X2_SELF_TEST_AMP__LEN               1
+#define SMI_ACC2X2_SELF_TEST_AMP__MSK               0x10
+#define SMI_ACC2X2_SELF_TEST_AMP__REG               SMI_ACC2X2_SELF_TEST_REG
+
+
+#define SMI_ACC2X2_UNLOCK_EE_PROG_MODE__POS     0
+#define SMI_ACC2X2_UNLOCK_EE_PROG_MODE__LEN     1
+#define SMI_ACC2X2_UNLOCK_EE_PROG_MODE__MSK     0x01
+#define SMI_ACC2X2_UNLOCK_EE_PROG_MODE__REG     SMI_ACC2X2_EEPROM_CTRL_REG
+
+#define SMI_ACC2X2_START_EE_PROG_TRIG__POS      1
+#define SMI_ACC2X2_START_EE_PROG_TRIG__LEN      1
+#define SMI_ACC2X2_START_EE_PROG_TRIG__MSK      0x02
+#define SMI_ACC2X2_START_EE_PROG_TRIG__REG      SMI_ACC2X2_EEPROM_CTRL_REG
+
+#define SMI_ACC2X2_EE_PROG_READY__POS          2
+#define SMI_ACC2X2_EE_PROG_READY__LEN          1
+#define SMI_ACC2X2_EE_PROG_READY__MSK          0x04
+#define SMI_ACC2X2_EE_PROG_READY__REG          SMI_ACC2X2_EEPROM_CTRL_REG
+
+#define SMI_ACC2X2_UPDATE_IMAGE__POS                3
+#define SMI_ACC2X2_UPDATE_IMAGE__LEN                1
+#define SMI_ACC2X2_UPDATE_IMAGE__MSK                0x08
+#define SMI_ACC2X2_UPDATE_IMAGE__REG                SMI_ACC2X2_EEPROM_CTRL_REG
+
+#define SMI_ACC2X2_EE_REMAIN__POS                4
+#define SMI_ACC2X2_EE_REMAIN__LEN                4
+#define SMI_ACC2X2_EE_REMAIN__MSK                0xF0
+#define SMI_ACC2X2_EE_REMAIN__REG                SMI_ACC2X2_EEPROM_CTRL_REG
+
+#define SMI_ACC2X2_EN_SPI_MODE_3__POS              0
+#define SMI_ACC2X2_EN_SPI_MODE_3__LEN              1
+#define SMI_ACC2X2_EN_SPI_MODE_3__MSK              0x01
+#define SMI_ACC2X2_EN_SPI_MODE_3__REG              SMI_ACC2X2_SERIAL_CTRL_REG
+
+#define SMI_ACC2X2_I2C_WATCHDOG_PERIOD__POS        1
+#define SMI_ACC2X2_I2C_WATCHDOG_PERIOD__LEN        1
+#define SMI_ACC2X2_I2C_WATCHDOG_PERIOD__MSK        0x02
+#define SMI_ACC2X2_I2C_WATCHDOG_PERIOD__REG        SMI_ACC2X2_SERIAL_CTRL_REG
+
+#define SMI_ACC2X2_EN_I2C_WATCHDOG__POS            2
+#define SMI_ACC2X2_EN_I2C_WATCHDOG__LEN            1
+#define SMI_ACC2X2_EN_I2C_WATCHDOG__MSK            0x04
+#define SMI_ACC2X2_EN_I2C_WATCHDOG__REG            SMI_ACC2X2_SERIAL_CTRL_REG
+
+#define SMI_ACC2X2_EXT_MODE__POS              7
+#define SMI_ACC2X2_EXT_MODE__LEN              1
+#define SMI_ACC2X2_EXT_MODE__MSK              0x80
+#define SMI_ACC2X2_EXT_MODE__REG              SMI_ACC2X2_EXTMODE_CTRL_REG
+
+#define SMI_ACC2X2_ALLOW_UPPER__POS        6
+#define SMI_ACC2X2_ALLOW_UPPER__LEN        1
+#define SMI_ACC2X2_ALLOW_UPPER__MSK        0x40
+#define SMI_ACC2X2_ALLOW_UPPER__REG        SMI_ACC2X2_EXTMODE_CTRL_REG
+
+#define SMI_ACC2X2_MAP_2_LOWER__POS            5
+#define SMI_ACC2X2_MAP_2_LOWER__LEN            1
+#define SMI_ACC2X2_MAP_2_LOWER__MSK            0x20
+#define SMI_ACC2X2_MAP_2_LOWER__REG            SMI_ACC2X2_EXTMODE_CTRL_REG
+
+#define SMI_ACC2X2_MAGIC_NUMBER__POS            0
+#define SMI_ACC2X2_MAGIC_NUMBER__LEN            5
+#define SMI_ACC2X2_MAGIC_NUMBER__MSK            0x1F
+#define SMI_ACC2X2_MAGIC_NUMBER__REG            SMI_ACC2X2_EXTMODE_CTRL_REG
+
+#define SMI_ACC2X2_UNLOCK_EE_WRITE_TRIM__POS        4
+#define SMI_ACC2X2_UNLOCK_EE_WRITE_TRIM__LEN        4
+#define SMI_ACC2X2_UNLOCK_EE_WRITE_TRIM__MSK        0xF0
+#define SMI_ACC2X2_UNLOCK_EE_WRITE_TRIM__REG        SMI_ACC2X2_CTRL_UNLOCK_REG
+
+#define SMI_ACC2X2_EN_SLOW_COMP_X__POS              0
+#define SMI_ACC2X2_EN_SLOW_COMP_X__LEN              1
+#define SMI_ACC2X2_EN_SLOW_COMP_X__MSK              0x01
+#define SMI_ACC2X2_EN_SLOW_COMP_X__REG              SMI_ACC2X2_OFFSET_CTRL_REG
+
+#define SMI_ACC2X2_EN_SLOW_COMP_Y__POS              1
+#define SMI_ACC2X2_EN_SLOW_COMP_Y__LEN              1
+#define SMI_ACC2X2_EN_SLOW_COMP_Y__MSK              0x02
+#define SMI_ACC2X2_EN_SLOW_COMP_Y__REG              SMI_ACC2X2_OFFSET_CTRL_REG
+
+#define SMI_ACC2X2_EN_SLOW_COMP_Z__POS              2
+#define SMI_ACC2X2_EN_SLOW_COMP_Z__LEN              1
+#define SMI_ACC2X2_EN_SLOW_COMP_Z__MSK              0x04
+#define SMI_ACC2X2_EN_SLOW_COMP_Z__REG              SMI_ACC2X2_OFFSET_CTRL_REG
+
+#define SMI_ACC2X2_FAST_CAL_RDY_S__POS             4
+#define SMI_ACC2X2_FAST_CAL_RDY_S__LEN             1
+#define SMI_ACC2X2_FAST_CAL_RDY_S__MSK             0x10
+#define SMI_ACC2X2_FAST_CAL_RDY_S__REG             SMI_ACC2X2_OFFSET_CTRL_REG
+
+#define SMI_ACC2X2_CAL_TRIGGER__POS                5
+#define SMI_ACC2X2_CAL_TRIGGER__LEN                2
+#define SMI_ACC2X2_CAL_TRIGGER__MSK                0x60
+#define SMI_ACC2X2_CAL_TRIGGER__REG                SMI_ACC2X2_OFFSET_CTRL_REG
+
+#define SMI_ACC2X2_RESET_OFFSET_REGS__POS           7
+#define SMI_ACC2X2_RESET_OFFSET_REGS__LEN           1
+#define SMI_ACC2X2_RESET_OFFSET_REGS__MSK           0x80
+#define SMI_ACC2X2_RESET_OFFSET_REGS__REG           SMI_ACC2X2_OFFSET_CTRL_REG
+
+#define SMI_ACC2X2_COMP_CUTOFF__POS                 0
+#define SMI_ACC2X2_COMP_CUTOFF__LEN                 1
+#define SMI_ACC2X2_COMP_CUTOFF__MSK                 0x01
+#define SMI_ACC2X2_COMP_CUTOFF__REG                 SMI_ACC2X2_OFFSET_PARAMS_REG
+
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_X__POS        1
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_X__LEN        2
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_X__MSK        0x06
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_X__REG        SMI_ACC2X2_OFFSET_PARAMS_REG
+
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Y__POS        3
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Y__LEN        2
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Y__MSK        0x18
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Y__REG        SMI_ACC2X2_OFFSET_PARAMS_REG
+
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Z__POS        5
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Z__LEN        2
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Z__MSK        0x60
+#define SMI_ACC2X2_COMP_TARGET_OFFSET_Z__REG        SMI_ACC2X2_OFFSET_PARAMS_REG
+
+#define SMI_ACC2X2_FIFO_DATA_SELECT__POS                 0
+#define SMI_ACC2X2_FIFO_DATA_SELECT__LEN                 2
+#define SMI_ACC2X2_FIFO_DATA_SELECT__MSK                 0x03
+#define SMI_ACC2X2_FIFO_DATA_SELECT__REG                 SMI_ACC2X2_FIFO_MODE_REG
+
+#define SMI_ACC2X2_FIFO_TRIGGER_SOURCE__POS                 2
+#define SMI_ACC2X2_FIFO_TRIGGER_SOURCE__LEN                 2
+#define SMI_ACC2X2_FIFO_TRIGGER_SOURCE__MSK                 0x0C
+#define SMI_ACC2X2_FIFO_TRIGGER_SOURCE__REG                 SMI_ACC2X2_FIFO_MODE_REG
+
+#define SMI_ACC2X2_FIFO_TRIGGER_ACTION__POS                 4
+#define SMI_ACC2X2_FIFO_TRIGGER_ACTION__LEN                 2
+#define SMI_ACC2X2_FIFO_TRIGGER_ACTION__MSK                 0x30
+#define SMI_ACC2X2_FIFO_TRIGGER_ACTION__REG                 SMI_ACC2X2_FIFO_MODE_REG
+
+#define SMI_ACC2X2_FIFO_MODE__POS                 6
+#define SMI_ACC2X2_FIFO_MODE__LEN                 2
+#define SMI_ACC2X2_FIFO_MODE__MSK                 0xC0
+#define SMI_ACC2X2_FIFO_MODE__REG                 SMI_ACC2X2_FIFO_MODE_REG
+
+
+#define SMI_ACC2X2_STATUS1                             0
+#define SMI_ACC2X2_STATUS2                             1
+#define SMI_ACC2X2_STATUS3                             2
+#define SMI_ACC2X2_STATUS4                             3
+#define SMI_ACC2X2_STATUS5                             4
+
+
+#define SMI_ACC2X2_RANGE_2G                 3
+#define SMI_ACC2X2_RANGE_4G                 5
+#define SMI_ACC2X2_RANGE_8G                 8
+#define SMI_ACC2X2_RANGE_16G                12
+
+
+#define SMI_ACC2X2_BW_7_81HZ        0x08
+#define SMI_ACC2X2_BW_15_63HZ       0x09
+#define SMI_ACC2X2_BW_31_25HZ       0x0A
+#define SMI_ACC2X2_BW_62_50HZ       0x0B
+#define SMI_ACC2X2_BW_125HZ         0x0C
+#define SMI_ACC2X2_BW_250HZ         0x0D
+#define SMI_ACC2X2_BW_500HZ         0x0E
+#define SMI_ACC2X2_BW_1000HZ        0x0F
+
+#define SMI_ACC2X2_SLEEP_DUR_0_5MS        0x05
+#define SMI_ACC2X2_SLEEP_DUR_1MS          0x06
+#define SMI_ACC2X2_SLEEP_DUR_2MS          0x07
+#define SMI_ACC2X2_SLEEP_DUR_4MS          0x08
+#define SMI_ACC2X2_SLEEP_DUR_6MS          0x09
+#define SMI_ACC2X2_SLEEP_DUR_10MS         0x0A
+#define SMI_ACC2X2_SLEEP_DUR_25MS         0x0B
+#define SMI_ACC2X2_SLEEP_DUR_50MS         0x0C
+#define SMI_ACC2X2_SLEEP_DUR_100MS        0x0D
+#define SMI_ACC2X2_SLEEP_DUR_500MS        0x0E
+#define SMI_ACC2X2_SLEEP_DUR_1S           0x0F
+
+#define SMI_ACC2X2_LATCH_DUR_NON_LATCH    0x00
+#define SMI_ACC2X2_LATCH_DUR_250MS        0x01
+#define SMI_ACC2X2_LATCH_DUR_500MS        0x02
+#define SMI_ACC2X2_LATCH_DUR_1S           0x03
+#define SMI_ACC2X2_LATCH_DUR_2S           0x04
+#define SMI_ACC2X2_LATCH_DUR_4S           0x05
+#define SMI_ACC2X2_LATCH_DUR_8S           0x06
+#define SMI_ACC2X2_LATCH_DUR_LATCH        0x07
+#define SMI_ACC2X2_LATCH_DUR_NON_LATCH1   0x08
+#define SMI_ACC2X2_LATCH_DUR_250US        0x09
+#define SMI_ACC2X2_LATCH_DUR_500US        0x0A
+#define SMI_ACC2X2_LATCH_DUR_1MS          0x0B
+#define SMI_ACC2X2_LATCH_DUR_12_5MS       0x0C
+#define SMI_ACC2X2_LATCH_DUR_25MS         0x0D
+#define SMI_ACC2X2_LATCH_DUR_50MS         0x0E
+#define SMI_ACC2X2_LATCH_DUR_LATCH1       0x0F
+
+#define SMI_ACC2X2_MODE_NORMAL             0
+#define SMI_ACC2X2_MODE_LOWPOWER1          1
+#define SMI_ACC2X2_MODE_SUSPEND            2
+#define SMI_ACC2X2_MODE_DEEP_SUSPEND       3
+#define SMI_ACC2X2_MODE_LOWPOWER2          4
+#define SMI_ACC2X2_MODE_STANDBY            5
+
+#define SMI_ACC2X2_X_AXIS           0
+#define SMI_ACC2X2_Y_AXIS           1
+#define SMI_ACC2X2_Z_AXIS           2
+
+#define SMI_ACC2X2_Low_G_Interrupt       0
+#define SMI_ACC2X2_High_G_X_Interrupt    1
+#define SMI_ACC2X2_High_G_Y_Interrupt    2
+#define SMI_ACC2X2_High_G_Z_Interrupt    3
+#define SMI_ACC2X2_DATA_EN               4
+#define SMI_ACC2X2_Slope_X_Interrupt     5
+#define SMI_ACC2X2_Slope_Y_Interrupt     6
+#define SMI_ACC2X2_Slope_Z_Interrupt     7
+#define SMI_ACC2X2_Single_Tap_Interrupt  8
+#define SMI_ACC2X2_Double_Tap_Interrupt  9
+#define SMI_ACC2X2_Orient_Interrupt      10
+#define SMI_ACC2X2_Flat_Interrupt        11
+#define SMI_ACC2X2_FFULL_INTERRUPT       12
+#define SMI_ACC2X2_FWM_INTERRUPT         13
+
+#define SMI_ACC2X2_INT1_LOWG         0
+#define SMI_ACC2X2_INT2_LOWG         1
+#define SMI_ACC2X2_INT1_HIGHG        0
+#define SMI_ACC2X2_INT2_HIGHG        1
+#define SMI_ACC2X2_INT1_SLOPE        0
+#define SMI_ACC2X2_INT2_SLOPE        1
+#define SMI_ACC2X2_INT1_SLO_NO_MOT   0
+#define SMI_ACC2X2_INT2_SLO_NO_MOT   1
+#define SMI_ACC2X2_INT1_DTAP         0
+#define SMI_ACC2X2_INT2_DTAP         1
+#define SMI_ACC2X2_INT1_STAP         0
+#define SMI_ACC2X2_INT2_STAP         1
+#define SMI_ACC2X2_INT1_ORIENT       0
+#define SMI_ACC2X2_INT2_ORIENT       1
+#define SMI_ACC2X2_INT1_FLAT         0
+#define SMI_ACC2X2_INT2_FLAT         1
+#define SMI_ACC2X2_INT1_NDATA        0
+#define SMI_ACC2X2_INT2_NDATA        1
+#define SMI_ACC2X2_INT1_FWM          0
+#define SMI_ACC2X2_INT2_FWM          1
+#define SMI_ACC2X2_INT1_FFULL        0
+#define SMI_ACC2X2_INT2_FFULL        1
+
+#define SMI_ACC2X2_SRC_LOWG         0
+#define SMI_ACC2X2_SRC_HIGHG        1
+#define SMI_ACC2X2_SRC_SLOPE        2
+#define SMI_ACC2X2_SRC_SLO_NO_MOT   3
+#define SMI_ACC2X2_SRC_TAP          4
+#define SMI_ACC2X2_SRC_DATA         5
+
+#define SMI_ACC2X2_INT1_OUTPUT      0
+#define SMI_ACC2X2_INT2_OUTPUT      1
+#define SMI_ACC2X2_INT1_LEVEL       0
+#define SMI_ACC2X2_INT2_LEVEL       1
+
+#define SMI_ACC2X2_LOW_DURATION            0
+#define SMI_ACC2X2_HIGH_DURATION           1
+#define SMI_ACC2X2_SLOPE_DURATION          2
+#define SMI_ACC2X2_SLO_NO_MOT_DURATION     3
+
+#define SMI_ACC2X2_LOW_THRESHOLD            0
+#define SMI_ACC2X2_HIGH_THRESHOLD           1
+#define SMI_ACC2X2_SLOPE_THRESHOLD          2
+#define SMI_ACC2X2_SLO_NO_MOT_THRESHOLD     3
+
+
+#define SMI_ACC2X2_LOWG_HYST                0
+#define SMI_ACC2X2_HIGHG_HYST               1
+
+#define SMI_ACC2X2_ORIENT_THETA             0
+#define SMI_ACC2X2_FLAT_THETA               1
+
+#define SMI_ACC2X2_I2C_SELECT               0
+#define SMI_ACC2X2_I2C_EN                   1
+
+#define SMI_ACC2X2_SLOW_COMP_X              0
+#define SMI_ACC2X2_SLOW_COMP_Y              1
+#define SMI_ACC2X2_SLOW_COMP_Z              2
+
+#define SMI_ACC2X2_CUT_OFF                  0
+#define SMI_ACC2X2_OFFSET_TRIGGER_X         1
+#define SMI_ACC2X2_OFFSET_TRIGGER_Y         2
+#define SMI_ACC2X2_OFFSET_TRIGGER_Z         3
+
+#define SMI_ACC2X2_GP0                      0
+#define SMI_ACC2X2_GP1                      1
+
+#define SMI_ACC2X2_SLO_NO_MOT_EN_X          0
+#define SMI_ACC2X2_SLO_NO_MOT_EN_Y          1
+#define SMI_ACC2X2_SLO_NO_MOT_EN_Z          2
+#define SMI_ACC2X2_SLO_NO_MOT_EN_SEL        3
+
+#define SMI_ACC2X2_WAKE_UP_DUR_20MS         0
+#define SMI_ACC2X2_WAKE_UP_DUR_80MS         1
+#define SMI_ACC2X2_WAKE_UP_DUR_320MS                2
+#define SMI_ACC2X2_WAKE_UP_DUR_2560MS               3
+
+#define SMI_ACC2X2_SELF_TEST0_ON            1
+#define SMI_ACC2X2_SELF_TEST1_ON            2
+
+#define SMI_ACC2X2_EE_W_OFF                 0
+#define SMI_ACC2X2_EE_W_ON                  1
+
+#define SMI_ACC2X2_LOW_TH_IN_G(gthres, range)           ((256 * gthres) / range)
+
+
+#define SMI_ACC2X2_HIGH_TH_IN_G(gthres, range)          ((256 * gthres) / range)
+
+
+#define SMI_ACC2X2_LOW_HY_IN_G(ghyst, range)            ((32 * ghyst) / range)
+
+
+#define SMI_ACC2X2_HIGH_HY_IN_G(ghyst, range)           ((32 * ghyst) / range)
+
+
+#define SMI_ACC2X2_SLOPE_TH_IN_G(gthres, range)    ((128 * gthres) / range)
+
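+/*
+ * The *_IN_G() helpers above convert a g-denominated threshold or
+ * hysteresis value for the selected range using plain integer arithmetic;
+ * for example, SMI_ACC2X2_LOW_TH_IN_G(1, 2) evaluates to
+ * (256 * 1) / 2 = 128, with the division truncating any remainder.
+ */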
+
+#define SMI_ACC2X2_GET_BITSLICE(regvar, bitname)\
+	((regvar & bitname##__MSK) >> bitname##__POS)
+
+
+#define SMI_ACC2X2_SET_BITSLICE(regvar, bitname, val)\
+	((regvar & ~bitname##__MSK) | ((val<<bitname##__POS)&bitname##__MSK))
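+/*
+ * Typical use of the bit-slice helpers (illustrative, using the field
+ * macros defined above): read-modify-write a single field of a register
+ * value without touching its other bits, e.g.
+ *
+ *	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_RANGE_SEL,
+ *			SMI_ACC2X2_RANGE_4G);
+ *	range = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_RANGE_SEL);
+ */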
+
+#define CHECK_CHIP_ID_TIME_MAX 5
+#define SMI_ACC255_CHIP_ID 0xFA
+#define SMI_ACC250E_CHIP_ID 0xF9
+#define SMI_ACC222E_CHIP_ID 0xF8
+#define SMI_ACC280_CHIP_ID 0xFB
+#define SMI_ACC355_CHIP_ID 0xEA
+
+#define SMI_ACC255_TYPE 0
+#define SMI_ACC250E_TYPE 1
+#define SMI_ACC222E_TYPE 2
+#define SMI_ACC280_TYPE 3
+
+#define MAX_FIFO_F_LEVEL 32
+#define MAX_FIFO_F_BYTES 6
+#define SMI_ACC_MAX_RETRY_I2C_XFER (100)
+
+#ifdef CONFIG_DOUBLE_TAP
+#define DEFAULT_TAP_JUDGE_PERIOD 1000    /* default tap judge period: 1000 ms (1 second) */
+#endif
+
+/*! Bosch sensor unknown place*/
+#define BOSCH_SENSOR_PLACE_UNKNOWN (-1)
+/*! Bosch sensor remapping table size P0~P7*/
+#define MAX_AXIS_REMAP_TAB_SZ 8
+
+/* How was SMI_ACC enabled (set to operation mode) */
+#define SMI_ACC_ENABLED_ALL 0
+#define SMI_ACC_ENABLED_SGM 1
+#define SMI_ACC_ENABLED_DTAP 2
+#define SMI_ACC_ENABLED_INPUT 3
+#define SMI_ACC_ENABLED_BSX 4
+
+
+/*!
+ * @brief BMI058 feature macro definitions
+ */
+
+#define SMI_ACC2X2_FIFO_DAT_SEL_X                     1
+#define SMI_ACC2X2_FIFO_DAT_SEL_Y                     2
+#define SMI_ACC2X2_FIFO_DAT_SEL_Z                     3
+
+#ifdef CONFIG_SENSORS_BMI058
+#define C_BMI058_One_U8X                                 1
+#define C_BMI058_Two_U8X                                 2
+#define BMI058_OFFSET_TRIGGER_X                SMI_ACC2X2_OFFSET_TRIGGER_Y
+#define BMI058_OFFSET_TRIGGER_Y                SMI_ACC2X2_OFFSET_TRIGGER_X
+
+/*! BMI058 X AXIS OFFSET REG definition*/
+#define BMI058_OFFSET_X_AXIS_REG              SMI_ACC2X2_OFFSET_Y_AXIS_REG
+/*! BMI058 Y AXIS OFFSET REG definition*/
+#define BMI058_OFFSET_Y_AXIS_REG              SMI_ACC2X2_OFFSET_X_AXIS_REG
+
+#define BMI058_FIFO_DAT_SEL_X                       SMI_ACC2X2_FIFO_DAT_SEL_Y
+#define BMI058_FIFO_DAT_SEL_Y                       SMI_ACC2X2_FIFO_DAT_SEL_X
+
+/*! SMI130_ACC common slow no motion X interrupt type definition*/
+#define SMI_ACC2X2_SLOW_NO_MOT_X_INT          12
+/*! SMI130_ACC common slow no motion Y interrupt type definition*/
+#define SMI_ACC2X2_SLOW_NO_MOT_Y_INT          13
+/*! SMI130_ACC common High G X interrupt type definition*/
+#define SMI_ACC2X2_HIGHG_X_INT          1
+/*! SMI130_ACC common High G Y interrupt type definition*/
+#define SMI_ACC2X2_HIGHG_Y_INT          2
+/*! SMI130_ACC common slope X interrupt type definition*/
+#define SMI_ACC2X2_SLOPE_X_INT          5
+/*! SMI130_ACC common slope Y interrupt type definition*/
+#define SMI_ACC2X2_SLOPE_Y_INT          6
+
+/*! this structure holds the interrupt-type differences
+ * between SMI130_ACC and BMI058.
+ */
+struct interrupt_map_t {
+	int x;
+	int y;
+};
+/*! The SMI130_ACC common interrupt type definitions must be used
+ * in place of the BMI058 interrupt types whose X and Y variants are
+ * swapped in the HW registers.
+ * The swapped interrupt types are:
+ * slow_no_mot_x_int and slow_no_mot_y_int
+ * highg_x_int and highg_y_int
+ * slope_x_int and slope_y_int
+ */
+static const struct interrupt_map_t int_map[] = {
+	{SMI_ACC2X2_SLOW_NO_MOT_X_INT, SMI_ACC2X2_SLOW_NO_MOT_Y_INT},
+	{SMI_ACC2X2_HIGHG_X_INT, SMI_ACC2X2_HIGHG_Y_INT},
+	{SMI_ACC2X2_SLOPE_X_INT, SMI_ACC2X2_SLOPE_Y_INT}
+};
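+/*
+ * Illustrative lookup: with BMI058 the X and Y variants listed above are
+ * swapped, so a request for e.g. SMI_ACC2X2_SLOPE_X_INT can be translated
+ * by finding the matching .x entry in int_map[] and using its .y
+ * counterpart (SMI_ACC2X2_SLOPE_Y_INT) instead, and vice versa.
+ */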
+
+/*! high g or slope interrupt type definition for BMI058*/
+/*! High G interrupt of x, y, z axis happened */
+#define HIGH_G_INTERRUPT_X            HIGH_G_INTERRUPT_Y_HAPPENED
+#define HIGH_G_INTERRUPT_Y            HIGH_G_INTERRUPT_X_HAPPENED
+#define HIGH_G_INTERRUPT_Z            HIGH_G_INTERRUPT_Z_HAPPENED
+/*! High G interrupt of x, y, z negative axis happened */
+#define HIGH_G_INTERRUPT_X_N          HIGH_G_INTERRUPT_Y_NEGATIVE_HAPPENED
+#define HIGH_G_INTERRUPT_Y_N          HIGH_G_INTERRUPT_X_NEGATIVE_HAPPENED
+#define HIGH_G_INTERRUPT_Z_N          HIGH_G_INTERRUPT_Z_NEGATIVE_HAPPENED
+/*! Slope interrupt of x, y, z axis happened */
+#define SLOPE_INTERRUPT_X             SLOPE_INTERRUPT_Y_HAPPENED
+#define SLOPE_INTERRUPT_Y             SLOPE_INTERRUPT_X_HAPPENED
+#define SLOPE_INTERRUPT_Z             SLOPE_INTERRUPT_Z_HAPPENED
+/*! Slope interrupt of x, y, z negative axis happened */
+#define SLOPE_INTERRUPT_X_N           SLOPE_INTERRUPT_Y_NEGATIVE_HAPPENED
+#define SLOPE_INTERRUPT_Y_N           SLOPE_INTERRUPT_X_NEGATIVE_HAPPENED
+#define SLOPE_INTERRUPT_Z_N           SLOPE_INTERRUPT_Z_NEGATIVE_HAPPENED
+
+
+#else
+
+/*! high g or slope interrupt type definition*/
+/*! High G interrupt of x, y, z axis happened */
+#define HIGH_G_INTERRUPT_X            HIGH_G_INTERRUPT_X_HAPPENED
+#define HIGH_G_INTERRUPT_Y            HIGH_G_INTERRUPT_Y_HAPPENED
+#define HIGH_G_INTERRUPT_Z            HIGH_G_INTERRUPT_Z_HAPPENED
+/*! High G interrupt of x, y, z negative axis happened */
+#define HIGH_G_INTERRUPT_X_N          HIGH_G_INTERRUPT_X_NEGATIVE_HAPPENED
+#define HIGH_G_INTERRUPT_Y_N          HIGH_G_INTERRUPT_Y_NEGATIVE_HAPPENED
+#define HIGH_G_INTERRUPT_Z_N          HIGH_G_INTERRUPT_Z_NEGATIVE_HAPPENED
+/*! Slope interrupt of x, y, z axis happened */
+#define SLOPE_INTERRUPT_X             SLOPE_INTERRUPT_X_HAPPENED
+#define SLOPE_INTERRUPT_Y             SLOPE_INTERRUPT_Y_HAPPENED
+#define SLOPE_INTERRUPT_Z             SLOPE_INTERRUPT_Z_HAPPENED
+/*! Slope interrupt of x, y, z negative axis happened */
+#define SLOPE_INTERRUPT_X_N           SLOPE_INTERRUPT_X_NEGATIVE_HAPPENED
+#define SLOPE_INTERRUPT_Y_N           SLOPE_INTERRUPT_Y_NEGATIVE_HAPPENED
+#define SLOPE_INTERRUPT_Z_N           SLOPE_INTERRUPT_Z_NEGATIVE_HAPPENED
+
+
+#endif /* End of CONFIG_SENSORS_BMI058 */
+
+/*! Workaround mask definition, with complete resolution, used for
+ * write operations on the FIFO_CONFIG_1 register (0x3E) */
+#define FIFO_WORKAROUNDS_MSK         SMI_ACC2X2_FIFO_TRIGGER_SOURCE__MSK
+
+struct smi130_acc_type_map_t {
+
+	/*! smi130_acc sensor chip id */
+	uint16_t chip_id;
+
+	/*! smi130_acc sensor type */
+	uint16_t sensor_type;
+
+	/*! smi130_acc sensor name */
+	const char *sensor_name;
+};
+
+static const struct smi130_acc_type_map_t sensor_type_map[] = {
+
+	{SMI_ACC255_CHIP_ID, SMI_ACC255_TYPE, "SMI_ACC255/254"},
+	{SMI_ACC355_CHIP_ID, SMI_ACC255_TYPE, "SMI_ACC355"},
+	{SMI_ACC250E_CHIP_ID, SMI_ACC250E_TYPE, "SMI_ACC250E"},
+	{SMI_ACC222E_CHIP_ID, SMI_ACC222E_TYPE, "SMI_ACC222E"},
+	{SMI_ACC280_CHIP_ID, SMI_ACC280_TYPE, "SMI_ACC280"},
+
+};
+
+/*!
+ * Bst sensor common definition;
+ * the parameters are supplied by the BSP file.
+ */
+struct bosch_sensor_specific {
+	char *name;
+	/* 0 to 7 */
+	int place;
+	int irq;
+	int (*irq_gpio_cfg)(void);
+};
+
+
+/*!
+ * this structure hides the axis-remap details,
+ * because its layout might be changed
+ */
+struct bosch_sensor_axis_remap {
+	/* src selects which source axis is mapped to the target x, y, z axis */
+	/* if a target OS axis is remapped from (-)x,
+	 * src is 0, sign_* is (-)1 */
+	/* if a target OS axis is remapped from (-)y,
+	 * src is 1, sign_* is (-)1 */
+	/* if a target OS axis is remapped from (-)z,
+	 * src is 2, sign_* is (-)1 */
+	int src_x:3;
+	int src_y:3;
+	int src_z:3;
+
+	int sign_x:2;
+	int sign_y:2;
+	int sign_z:2;
+};
+
+struct bosch_sensor_data {
+	union {
+		int16_t v[3];
+		struct {
+			int16_t x;
+			int16_t y;
+			int16_t z;
+		};
+	};
+};
+
+struct smi130_accacc {
+	s16 x;
+	s16 y;
+	s16 z;
+};
+
+struct smi130_acc_data {
+	struct i2c_client *smi130_acc_client;
+	atomic_t delay;
+	atomic_t enable;
+	atomic_t selftest_result;
+	unsigned int chip_id;
+	unsigned int fifo_count;
+	unsigned char fifo_datasel;
+	unsigned char mode;
+	signed char sensor_type;
+	uint64_t timestamp;
+	uint64_t fifo_time;
+	uint64_t base_time;
+	uint64_t acc_count;
+	uint64_t time_odr;
+	uint8_t debug_level;
+	struct work_struct report_data_work;
+	int is_timer_running;
+	struct hrtimer timer;
+	ktime_t work_delay_kt;
+	struct input_dev *input;
+
+	struct bosch_dev *bosch_acc;
+
+	struct smi130_accacc value;
+	struct mutex value_mutex;
+	struct mutex enable_mutex;
+	struct mutex mode_mutex;
+	struct delayed_work work;
+	struct work_struct irq_work;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif
+	int16_t IRQ;
+	struct bosch_sensor_specific *bosch_pd;
+
+	int smi_acc_mode_enabled;
+	struct input_dev *dev_interrupt;
+
+#ifdef CONFIG_SIG_MOTION
+	struct class *g_sensor_class;
+	struct device *g_sensor_dev;
+
+	/*struct smi_acc250_platform_data *pdata;*/
+	atomic_t en_sig_motion;
+#endif
+
+#ifdef CONFIG_DOUBLE_TAP
+	struct class *g_sensor_class_doubletap;
+	struct device *g_sensor_dev_doubletap;
+	atomic_t en_double_tap;
+	unsigned char tap_times;
+	struct mutex		tap_mutex;
+	struct timer_list	tap_timer;
+	int tap_time_period;
+#endif
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void smi130_acc_early_suspend(struct early_suspend *h);
+static void smi130_acc_late_resume(struct early_suspend *h);
+#endif
+
+static int smi130_acc_set_mode(struct i2c_client *client,
+			u8 mode, u8 enabled_mode);
+static int smi130_acc_get_mode(struct i2c_client *client, u8 *mode);
+static int smi130_acc_get_fifo_mode(struct i2c_client *client, u8 *fifo_mode);
+static int smi130_acc_set_fifo_mode(struct i2c_client *client, u8 fifo_mode);
+static int smi130_acc_normal_to_suspend(struct smi130_acc_data *smi130_acc,
+				unsigned char data1, unsigned char data2);
+
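+/*
+ * Millisecond delay helper: delays of up to 20 ms use usleep_range(),
+ * which is more accurate for short waits than msleep(); longer delays
+ * fall back to msleep().
+ */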
+static void smi130_acc_delay(u32 msec)
+{
+	if (msec <= 20)
+		usleep_range(msec * 1000, msec * 1000);
+	else
+		msleep(msec);
+}
+/*Remapping for SMI_ACC2X2*/
+static const struct bosch_sensor_axis_remap
+bosch_axis_remap_tab_dft[MAX_AXIS_REMAP_TAB_SZ] = {
+	/* src_x src_y src_z  sign_x  sign_y  sign_z */
+	{  0,    1,    2,     1,      1,      1 }, /* P0 */
+	{  1,    0,    2,     1,     -1,      1 }, /* P1 */
+	{  0,    1,    2,    -1,     -1,      1 }, /* P2 */
+	{  1,    0,    2,    -1,      1,      1 }, /* P3 */
+
+	{  0,    1,    2,    -1,      1,     -1 }, /* P4 */
+	{  1,    0,    2,    -1,     -1,     -1 }, /* P5 */
+	{  0,    1,    2,     1,     -1,     -1 }, /* P6 */
+	{  1,    0,    2,     1,      1,     -1 }, /* P7 */
+};
+
+
+static void bosch_remap_sensor_data(struct bosch_sensor_data *data,
+		const struct bosch_sensor_axis_remap *remap)
+{
+	struct bosch_sensor_data tmp;
+
+	tmp.x = data->v[remap->src_x] * remap->sign_x;
+	tmp.y = data->v[remap->src_y] * remap->sign_y;
+	tmp.z = data->v[remap->src_z] * remap->sign_z;
+
+	memcpy(data, &tmp, sizeof(*data));
+}
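+/*
+ * Example (from the P1 row of bosch_axis_remap_tab_dft above):
+ * {src_x = 1, src_y = 0, src_z = 2, sign_x = 1, sign_y = -1, sign_z = 1}
+ * turns an input (x, y, z) into (y, -x, z).
+ */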
+
+
+static void bosch_remap_sensor_data_dft_tab(struct bosch_sensor_data *data,
+		int place)
+{
+	/* a sensor at place 0 does not need to be remapped */
+	if ((place <= 0) || (place >= MAX_AXIS_REMAP_TAB_SZ))
+		return;
+
+	bosch_remap_sensor_data(data, &bosch_axis_remap_tab_dft[place]);
+}
+
+static void smi130_acc_remap_sensor_data(struct smi130_accacc *val,
+		struct smi130_acc_data *client_data)
+{
+	struct bosch_sensor_data bsd;
+	int place;
+
+	if ((NULL == client_data->bosch_pd) || (BOSCH_SENSOR_PLACE_UNKNOWN
+			 == client_data->bosch_pd->place))
+		place = BOSCH_SENSOR_PLACE_UNKNOWN;
+	else
+		place = client_data->bosch_pd->place;
+
+#ifdef CONFIG_SENSORS_BMI058
+/* x and y need to be swapped because of the BMI058 HW register layout */
+	bsd.y = val->x;
+	bsd.x = val->y;
+	bsd.z = val->z;
+#else
+	bsd.x = val->x;
+	bsd.y = val->y;
+	bsd.z = val->z;
+#endif
+
+	bosch_remap_sensor_data_dft_tab(&bsd, place);
+
+	val->x = bsd.x;
+	val->y = bsd.y;
+	val->z = bsd.z;
+
+}
+
+
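+/*
+ * Register access helpers: when SMI130_ACC_USE_BASIC_I2C_FUNC is defined
+ * the byte accessors build raw i2c_msg transfers and retry them up to
+ * SMI_ACC_MAX_RETRY_I2C_XFER times with a 1 ms delay in between;
+ * otherwise they use the SMBus/i2c_master_* calls without retries.
+ */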
+static int smi130_acc_smbus_read_byte(struct i2c_client *client,
+		unsigned char reg_addr, unsigned char *data)
+{
+#if !defined SMI130_ACC_USE_BASIC_I2C_FUNC
+	s32 dummy;
+	int len = 1;
+	if (NULL == client)
+		return -ENODEV;
+
+	while (0 != len--) {
+#ifdef SMI130_ACC_SMBUS
+		dummy = i2c_smbus_read_byte_data(client, reg_addr);
+		if (dummy < 0) {
+			PERR("i2c bus read error");
+			return -EIO;
+		}
+		*data = (u8)(dummy & 0xff);
+#else
+		dummy = i2c_master_send(client, (char *)&reg_addr, 1);
+		if (dummy < 0)
+			return -EIO;
+
+		dummy = i2c_master_recv(client, (char *)data, 1);
+		if (dummy < 0)
+			return -EIO;
+#endif
+		reg_addr++;
+		data++;
+	}
+	return 0;
+#else
+	int retry;
+	int len = 1;
+	struct i2c_msg msg[] = {
+		{
+		 .addr = client->addr,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = &reg_addr,
+		},
+
+		{
+		 .addr = client->addr,
+		 .flags = I2C_M_RD,
+		 .len = len,
+		 .buf = data,
+		 },
+	};
+
+	for (retry = 0; retry < SMI_ACC_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			smi130_acc_delay(1);
+	}
+
+	if (SMI_ACC_MAX_RETRY_I2C_XFER <= retry) {
+		PERR("I2C xfer error");
+		return -EIO;
+	}
+
+	return 0;
+#endif
+}
+
+static int smi130_acc_smbus_write_byte(struct i2c_client *client,
+		unsigned char reg_addr, unsigned char *data)
+{
+#if !defined SMI130_ACC_USE_BASIC_I2C_FUNC
+	s32 dummy;
+	int len = 1;
+#ifndef SMI130_ACC_SMBUS
+	u8 buffer[2];
+#endif
+	if (NULL == client)
+		return -ENODEV;
+
+	while (0 != len--) {
+#ifdef SMI130_ACC_SMBUS
+		dummy = i2c_smbus_write_byte_data(client, reg_addr, *data);
+#else
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		dummy = i2c_master_send(client, (char *)buffer, 2);
+#endif
+		reg_addr++;
+		data++;
+		if (dummy < 0) {
+			PERR("error writing i2c bus");
+			return -EIO;
+		}
+
+	}
+	return 0;
+#else
+	u8 buffer[2];
+	int retry;
+	int len = 1;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 2,
+			.buf = buffer,
+		},
+	};
+	while (0 != len--) {
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		for (retry = 0; retry < SMI_ACC_MAX_RETRY_I2C_XFER; retry++) {
+			if (i2c_transfer(client->adapter, msg,
+						ARRAY_SIZE(msg)) > 0) {
+				break;
+			} else {
+				smi130_acc_delay(1);
+			}
+		}
+		if (SMI_ACC_MAX_RETRY_I2C_XFER <= retry) {
+			PERR("I2C xfer error");
+			return -EIO;
+		}
+		reg_addr++;
+		data++;
+	}
+
+	return 0;
+#endif
+}
+
+static int smi130_acc_smbus_read_byte_block(struct i2c_client *client,
+		unsigned char reg_addr, unsigned char *data, unsigned char len)
+{
+	int retry;
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg_addr,
+		},
+
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	for (retry = 0; retry < SMI_ACC_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			smi130_acc_delay(1);
+	}
+
+	if (SMI_ACC_MAX_RETRY_I2C_XFER <= retry) {
+		PERR("I2C xfer error");
+		return -EIO;
+	}
+	return 0;
+}
+
+static int smi_acc_i2c_burst_read(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u16 len)
+{
+	int retry;
+
+	struct i2c_msg msg[] = {
+		{
+		 .addr = client->addr,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = &reg_addr,
+		},
+
+		{
+		 .addr = client->addr,
+		 .flags = I2C_M_RD,
+		 .len = len,
+		 .buf = data,
+		 },
+	};
+
+	for (retry = 0; retry < SMI_ACC_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			smi130_acc_delay(1);
+	}
+
+	if (SMI_ACC_MAX_RETRY_I2C_XFER <= retry) {
+		PINFO("I2C xfer error");
+		return -EIO;
+	}
+
+	return 0;
+}
+
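+/*
+ * Poll the chip-id register up to CHECK_CHIP_ID_TIME_MAX times and match the
+ * value against sensor_type_map[] to fill in data->sensor_type and
+ * data->chip_id.
+ */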
+static int smi130_acc_check_chip_id(struct i2c_client *client,
+					struct smi130_acc_data *data)
+{
+	int i = 0;
+	int err = 0;
+	unsigned char chip_id = 0;
+	unsigned char read_count = 0;
+	unsigned char smi130_acc_sensor_type_count = 0;
+
+	smi130_acc_sensor_type_count =
+		sizeof(sensor_type_map) / sizeof(struct smi130_acc_type_map_t);
+
+	while (read_count++ < CHECK_CHIP_ID_TIME_MAX) {
+		if (smi130_acc_smbus_read_byte(client, SMI_ACC2X2_CHIP_ID_REG,
+							&chip_id) < 0) {
+			PERR("Bosch Sensortec Device not found\n\n"
+			"i2c bus read error, read chip_id:%d\n", chip_id);
+			continue;
+		} else {
+		for (i = 0; i < smi130_acc_sensor_type_count; i++) {
+			if (sensor_type_map[i].chip_id == chip_id) {
+				data->sensor_type =
+					sensor_type_map[i].sensor_type;
+				data->chip_id = chip_id;
+				PINFO("Bosch Sensortec Device detected,\n\n"
+					" HW IC name: %s\n",
+						sensor_type_map[i].sensor_name);
+					return err;
+			}
+		}
+		if (i < smi130_acc_sensor_type_count)
+			return err;
+		else {
+			if (read_count == CHECK_CHIP_ID_TIME_MAX) {
+				PERR("Failed! Bosch Sensortec Device\n\n"
+					" not found, mismatch chip_id:%d\n",
+								chip_id);
+					err = -ENODEV;
+					return err;
+			}
+		}
+		smi130_acc_delay(1);
+		}
+	}
+	return err;
+}
+
+#ifdef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+static int smi130_acc_set_newdata(struct i2c_client *client,
+			unsigned char channel, unsigned char int_newdata)
+{
+
+	unsigned char data = 0;
+	int comres = 0;
+
+	switch (channel) {
+	case SMI_ACC2X2_INT1_NDATA:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_NEWDATA__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data,
+				SMI_ACC2X2_EN_INT1_PAD_NEWDATA, int_newdata);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_NEWDATA__REG, &data);
+		break;
+	case SMI_ACC2X2_INT2_NDATA:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_NEWDATA__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data,
+				SMI_ACC2X2_EN_INT2_PAD_NEWDATA, int_newdata);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_NEWDATA__REG, &data);
+		break;
+	default:
+		comres = -1;
+		break;
+	}
+
+	return comres;
+
+}
+#endif /* CONFIG_SMI_ACC_ENABLE_NEWDATA_INT */
+
+#ifdef SMI_ACC2X2_ENABLE_INT1
+static int smi130_acc_set_int1_pad_sel(struct i2c_client *client, unsigned char
+		int1sel)
+{
+	int comres = 0;
+	unsigned char data = 0;
+	unsigned char state;
+	state = 0x01;
+
+
+	switch (int1sel) {
+	case 0:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_LOWG__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_LOWG,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_LOWG__REG, &data);
+		break;
+	case 1:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_HIGHG__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_HIGHG,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_HIGHG__REG, &data);
+		break;
+	case 2:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_SLOPE__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_SLOPE,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_SLOPE__REG, &data);
+		break;
+	case 3:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_DB_TAP__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_DB_TAP,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_DB_TAP__REG, &data);
+		break;
+	case 4:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_SNG_TAP__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_SNG_TAP,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_SNG_TAP__REG, &data);
+		break;
+	case 5:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_ORIENT__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_ORIENT,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_ORIENT__REG, &data);
+		break;
+	case 6:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_FLAT__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_FLAT,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_FLAT__REG, &data);
+		break;
+	case 7:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_SLO_NO_MOT__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT1_PAD_SLO_NO_MOT,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT1_PAD_SLO_NO_MOT__REG, &data);
+		break;
+
+	default:
+		break;
+	}
+
+	return comres;
+}
+#endif /* SMI_ACC2X2_ENABLE_INT1 */
+
+#ifdef SMI_ACC2X2_ENABLE_INT2
+static int smi130_acc_set_int2_pad_sel(struct i2c_client *client, unsigned char
+		int2sel)
+{
+	int comres = 0;
+	unsigned char data = 0;
+	unsigned char state;
+	state = 0x01;
+
+
+	switch (int2sel) {
+	case 0:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_LOWG__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_LOWG,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_LOWG__REG, &data);
+		break;
+	case 1:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_HIGHG__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_HIGHG,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_HIGHG__REG, &data);
+		break;
+	case 2:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_SLOPE__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_SLOPE,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_SLOPE__REG, &data);
+		break;
+	case 3:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_DB_TAP__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_DB_TAP,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_DB_TAP__REG, &data);
+		break;
+	case 4:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_SNG_TAP__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_SNG_TAP,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_SNG_TAP__REG, &data);
+		break;
+	case 5:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_ORIENT__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_ORIENT,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_ORIENT__REG, &data);
+		break;
+	case 6:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_FLAT__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_FLAT,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_FLAT__REG, &data);
+		break;
+	case 7:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_SLO_NO_MOT__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_INT2_PAD_SLO_NO_MOT,
+				state);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_EN_INT2_PAD_SLO_NO_MOT__REG, &data);
+		break;
+	default:
+		break;
+	}
+
+	return comres;
+}
+#endif /* SMI_ACC2X2_ENABLE_INT2 */
+
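+/*
+ * Enable or disable a single interrupt source. InterruptType values 12..15
+ * are the slow/no-motion enables and are written to their own registers;
+ * values 0..11 toggle bits in INT_ENABLE1/INT_ENABLE2 via read-modify-write.
+ */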
+static int smi130_acc_set_Int_Enable(struct i2c_client *client, unsigned char
+		InterruptType , unsigned char value)
+{
+	int comres = 0;
+	unsigned char data1 = 0;
+	unsigned char data2 = 0;
+
+	if ((11 < InterruptType) && (InterruptType < 16)) {
+		switch (InterruptType) {
+		case 12:
+			/* slow/no motion X Interrupt  */
+			comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_X_INT__REG, &data1);
+			data1 = SMI_ACC2X2_SET_BITSLICE(data1,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_X_INT, value);
+			comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_X_INT__REG, &data1);
+			break;
+		case 13:
+			/* slow/no motion Y Interrupt  */
+			comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_Y_INT__REG, &data1);
+			data1 = SMI_ACC2X2_SET_BITSLICE(data1,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_Y_INT, value);
+			comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_Y_INT__REG, &data1);
+			break;
+		case 14:
+			/* slow/no motion Z Interrupt  */
+			comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_Z_INT__REG, &data1);
+			data1 = SMI_ACC2X2_SET_BITSLICE(data1,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_Z_INT, value);
+			comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_Z_INT__REG, &data1);
+			break;
+		case 15:
+			/* slow / no motion Interrupt select */
+			comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_SEL_INT__REG, &data1);
+			data1 = SMI_ACC2X2_SET_BITSLICE(data1,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_SEL_INT, value);
+			comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_INT_SLO_NO_MOT_EN_SEL_INT__REG, &data1);
+			break;
+		}
+
+		return comres;
+	}
+
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_INT_ENABLE1_REG, &data1);
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_INT_ENABLE2_REG, &data2);
+
+	value = value & 1;
+	switch (InterruptType) {
+	case 0:
+		/* Low G Interrupt  */
+		data2 = SMI_ACC2X2_SET_BITSLICE(data2, SMI_ACC2X2_EN_LOWG_INT, value);
+		break;
+
+	case 1:
+		/* High G X Interrupt */
+		data2 = SMI_ACC2X2_SET_BITSLICE(data2, SMI_ACC2X2_EN_HIGHG_X_INT,
+				value);
+		break;
+
+	case 2:
+		/* High G Y Interrupt */
+		data2 = SMI_ACC2X2_SET_BITSLICE(data2, SMI_ACC2X2_EN_HIGHG_Y_INT,
+				value);
+		break;
+
+	case 3:
+		/* High G Z Interrupt */
+		data2 = SMI_ACC2X2_SET_BITSLICE(data2, SMI_ACC2X2_EN_HIGHG_Z_INT,
+				value);
+		break;
+
+	case 4:
+		/* New Data Interrupt  */
+		data2 = SMI_ACC2X2_SET_BITSLICE(data2, SMI_ACC2X2_EN_NEW_DATA_INT,
+				value);
+		break;
+
+	case 5:
+		/* Slope X Interrupt */
+		data1 = SMI_ACC2X2_SET_BITSLICE(data1, SMI_ACC2X2_EN_SLOPE_X_INT,
+				value);
+		break;
+
+	case 6:
+		/* Slope Y Interrupt */
+		data1 = SMI_ACC2X2_SET_BITSLICE(data1, SMI_ACC2X2_EN_SLOPE_Y_INT,
+				value);
+		break;
+
+	case 7:
+		/* Slope Z Interrupt */
+		data1 = SMI_ACC2X2_SET_BITSLICE(data1, SMI_ACC2X2_EN_SLOPE_Z_INT,
+				value);
+		break;
+
+	case 8:
+		/* Single Tap Interrupt */
+		data1 = SMI_ACC2X2_SET_BITSLICE(data1, SMI_ACC2X2_EN_SINGLE_TAP_INT,
+				value);
+		break;
+
+	case 9:
+		/* Double Tap Interrupt */
+		data1 = SMI_ACC2X2_SET_BITSLICE(data1, SMI_ACC2X2_EN_DOUBLE_TAP_INT,
+				value);
+		break;
+
+	case 10:
+		/* Orient Interrupt  */
+		data1 = SMI_ACC2X2_SET_BITSLICE(data1, SMI_ACC2X2_EN_ORIENT_INT, value);
+		break;
+
+	case 11:
+		/* Flat Interrupt */
+		data1 = SMI_ACC2X2_SET_BITSLICE(data1, SMI_ACC2X2_EN_FLAT_INT, value);
+		break;
+
+	default:
+		break;
+	}
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_INT_ENABLE1_REG,
+			&data1);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_INT_ENABLE2_REG,
+			&data2);
+
+	return comres;
+}
+
+
+#if defined(SMI_ACC2X2_ENABLE_INT1) || defined(SMI_ACC2X2_ENABLE_INT2)
+static int smi130_acc_get_interruptstatus1(struct i2c_client *client, unsigned char
+		*intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_STATUS1_REG, &data);
+	*intstatus = data;
+
+	return comres;
+}
+
+#ifdef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+/*
+static int smi130_acc_get_interruptstatus2(struct i2c_client *client, unsigned char
+		*intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_STATUS2_REG, &data);
+	*intstatus = data;
+
+	return comres;
+}
+*/
+#endif
+
+static int smi130_acc_get_HIGH_first(struct i2c_client *client, unsigned char
+						param, unsigned char *intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	switch (param) {
+	case 0:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_STATUS_ORIENT_HIGH_REG, &data);
+		data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_HIGHG_FIRST_X);
+		*intstatus = data;
+		break;
+	case 1:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_STATUS_ORIENT_HIGH_REG, &data);
+		data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_HIGHG_FIRST_Y);
+		*intstatus = data;
+		break;
+	case 2:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_STATUS_ORIENT_HIGH_REG, &data);
+		data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_HIGHG_FIRST_Z);
+		*intstatus = data;
+		break;
+	default:
+		break;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_HIGH_sign(struct i2c_client *client, unsigned char
+		*intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_STATUS_ORIENT_HIGH_REG,
+			&data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_HIGHG_SIGN_S);
+	*intstatus = data;
+
+	return comres;
+}
+
+#ifndef CONFIG_SIG_MOTION
+static int smi130_acc_get_slope_first(struct i2c_client *client, unsigned char
+	param, unsigned char *intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	switch (param) {
+	case 0:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_STATUS_TAP_SLOPE_REG, &data);
+		data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_SLOPE_FIRST_X);
+		*intstatus = data;
+		break;
+	case 1:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_STATUS_TAP_SLOPE_REG, &data);
+		data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_SLOPE_FIRST_Y);
+		*intstatus = data;
+		break;
+	case 2:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_STATUS_TAP_SLOPE_REG, &data);
+		data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_SLOPE_FIRST_Z);
+		*intstatus = data;
+		break;
+	default:
+		break;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_slope_sign(struct i2c_client *client, unsigned char
+		*intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_STATUS_TAP_SLOPE_REG,
+			&data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_SLOPE_SIGN_S);
+	*intstatus = data;
+
+	return comres;
+}
+#endif
+
+static int smi130_acc_get_orient_mbl_status(struct i2c_client *client, unsigned char
+		*intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_STATUS_ORIENT_HIGH_REG,
+			&data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_ORIENT_S);
+	*intstatus = data;
+
+	return comres;
+}
+
+static int smi130_acc_get_orient_mbl_flat_status(struct i2c_client *client, unsigned
+		char *intstatus)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_STATUS_ORIENT_HIGH_REG,
+			&data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FLAT_S);
+	*intstatus = data;
+
+	return comres;
+}
+#endif /* defined(SMI_ACC2X2_ENABLE_INT1)||defined(SMI_ACC2X2_ENABLE_INT2) */
+
+static int smi130_acc_set_Int_Mode(struct i2c_client *client, unsigned char Mode)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_INT_MODE_SEL__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_INT_MODE_SEL, Mode);
+	comres = smi130_acc_smbus_write_byte(client,
+			SMI_ACC2X2_INT_MODE_SEL__REG, &data);
+
+
+	return comres;
+}
+
+static int smi130_acc_get_Int_Mode(struct i2c_client *client, unsigned char *Mode)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_INT_MODE_SEL__REG, &data);
+	data  = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_INT_MODE_SEL);
+	*Mode = data;
+
+
+	return comres;
+}
+static int smi130_acc_set_slope_duration(struct i2c_client *client, unsigned char
+		duration)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_SLOPE_DUR__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_SLOPE_DUR, duration);
+	comres = smi130_acc_smbus_write_byte(client,
+			SMI_ACC2X2_SLOPE_DUR__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_slope_duration(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_SLOPE_DURN_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_SLOPE_DUR);
+	*status = data;
+
+
+	return comres;
+}
+
+static int smi130_acc_set_slope_no_mot_duration(struct i2c_client *client,
+			unsigned char duration)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI130_ACC_SLO_NO_MOT_DUR__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI130_ACC_SLO_NO_MOT_DUR, duration);
+	comres = smi130_acc_smbus_write_byte(client,
+			SMI130_ACC_SLO_NO_MOT_DUR__REG, &data);
+
+
+	return comres;
+}
+
+static int smi130_acc_get_slope_no_mot_duration(struct i2c_client *client,
+			unsigned char *status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI130_ACC_SLO_NO_MOT_DUR__REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI130_ACC_SLO_NO_MOT_DUR);
+	*status = data;
+
+
+	return comres;
+}
+
+static int smi130_acc_set_slope_threshold(struct i2c_client *client,
+		unsigned char threshold)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	data = threshold;
+	comres = smi130_acc_smbus_write_byte(client,
+			SMI_ACC2X2_SLOPE_THRES__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_slope_threshold(struct i2c_client *client,
+		unsigned char *status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_SLOPE_THRES_REG, &data);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_slope_no_mot_threshold(struct i2c_client *client,
+		unsigned char threshold)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	data = threshold;
+	comres = smi130_acc_smbus_write_byte(client,
+			SMI_ACC2X2_SLO_NO_MOT_THRES_REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_slope_no_mot_threshold(struct i2c_client *client,
+		unsigned char *status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_SLO_NO_MOT_THRES_REG, &data);
+	*status = data;
+
+	return comres;
+}
+
+
+static int smi130_acc_set_low_g_duration(struct i2c_client *client, unsigned char
+		duration)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_LOWG_DUR__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_LOWG_DUR, duration);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_LOWG_DUR__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_low_g_duration(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_LOW_DURN_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_LOWG_DUR);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_low_g_threshold(struct i2c_client *client, unsigned char
+		threshold)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_LOWG_THRES__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_LOWG_THRES, threshold);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_LOWG_THRES__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_low_g_threshold(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_LOW_THRES_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_LOWG_THRES);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_high_g_duration(struct i2c_client *client, unsigned char
+		duration)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_HIGHG_DUR__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_HIGHG_DUR, duration);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_HIGHG_DUR__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_high_g_duration(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_HIGH_DURN_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_HIGHG_DUR);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_high_g_threshold(struct i2c_client *client, unsigned char
+		threshold)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_HIGHG_THRES__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_HIGHG_THRES, threshold);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_HIGHG_THRES__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_high_g_threshold(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_HIGH_THRES_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_HIGHG_THRES);
+	*status = data;
+
+	return comres;
+}
+
+
+static int smi130_acc_set_tap_duration(struct i2c_client *client, unsigned char
+		duration)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_DUR__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_TAP_DUR, duration);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_TAP_DUR__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_tap_duration(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_PARAM_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_TAP_DUR);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_tap_shock(struct i2c_client *client, unsigned char setval)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_SHOCK_DURN__REG,
+			&data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_TAP_SHOCK_DURN, setval);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_TAP_SHOCK_DURN__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_tap_shock(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_PARAM_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_TAP_SHOCK_DURN);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_tap_quiet(struct i2c_client *client, unsigned char
+		duration)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_QUIET_DURN__REG,
+			&data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_TAP_QUIET_DURN, duration);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_TAP_QUIET_DURN__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_tap_quiet(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_PARAM_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_TAP_QUIET_DURN);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_tap_threshold(struct i2c_client *client, unsigned char
+		threshold)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_THRES__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_TAP_THRES, threshold);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_TAP_THRES__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_tap_threshold(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_THRES_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_TAP_THRES);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_tap_samp(struct i2c_client *client, unsigned char samp)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_SAMPLES__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_TAP_SAMPLES, samp);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_TAP_SAMPLES__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_tap_samp(struct i2c_client *client, unsigned char *status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TAP_THRES_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_TAP_SAMPLES);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_orient_mbl_mode(struct i2c_client *client, unsigned char mode)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_ORIENT_MODE__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_ORIENT_MODE, mode);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_ORIENT_MODE__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_orient_mbl_mode(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_ORIENT_PARAM_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_ORIENT_MODE);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_orient_mbl_blocking(struct i2c_client *client, unsigned char
+		samp)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_ORIENT_BLOCK__REG,
+			&data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_ORIENT_BLOCK, samp);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_ORIENT_BLOCK__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_orient_mbl_blocking(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_ORIENT_PARAM_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_ORIENT_BLOCK);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_orient_mbl_hyst(struct i2c_client *client, unsigned char
+		orient_mblhyst)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_ORIENT_HYST__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_ORIENT_HYST, orient_mblhyst);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_ORIENT_HYST__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_orient_mbl_hyst(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_ORIENT_PARAM_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_ORIENT_HYST);
+	*status = data;
+
+	return comres;
+}
+static int smi130_acc_set_theta_blocking(struct i2c_client *client, unsigned char
+		thetablk)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_THETA_BLOCK__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_THETA_BLOCK, thetablk);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_THETA_BLOCK__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_theta_blocking(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_THETA_BLOCK_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_THETA_BLOCK);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_theta_flat(struct i2c_client *client, unsigned char
+		thetaflat)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_THETA_FLAT__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_THETA_FLAT, thetaflat);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_THETA_FLAT__REG, &data);
+
+	return comres;
+}
+
+static int smi130_acc_get_theta_flat(struct i2c_client *client, unsigned char
+		*status)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_THETA_FLAT_REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_THETA_FLAT);
+	*status = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_flat_hold_time(struct i2c_client *client, unsigned char
+		holdtime)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_FLAT_HOLD_TIME__REG,
+			&data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_FLAT_HOLD_TIME, holdtime);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_FLAT_HOLD_TIME__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_flat_hold_time(struct i2c_client *client, unsigned char
+		*holdtime)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_FLAT_HOLD_TIME_REG,
+			&data);
+	data  = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FLAT_HOLD_TIME);
+	*holdtime = data;
+
+	return comres;
+}
+
+/*!
+ * @brief smi130_acc: switch from normal to suspend mode
+ * @param[i] smi130_acc
+ * @param[i] data1, write to PMU_LPW
+ * @param[i] data2, write to PMU_LOW_NOISE
+ *
+ * @return zero on success, non-zero on failure
+ */
+static int smi130_acc_normal_to_suspend(struct smi130_acc_data *smi130_acc,
+				unsigned char data1, unsigned char data2)
+{
+	unsigned char current_fifo_mode;
+	unsigned char current_op_mode;
+	if (smi130_acc == NULL)
+		return -ENODEV;
+	/* get current op mode from mode register */
+	if (smi130_acc_get_mode(smi130_acc->smi130_acc_client, &current_op_mode) < 0)
+		return -EIO;
+	/* only aimed at operation mode change from normal/lpw1 mode
+	 * to suspend state.
+	 */
+	if (current_op_mode == SMI_ACC2X2_MODE_NORMAL ||
+			current_op_mode == SMI_ACC2X2_MODE_LOWPOWER1) {
+		/* get current fifo mode from fifo config register */
+		if (smi130_acc_get_fifo_mode(smi130_acc->smi130_acc_client,
+							&current_fifo_mode) < 0)
+			return -EIO;
+		else {
+			smi130_acc_smbus_write_byte(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_LOW_NOISE_CTRL_REG, &data2);
+			smi130_acc_smbus_write_byte(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_CTRL_REG, &data1);
+			/*! Aim at fifo workarounds with FIFO_CONFIG_1 */
+			current_fifo_mode |= FIFO_WORKAROUNDS_MSK;
+			smi130_acc_smbus_write_byte(smi130_acc->smi130_acc_client,
+				SMI_ACC2X2_FIFO_MODE__REG, &current_fifo_mode);
+			smi130_acc_delay(3);
+			return 0;
+		}
+	} else {
+		smi130_acc_smbus_write_byte(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_LOW_NOISE_CTRL_REG, &data2);
+		smi130_acc_smbus_write_byte(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_CTRL_REG, &data1);
+		smi130_acc_delay(3);
+		return 0;
+	}
+
+}
+
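+/*
+ * Change the power mode. smi_acc_mode_enabled tracks which logical users
+ * (one bit per enabled_mode) still need the sensor, so a suspend request is
+ * only carried out once the last user has been cleared.
+ */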
+static int smi130_acc_set_mode(struct i2c_client *client, unsigned char mode,
+						unsigned char enabled_mode)
+{
+	int comres = 0;
+	unsigned char data1 = 0;
+	unsigned char data2 = 0;
+	int ret = 0;
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	mutex_lock(&smi130_acc->mode_mutex);
+	if (SMI_ACC2X2_MODE_SUSPEND == mode) {
+		if (enabled_mode != SMI_ACC_ENABLED_ALL) {
+			if ((smi130_acc->smi_acc_mode_enabled &
+						(1<<enabled_mode)) == 0) {
+				/* sensor is already closed in this mode */
+				mutex_unlock(&smi130_acc->mode_mutex);
+				return 0;
+			} else {
+				smi130_acc->smi_acc_mode_enabled &= ~(1<<enabled_mode);
+			}
+		} else {
+			/* shut down: close all users and force it */
+			smi130_acc->smi_acc_mode_enabled = 0;
+		}
+	} else if (SMI_ACC2X2_MODE_NORMAL == mode) {
+		if ((smi130_acc->smi_acc_mode_enabled & (1<<enabled_mode)) != 0) {
+			/* sensor is already enabled in this mode */
+			mutex_unlock(&smi130_acc->mode_mutex);
+			return 0;
+		} else {
+			smi130_acc->smi_acc_mode_enabled |= (1<<enabled_mode);
+		}
+	} else {
+		/* other mode: close all users and force it */
+		smi130_acc->smi_acc_mode_enabled = 0;
+	}
+	mutex_unlock(&smi130_acc->mode_mutex);
+
+	if (mode < 6) {
+		comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_MODE_CTRL_REG,
+				&data1);
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_LOW_NOISE_CTRL_REG,
+				&data2);
+		switch (mode) {
+		case SMI_ACC2X2_MODE_NORMAL:
+				data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+						SMI_ACC2X2_MODE_CTRL, 0);
+				data2  = SMI_ACC2X2_SET_BITSLICE(data2,
+						SMI_ACC2X2_LOW_POWER_MODE, 0);
+				smi130_acc_smbus_write_byte(client,
+						SMI_ACC2X2_MODE_CTRL_REG, &data1);
+				smi130_acc_delay(3);
+				smi130_acc_smbus_write_byte(client,
+					SMI_ACC2X2_LOW_NOISE_CTRL_REG, &data2);
+				break;
+		case SMI_ACC2X2_MODE_LOWPOWER1:
+				data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+						SMI_ACC2X2_MODE_CTRL, 2);
+				data2  = SMI_ACC2X2_SET_BITSLICE(data2,
+						SMI_ACC2X2_LOW_POWER_MODE, 0);
+				smi130_acc_smbus_write_byte(client,
+						SMI_ACC2X2_MODE_CTRL_REG, &data1);
+				smi130_acc_delay(3);
+				smi130_acc_smbus_write_byte(client,
+					SMI_ACC2X2_LOW_NOISE_CTRL_REG, &data2);
+				break;
+		case SMI_ACC2X2_MODE_SUSPEND:
+			if (smi130_acc->smi_acc_mode_enabled != 0) {
+				PERR("smi_acc still working");
+				return 0;
+			}
+			data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+					SMI_ACC2X2_MODE_CTRL, 4);
+			data2  = SMI_ACC2X2_SET_BITSLICE(data2,
+					SMI_ACC2X2_LOW_POWER_MODE, 0);
+			/* aimed at anomaly resolution when switching to suspend */
+			ret = smi130_acc_normal_to_suspend(smi130_acc, data1, data2);
+			if (ret < 0)
+				PERR("Error switching to suspend");
+			break;
+		case SMI_ACC2X2_MODE_DEEP_SUSPEND:
+			if (smi130_acc->smi_acc_mode_enabled != 0) {
+				PERR("smi_acc still working");
+				return 0;
+			}
+			data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+				SMI_ACC2X2_MODE_CTRL, 1);
+			data2  = SMI_ACC2X2_SET_BITSLICE(data2,
+				SMI_ACC2X2_LOW_POWER_MODE, 1);
+			smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_MODE_CTRL_REG, &data1);
+			smi130_acc_delay(3);
+			smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_LOW_NOISE_CTRL_REG, &data2);
+			break;
+		case SMI_ACC2X2_MODE_LOWPOWER2:
+				data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+						SMI_ACC2X2_MODE_CTRL, 2);
+				data2  = SMI_ACC2X2_SET_BITSLICE(data2,
+						SMI_ACC2X2_LOW_POWER_MODE, 1);
+				smi130_acc_smbus_write_byte(client,
+						SMI_ACC2X2_MODE_CTRL_REG, &data1);
+				smi130_acc_delay(3);
+				smi130_acc_smbus_write_byte(client,
+					SMI_ACC2X2_LOW_NOISE_CTRL_REG, &data2);
+				break;
+		case SMI_ACC2X2_MODE_STANDBY:
+				data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+						SMI_ACC2X2_MODE_CTRL, 4);
+				data2  = SMI_ACC2X2_SET_BITSLICE(data2,
+						SMI_ACC2X2_LOW_POWER_MODE, 1);
+				smi130_acc_smbus_write_byte(client,
+					SMI_ACC2X2_LOW_NOISE_CTRL_REG, &data2);
+				smi130_acc_delay(3);
+				smi130_acc_smbus_write_byte(client,
+						SMI_ACC2X2_MODE_CTRL_REG, &data1);
+				break;
+		}
+	} else {
+		comres = -1;
+	}
+
+	return comres;
+}
+
+
+static int smi130_acc_get_mode(struct i2c_client *client, unsigned char *mode)
+{
+	int comres = 0;
+	unsigned char data1 = 0;
+	unsigned char data2 = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_MODE_CTRL_REG, &data1);
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_LOW_NOISE_CTRL_REG,
+			&data2);
+
+	data1  = (data1 & 0xE0) >> 5;
+	data2  = (data2 & 0x40) >> 6;
+
+
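+	/*
+	 * data1 now holds the mode control bits (bits 7:5 of the mode control
+	 * register), data2 the low-power mode select bit (bit 6 of the low
+	 * noise control register); decode the combination into a mode value.
+	 */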
+	if ((data1 == 0x00) && (data2 == 0x00))
+		*mode = SMI_ACC2X2_MODE_NORMAL;
+	else if ((data1 == 0x02) && (data2 == 0x00))
+		*mode = SMI_ACC2X2_MODE_LOWPOWER1;
+	else if ((data1 == 0x04 || data1 == 0x06) && (data2 == 0x00))
+		*mode = SMI_ACC2X2_MODE_SUSPEND;
+	else if ((data1 & 0x01) == 0x01)
+		*mode = SMI_ACC2X2_MODE_DEEP_SUSPEND;
+	else if ((data1 == 0x02) && (data2 == 0x01))
+		*mode = SMI_ACC2X2_MODE_LOWPOWER2;
+	else if ((data1 == 0x04) && (data2 == 0x01))
+		*mode = SMI_ACC2X2_MODE_STANDBY;
+	else
+		*mode = SMI_ACC2X2_MODE_DEEP_SUSPEND;
+
+	return comres;
+}
+
+static int smi130_acc_set_range(struct i2c_client *client, unsigned char Range)
+{
+	int comres = 0;
+	unsigned char data1 = 0;
+
+	if ((Range == 3) || (Range == 5) || (Range == 8) || (Range == 12)) {
+		comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_RANGE_SEL_REG,
+				&data1);
+		switch (Range) {
+		case SMI_ACC2X2_RANGE_2G:
+			data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+					SMI_ACC2X2_RANGE_SEL, 3);
+			break;
+		case SMI_ACC2X2_RANGE_4G:
+			data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+					SMI_ACC2X2_RANGE_SEL, 5);
+			break;
+		case SMI_ACC2X2_RANGE_8G:
+			data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+					SMI_ACC2X2_RANGE_SEL, 8);
+			break;
+		case SMI_ACC2X2_RANGE_16G:
+			data1  = SMI_ACC2X2_SET_BITSLICE(data1,
+					SMI_ACC2X2_RANGE_SEL, 12);
+			break;
+		default:
+			break;
+		}
+		comres += smi130_acc_smbus_write_byte(client, SMI_ACC2X2_RANGE_SEL_REG,
+				&data1);
+	} else {
+		comres = -1;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_range(struct i2c_client *client, unsigned char *Range)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_RANGE_SEL__REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_RANGE_SEL);
+	*Range = data;
+
+	return comres;
+}
+
+
+static int smi130_acc_set_bandwidth(struct i2c_client *client, unsigned char BW)
+{
+	int comres = 0;
+	unsigned char data = 0;
+	int Bandwidth = 0;
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (BW > 7 && BW < 16) {
+		switch (BW) {
+		case SMI_ACC2X2_BW_7_81HZ:
+			Bandwidth = SMI_ACC2X2_BW_7_81HZ;
+			smi130_acc->time_odr = 64000000;
+
+			/*  7.81 Hz      64000 uS   */
+			break;
+		case SMI_ACC2X2_BW_15_63HZ:
+			Bandwidth = SMI_ACC2X2_BW_15_63HZ;
+			smi130_acc->time_odr = 32000000;
+			/*  15.63 Hz     32000 uS   */
+			break;
+		case SMI_ACC2X2_BW_31_25HZ:
+			Bandwidth = SMI_ACC2X2_BW_31_25HZ;
+			smi130_acc->time_odr = 16000000;
+			/*  31.25 Hz     16000 uS   */
+			break;
+		case SMI_ACC2X2_BW_62_50HZ:
+			Bandwidth = SMI_ACC2X2_BW_62_50HZ;
+			smi130_acc->time_odr = 8000000;
+			/*  62.50 Hz     8000 uS   */
+			break;
+		case SMI_ACC2X2_BW_125HZ:
+			Bandwidth = SMI_ACC2X2_BW_125HZ;
+			smi130_acc->time_odr = 4000000;
+			/*  125 Hz       4000 uS   */
+			break;
+		case SMI_ACC2X2_BW_250HZ:
+			Bandwidth = SMI_ACC2X2_BW_250HZ;
+			smi130_acc->time_odr = 2000000;
+			/*  250 Hz       2000 uS   */
+			break;
+		case SMI_ACC2X2_BW_500HZ:
+			Bandwidth = SMI_ACC2X2_BW_500HZ;
+			smi130_acc->time_odr = 1000000;
+			/*  500 Hz       1000 uS   */
+			break;
+		case SMI_ACC2X2_BW_1000HZ:
+			Bandwidth = SMI_ACC2X2_BW_1000HZ;
+			smi130_acc->time_odr = 500000;
+			/*  1000 Hz      500 uS   */
+			break;
+		default:
+			break;
+		}
+		comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_BANDWIDTH__REG,
+				&data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_BANDWIDTH, Bandwidth);
+		comres += smi130_acc_smbus_write_byte(client, SMI_ACC2X2_BANDWIDTH__REG,
+				&data);
+	} else {
+		comres = -1;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_bandwidth(struct i2c_client *client, unsigned char *BW)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_BANDWIDTH__REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_BANDWIDTH);
+	*BW = data;
+
+	return comres;
+}
+
+int smi130_acc_get_sleep_duration(struct i2c_client *client, unsigned char
+		*sleep_dur)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_SLEEP_DUR__REG, &data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_SLEEP_DUR);
+	*sleep_dur = data;
+
+	return comres;
+}
+
+int smi130_acc_set_sleep_duration(struct i2c_client *client, unsigned char
+		sleep_dur)
+{
+	int comres = 0;
+	unsigned char data = 0;
+	int sleep_duration = 0;
+
+	if (sleep_dur > 4 && sleep_dur < 16) {
+		switch (sleep_dur) {
+		case SMI_ACC2X2_SLEEP_DUR_0_5MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_0_5MS;
+
+			/*  0.5 MS   */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_1MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_1MS;
+
+			/*  1 MS  */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_2MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_2MS;
+
+			/*  2 MS  */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_4MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_4MS;
+
+			/*  4 MS   */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_6MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_6MS;
+
+			/*  6 MS  */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_10MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_10MS;
+
+			/*  10 MS  */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_25MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_25MS;
+
+			/*  25 MS  */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_50MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_50MS;
+
+			/*  50 MS   */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_100MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_100MS;
+
+			/*  100 MS  */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_500MS:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_500MS;
+
+			/*  500 MS   */
+			break;
+		case SMI_ACC2X2_SLEEP_DUR_1S:
+			sleep_duration = SMI_ACC2X2_SLEEP_DUR_1S;
+
+			/*  1 SECS   */
+			break;
+		default:
+			break;
+		}
+		comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_SLEEP_DUR__REG,
+				&data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_SLEEP_DUR,
+				sleep_duration);
+		comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_SLEEP_DUR__REG,
+				&data);
+	} else {
+		comres = -1;
+	}
+
+
+	return comres;
+}
+
+static int smi130_acc_get_fifo_mode(struct i2c_client *client, unsigned char
+		*fifo_mode)
+{
+	int comres;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_FIFO_MODE__REG, &data);
+	*fifo_mode = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FIFO_MODE);
+
+	return comres;
+}
+
+static int smi130_acc_set_fifo_mode(struct i2c_client *client, unsigned char
+		fifo_mode)
+{
+	unsigned char data = 0;
+	int comres = 0;
+
+	if (fifo_mode < 4) {
+		comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_FIFO_MODE__REG,
+				&data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_FIFO_MODE, fifo_mode);
+		/*! Aim at fifo workarounds with FIFO_CONFIG_1 */
+		data |= FIFO_WORKAROUNDS_MSK;
+		comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_FIFO_MODE__REG,
+				&data);
+	} else {
+		comres = -1;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_fifo_trig(struct i2c_client *client, unsigned char
+		*fifo_trig)
+{
+	int comres;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_FIFO_TRIGGER_ACTION__REG, &data);
+	*fifo_trig = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FIFO_TRIGGER_ACTION);
+
+	return comres;
+}
+
+static int smi130_acc_set_fifo_trig(struct i2c_client *client, unsigned char
+		fifo_trig)
+{
+	unsigned char data = 0;
+	int comres = 0;
+
+	if (fifo_trig < 4) {
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_FIFO_TRIGGER_ACTION__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_FIFO_TRIGGER_ACTION,
+				fifo_trig);
+		/*! Aim at fifo workarounds with FIFO_CONFIG_1 */
+		data |= FIFO_WORKAROUNDS_MSK;
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_FIFO_TRIGGER_ACTION__REG, &data);
+	} else {
+		comres = -1;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_fifo_trig_src(struct i2c_client *client, unsigned char
+		*trig_src)
+{
+	int comres;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_FIFO_TRIGGER_SOURCE__REG, &data);
+	*trig_src = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FIFO_TRIGGER_SOURCE);
+
+	return comres;
+}
+
+static int smi130_acc_set_fifo_trig_src(struct i2c_client *client, unsigned char
+		trig_src)
+{
+	unsigned char data = 0;
+	int comres = 0;
+
+	if (trig_src < 4) {
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_FIFO_TRIGGER_SOURCE__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_FIFO_TRIGGER_SOURCE,
+				trig_src);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_FIFO_TRIGGER_SOURCE__REG, &data);
+	} else {
+		comres = -1;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_fifo_framecount(struct i2c_client *client, unsigned char
+			 *framecount)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_FIFO_FRAME_COUNTER_S__REG, &data);
+	*framecount = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FIFO_FRAME_COUNTER_S);
+
+	return comres;
+}
+
+static int smi130_acc_get_fifo_data_sel(struct i2c_client *client, unsigned char
+		*data_sel)
+{
+	int comres;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_FIFO_DATA_SELECT__REG, &data);
+	*data_sel = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FIFO_DATA_SELECT);
+
+	return comres;
+}
+
+static int smi130_acc_set_fifo_data_sel(struct i2c_client *client, unsigned char
+		data_sel)
+{
+	unsigned char data = 0;
+	int comres = 0;
+
+	if (data_sel < 4) {
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_FIFO_DATA_SELECT__REG,
+				&data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_FIFO_DATA_SELECT,
+				data_sel);
+		/*! Aim at fifo workarounds with FIFO_CONFIG_1 */
+		data |= FIFO_WORKAROUNDS_MSK;
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_FIFO_DATA_SELECT__REG,
+				&data);
+	} else {
+		comres = -1;
+	}
+
+	return comres;
+}
+
+
+static int smi130_acc_get_offset_target(struct i2c_client *client, unsigned char
+		channel, unsigned char *offset)
+{
+	unsigned char data = 0;
+	int comres = 0;
+
+	switch (channel) {
+	case SMI_ACC2X2_CUT_OFF:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_COMP_CUTOFF__REG, &data);
+		*offset = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_COMP_CUTOFF);
+		break;
+	case SMI_ACC2X2_OFFSET_TRIGGER_X:
+		comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_COMP_TARGET_OFFSET_X__REG, &data);
+		*offset = SMI_ACC2X2_GET_BITSLICE(data,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_X);
+		break;
+	case SMI_ACC2X2_OFFSET_TRIGGER_Y:
+		comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_COMP_TARGET_OFFSET_Y__REG, &data);
+		*offset = SMI_ACC2X2_GET_BITSLICE(data,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Y);
+		break;
+	case SMI_ACC2X2_OFFSET_TRIGGER_Z:
+		comres = smi130_acc_smbus_read_byte(client,
+			SMI_ACC2X2_COMP_TARGET_OFFSET_Z__REG, &data);
+		*offset = SMI_ACC2X2_GET_BITSLICE(data,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Z);
+		break;
+	default:
+		comres = -1;
+		break;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_set_offset_target(struct i2c_client *client, unsigned char
+		channel, unsigned char offset)
+{
+	unsigned char data = 0;
+	int comres = 0;
+
+	switch (channel) {
+	case SMI_ACC2X2_CUT_OFF:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_COMP_CUTOFF__REG, &data);
+		data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_COMP_CUTOFF,
+				offset);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_COMP_CUTOFF__REG, &data);
+		break;
+	case SMI_ACC2X2_OFFSET_TRIGGER_X:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_X__REG,
+				&data);
+		data = SMI_ACC2X2_SET_BITSLICE(data,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_X,
+				offset);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_X__REG,
+				&data);
+		break;
+	case SMI_ACC2X2_OFFSET_TRIGGER_Y:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Y__REG,
+				&data);
+		data = SMI_ACC2X2_SET_BITSLICE(data,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Y,
+				offset);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Y__REG,
+				&data);
+		break;
+	case SMI_ACC2X2_OFFSET_TRIGGER_Z:
+		comres = smi130_acc_smbus_read_byte(client,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Z__REG,
+				&data);
+		data = SMI_ACC2X2_SET_BITSLICE(data,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Z,
+				offset);
+		comres = smi130_acc_smbus_write_byte(client,
+				SMI_ACC2X2_COMP_TARGET_OFFSET_Z__REG,
+				&data);
+		break;
+	default:
+		comres = -1;
+		break;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_get_cal_ready(struct i2c_client *client,
+					unsigned char *calrdy)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_FAST_CAL_RDY_S__REG,
+			&data);
+	data = SMI_ACC2X2_GET_BITSLICE(data, SMI_ACC2X2_FAST_CAL_RDY_S);
+	*calrdy = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_cal_trigger(struct i2c_client *client, unsigned char
+		caltrigger)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_CAL_TRIGGER__REG, &data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_CAL_TRIGGER, caltrigger);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_CAL_TRIGGER__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_write_reg(struct i2c_client *client, unsigned char addr,
+		unsigned char *data)
+{
+	int comres = 0;
+	comres = smi130_acc_smbus_write_byte(client, addr, data);
+
+	return comres;
+}
+
+
+static int smi130_acc_set_offset_x(struct i2c_client *client, unsigned char
+		offsetfilt)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	data =  offsetfilt;
+
+#ifdef CONFIG_SENSORS_BMI058
+	comres = smi130_acc_smbus_write_byte(client, BMI058_OFFSET_X_AXIS_REG,
+							&data);
+#else
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_OFFSET_X_AXIS_REG,
+						&data);
+#endif
+
+	return comres;
+}
+
+
+static int smi130_acc_get_offset_x(struct i2c_client *client, unsigned char
+						*offsetfilt)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+#ifdef CONFIG_SENSORS_BMI058
+	comres = smi130_acc_smbus_read_byte(client, BMI058_OFFSET_X_AXIS_REG,
+							&data);
+#else
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_OFFSET_X_AXIS_REG,
+							&data);
+#endif
+	*offsetfilt = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_offset_y(struct i2c_client *client, unsigned char
+						offsetfilt)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	data =  offsetfilt;
+
+#ifdef CONFIG_SENSORS_BMI058
+	comres = smi130_acc_smbus_write_byte(client, BMI058_OFFSET_Y_AXIS_REG,
+							&data);
+#else
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_OFFSET_Y_AXIS_REG,
+							&data);
+#endif
+	return comres;
+}
+
+static int smi130_acc_get_offset_y(struct i2c_client *client, unsigned char
+						*offsetfilt)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+#ifdef CONFIG_SENSORS_BMI058
+	comres = smi130_acc_smbus_read_byte(client, BMI058_OFFSET_Y_AXIS_REG,
+							&data);
+#else
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_OFFSET_Y_AXIS_REG,
+							&data);
+#endif
+	*offsetfilt = data;
+
+	return comres;
+}
+
+static int smi130_acc_set_offset_z(struct i2c_client *client, unsigned char
+						offsetfilt)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	data =  offsetfilt;
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_OFFSET_Z_AXIS_REG,
+						&data);
+
+	return comres;
+}
+
+static int smi130_acc_get_offset_z(struct i2c_client *client, unsigned char
+						*offsetfilt)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_OFFSET_Z_AXIS_REG,
+						&data);
+	*offsetfilt = data;
+
+	return comres;
+}
+
+
+static int smi130_acc_set_selftest_st(struct i2c_client *client, unsigned char
+		selftest)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_EN_SELF_TEST__REG,
+			&data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_EN_SELF_TEST, selftest);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_EN_SELF_TEST__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_set_selftest_stn(struct i2c_client *client, unsigned char stn)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_NEG_SELF_TEST__REG,
+			&data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_NEG_SELF_TEST, stn);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_NEG_SELF_TEST__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_set_selftest_amp(struct i2c_client *client, unsigned char amp)
+{
+	int comres = 0;
+	unsigned char data = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_SELF_TEST_AMP__REG,
+			&data);
+	data = SMI_ACC2X2_SET_BITSLICE(data, SMI_ACC2X2_SELF_TEST_AMP, amp);
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_SELF_TEST_AMP__REG,
+			&data);
+
+	return comres;
+}
+
+static int smi130_acc_read_accel_x(struct i2c_client *client,
+				signed char sensor_type, short *a_x)
+{
+	int comres = 0;
+	unsigned char data[2];
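+	/*
+	 * Each case reads the two raw bytes for the configured resolution,
+	 * merges LSB and MSB, then sign-extends the left-justified value by
+	 * shifting it to the top of a short and back down again.
+	 */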
+
+	switch (sensor_type) {
+	case 0:
+		comres = smi130_acc_smbus_read_byte_block(client,
+					SMI_ACC2X2_ACC_X12_LSB__REG, data, 2);
+		*a_x = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_X12_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_X_MSB)<<(SMI_ACC2X2_ACC_X12_LSB__LEN));
+		*a_x = *a_x << (sizeof(short)*8-(SMI_ACC2X2_ACC_X12_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		*a_x = *a_x >> (sizeof(short)*8-(SMI_ACC2X2_ACC_X12_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		break;
+	case 1:
+		comres = smi130_acc_smbus_read_byte_block(client,
+					SMI_ACC2X2_ACC_X10_LSB__REG, data, 2);
+		*a_x = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_X10_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_X_MSB)<<(SMI_ACC2X2_ACC_X10_LSB__LEN));
+		*a_x = *a_x << (sizeof(short)*8-(SMI_ACC2X2_ACC_X10_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		*a_x = *a_x >> (sizeof(short)*8-(SMI_ACC2X2_ACC_X10_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		break;
+	case 2:
+		comres = smi130_acc_smbus_read_byte_block(client,
+					SMI_ACC2X2_ACC_X8_LSB__REG, data, 2);
+		*a_x = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_X8_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_X_MSB)<<(SMI_ACC2X2_ACC_X8_LSB__LEN));
+		*a_x = *a_x << (sizeof(short)*8-(SMI_ACC2X2_ACC_X8_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		*a_x = *a_x >> (sizeof(short)*8-(SMI_ACC2X2_ACC_X8_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		break;
+	case 3:
+		comres = smi130_acc_smbus_read_byte_block(client,
+					SMI_ACC2X2_ACC_X14_LSB__REG, data, 2);
+		*a_x = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_X14_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_X_MSB)<<(SMI_ACC2X2_ACC_X14_LSB__LEN));
+		*a_x = *a_x << (sizeof(short)*8-(SMI_ACC2X2_ACC_X14_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		*a_x = *a_x >> (sizeof(short)*8-(SMI_ACC2X2_ACC_X14_LSB__LEN
+					+ SMI_ACC2X2_ACC_X_MSB__LEN));
+		break;
+	default:
+		break;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_soft_reset(struct i2c_client *client)
+{
+	int comres = 0;
+	unsigned char data = SMI_ACC2X2_EN_SOFT_RESET_VALUE;
+
+	comres = smi130_acc_smbus_write_byte(client, SMI_ACC2X2_EN_SOFT_RESET__REG,
+					&data);
+
+	return comres;
+}
+
+static int smi130_acc_read_accel_y(struct i2c_client *client,
+				signed char sensor_type, short *a_y)
+{
+	int comres = 0;
+	unsigned char data[2];
+
+	switch (sensor_type) {
+	case 0:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Y12_LSB__REG, data, 2);
+		*a_y = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Y12_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Y_MSB)<<(SMI_ACC2X2_ACC_Y12_LSB__LEN));
+		*a_y = *a_y << (sizeof(short)*8-(SMI_ACC2X2_ACC_Y12_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		*a_y = *a_y >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Y12_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		break;
+	case 1:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Y10_LSB__REG, data, 2);
+		*a_y = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Y10_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Y_MSB)<<(SMI_ACC2X2_ACC_Y10_LSB__LEN));
+		*a_y = *a_y << (sizeof(short)*8-(SMI_ACC2X2_ACC_Y10_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		*a_y = *a_y >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Y10_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		break;
+	case 2:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Y8_LSB__REG, data, 2);
+		*a_y = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Y8_LSB)|
+				(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Y_MSB)<<(SMI_ACC2X2_ACC_Y8_LSB__LEN));
+		*a_y = *a_y << (sizeof(short)*8-(SMI_ACC2X2_ACC_Y8_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		*a_y = *a_y >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Y8_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		break;
+	case 3:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Y14_LSB__REG, data, 2);
+		*a_y = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Y14_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Y_MSB)<<(SMI_ACC2X2_ACC_Y14_LSB__LEN));
+		*a_y = *a_y << (sizeof(short)*8-(SMI_ACC2X2_ACC_Y14_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		*a_y = *a_y >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Y14_LSB__LEN
+						+ SMI_ACC2X2_ACC_Y_MSB__LEN));
+		break;
+	default:
+		break;
+	}
+
+	return comres;
+}
+
+static int smi130_acc_read_accel_z(struct i2c_client *client,
+				signed char sensor_type, short *a_z)
+{
+	int comres = 0;
+	unsigned char data[2];
+
+	switch (sensor_type) {
+	case 0:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Z12_LSB__REG, data, 2);
+		*a_z = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Z12_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Z_MSB)<<(SMI_ACC2X2_ACC_Z12_LSB__LEN));
+		*a_z = *a_z << (sizeof(short)*8-(SMI_ACC2X2_ACC_Z12_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		*a_z = *a_z >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Z12_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		break;
+	case 1:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Z10_LSB__REG, data, 2);
+		*a_z = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Z10_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Z_MSB)<<(SMI_ACC2X2_ACC_Z10_LSB__LEN));
+		*a_z = *a_z << (sizeof(short)*8-(SMI_ACC2X2_ACC_Z10_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		*a_z = *a_z >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Z10_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		break;
+	case 2:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Z8_LSB__REG, data, 2);
+		*a_z = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Z8_LSB)|
+			(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Z_MSB)<<(SMI_ACC2X2_ACC_Z8_LSB__LEN));
+		*a_z = *a_z << (sizeof(short)*8-(SMI_ACC2X2_ACC_Z8_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		*a_z = *a_z >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Z8_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		break;
+	case 3:
+		comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_Z14_LSB__REG, data, 2);
+		*a_z = SMI_ACC2X2_GET_BITSLICE(data[0], SMI_ACC2X2_ACC_Z14_LSB)|
+				(SMI_ACC2X2_GET_BITSLICE(data[1],
+				SMI_ACC2X2_ACC_Z_MSB)<<(SMI_ACC2X2_ACC_Z14_LSB__LEN));
+		*a_z = *a_z << (sizeof(short)*8-(SMI_ACC2X2_ACC_Z14_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		*a_z = *a_z >> (sizeof(short)*8-(SMI_ACC2X2_ACC_Z14_LSB__LEN
+						+ SMI_ACC2X2_ACC_Z_MSB__LEN));
+		break;
+	default:
+		break;
+	}
+
+	return comres;
+}
+
+
+static int smi130_acc_read_temperature(struct i2c_client *client,
+					signed char *temperature)
+{
+	unsigned char data = 0;
+	int comres = 0;
+
+	comres = smi130_acc_smbus_read_byte(client, SMI_ACC2X2_TEMPERATURE_REG, &data);
+	*temperature = (signed char)data;
+
+	return comres;
+}
+
+static ssize_t smi130_acc_enable_int_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int type, value;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+#ifdef CONFIG_SENSORS_BMI058
+	int i;
+#endif
+
+	sscanf(buf, "%3d %3d", &type, &value);
+
+#ifdef CONFIG_SENSORS_BMI058
+	for (i = 0; i < sizeof(int_map) / sizeof(struct interrupt_map_t); i++) {
+		if (int_map[i].x == type) {
+			type = int_map[i].y;
+			break;
+		}
+		if (int_map[i].y == type) {
+			type = int_map[i].x;
+			break;
+		}
+	}
+#endif
+
+	if (smi130_acc_set_Int_Enable(smi130_acc->smi130_acc_client, type, value) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+
+static ssize_t smi130_acc_int_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_Int_Mode(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_int_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_Int_Mode(smi130_acc->smi130_acc_client, (unsigned char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+static ssize_t smi130_acc_slope_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_slope_duration(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_slope_duration_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_slope_duration(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_slope_no_mot_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_slope_no_mot_duration(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_slope_no_mot_duration_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_slope_no_mot_duration(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+
+static ssize_t smi130_acc_slope_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_slope_threshold(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_slope_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_slope_threshold(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_slope_no_mot_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_slope_no_mot_threshold(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_slope_no_mot_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_slope_no_mot_threshold(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_high_g_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_high_g_duration(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_high_g_duration_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_high_g_duration(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_high_g_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_high_g_threshold(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_high_g_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_high_g_threshold(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_low_g_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_low_g_duration(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_low_g_duration_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_low_g_duration(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_low_g_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_low_g_threshold(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_low_g_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_low_g_threshold(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+static ssize_t smi130_acc_tap_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_tap_threshold(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_tap_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_tap_threshold(smi130_acc->smi130_acc_client, (unsigned char)data)
+			< 0)
+		return -EINVAL;
+
+	return count;
+}
+static ssize_t smi130_acc_tap_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_tap_duration(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_tap_duration_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_tap_duration(smi130_acc->smi130_acc_client, (unsigned char)data)
+			< 0)
+		return -EINVAL;
+
+	return count;
+}
+static ssize_t smi130_acc_tap_quiet_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_tap_quiet(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_tap_quiet_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_tap_quiet(smi130_acc->smi130_acc_client, (unsigned char)data) <
+			0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_tap_shock_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_tap_shock(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_tap_shock_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_tap_shock(smi130_acc->smi130_acc_client, (unsigned char)data) <
+			0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_tap_samp_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_tap_samp(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_tap_samp_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_tap_samp(smi130_acc->smi130_acc_client, (unsigned char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_orient_mbl_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_orient_mbl_mode(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_orient_mbl_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_orient_mbl_mode(smi130_acc->smi130_acc_client, (unsigned char)data) <
+			0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_orient_mbl_blocking_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_orient_mbl_blocking(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_orient_mbl_blocking_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_orient_mbl_blocking(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+static ssize_t smi130_acc_orient_mbl_hyst_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_orient_mbl_hyst(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_orient_mbl_hyst_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_orient_mbl_hyst(smi130_acc->smi130_acc_client, (unsigned char)data) <
+			0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_orient_mbl_theta_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_theta_blocking(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_orient_mbl_theta_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_theta_blocking(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_flat_theta_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_theta_flat(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_flat_theta_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_theta_flat(smi130_acc->smi130_acc_client, (unsigned char)data) <
+			0)
+		return -EINVAL;
+
+	return count;
+}
+static ssize_t smi130_acc_flat_hold_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_flat_hold_time(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+static ssize_t smi130_acc_selftest_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&smi130_acc->selftest_result));
+
+}
+
+static ssize_t smi130_acc_softreset_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_soft_reset(smi130_acc->smi130_acc_client) < 0)
+		return -EINVAL;
+
+	return count;
+}
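+/*
+ * Self test: each axis is deflected in the positive and then the negative
+ * direction and the difference of the two readings is compared against a
+ * per-variant threshold; bit 0/1/2 of selftest_result is set for x/y/z
+ * when the measured difference stays below that threshold.
+ */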
+static ssize_t smi130_acc_selftest_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+
+	unsigned long data;
+	unsigned char clear_value = 0;
+	int error;
+	short value1 = 0;
+	short value2 = 0;
+	short diff = 0;
+	unsigned long result = 0;
+	unsigned char test_result_branch = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	smi130_acc_soft_reset(smi130_acc->smi130_acc_client);
+	smi130_acc_delay(5);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (data != 1)
+		return -EINVAL;
+
+	smi130_acc_write_reg(smi130_acc->smi130_acc_client, 0x32, &clear_value);
+
+	if ((smi130_acc->sensor_type == SMI_ACC280_TYPE) ||
+		(smi130_acc->sensor_type == SMI_ACC255_TYPE)) {
+#ifdef CONFIG_SENSORS_BMI058
+		/*set self test amp */
+		if (smi130_acc_set_selftest_amp(smi130_acc->smi130_acc_client, 1) < 0)
+			return -EINVAL;
+		/* set to 8 G range */
+		if (smi130_acc_set_range(smi130_acc->smi130_acc_client,
+							SMI_ACC2X2_RANGE_8G) < 0)
+			return -EINVAL;
+#else
+		/* set to 4 G range */
+		if (smi130_acc_set_range(smi130_acc->smi130_acc_client,
+							SMI_ACC2X2_RANGE_4G) < 0)
+			return -EINVAL;
+#endif
+	}
+
+	if ((smi130_acc->sensor_type == SMI_ACC250E_TYPE) ||
+			(smi130_acc->sensor_type == SMI_ACC222E_TYPE)) {
+		/* set to 8 G range */
+		if (smi130_acc_set_range(smi130_acc->smi130_acc_client, 8) < 0)
+			return -EINVAL;
+		if (smi130_acc_set_selftest_amp(smi130_acc->smi130_acc_client, 1) < 0)
+			return -EINVAL;
+	}
+
+	/* 1 selects the x-axis (on BMI058, 1 selects the y-axis) */
+	smi130_acc_set_selftest_st(smi130_acc->smi130_acc_client, 1);
+	smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 0);
+	smi130_acc_delay(10);
+	smi130_acc_read_accel_x(smi130_acc->smi130_acc_client,
+					smi130_acc->sensor_type, &value1);
+	smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 1);
+	smi130_acc_delay(10);
+	smi130_acc_read_accel_x(smi130_acc->smi130_acc_client,
+					smi130_acc->sensor_type, &value2);
+	diff = value1-value2;
+
+#ifdef CONFIG_SENSORS_BMI058
+	PINFO("diff y is %d,value1 is %d, value2 is %d\n", diff,
+				value1, value2);
+	test_result_branch = 2;
+#else
+	PINFO("diff x is %d,value1 is %d, value2 is %d\n", diff,
+				value1, value2);
+	test_result_branch = 1;
+#endif
+
+	if (smi130_acc->sensor_type == SMI_ACC280_TYPE) {
+#ifdef CONFIG_SENSORS_BMI058
+		if (abs(diff) < 819)
+			result |= test_result_branch;
+#else
+		if (abs(diff) < 1638)
+			result |= test_result_branch;
+#endif
+	}
+	if (smi130_acc->sensor_type == SMI_ACC255_TYPE) {
+		if (abs(diff) < 409)
+			result |= 1;
+	}
+	if (smi130_acc->sensor_type == SMI_ACC250E_TYPE) {
+		if (abs(diff) < 51)
+			result |= 1;
+	}
+	if (smi130_acc->sensor_type == SMI_ACC222E_TYPE) {
+		if (abs(diff) < 12)
+			result |= 1;
+	}
+
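+/*
+ * The read_accel_* helpers combine the LSB/MSB register pair and then shift
+ * the result left and back right so that the 8/10/12/14-bit reading is
+ * sign-extended into a signed short.
+ */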
+	/* 2 selects the y-axis (on BMI058, 2 selects the x-axis) */
+	smi130_acc_set_selftest_st(smi130_acc->smi130_acc_client, 2);
+	smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 0);
+	smi130_acc_delay(10);
+	smi130_acc_read_accel_y(smi130_acc->smi130_acc_client,
+					smi130_acc->sensor_type, &value1);
+	smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 1);
+	smi130_acc_delay(10);
+	smi130_acc_read_accel_y(smi130_acc->smi130_acc_client,
+					smi130_acc->sensor_type, &value2);
+	diff = value1-value2;
+
+#ifdef CONFIG_SENSORS_BMI058
+	PINFO("diff x is %d,value1 is %d, value2 is %d\n", diff,
+				value1, value2);
+	test_result_branch = 1;
+#else
+	PINFO("diff y is %d,value1 is %d, value2 is %d\n", diff,
+				value1, value2);
+	test_result_branch = 2;
+#endif
+
+	if (smi130_acc->sensor_type == SMI_ACC280_TYPE) {
+#ifdef CONFIG_SENSORS_BMI058
+		if (abs(diff) < 819)
+			result |= test_result_branch;
+#else
+		if (abs(diff) < 1638)
+			result |= test_result_branch;
+#endif
+	}
+	if (smi130_acc->sensor_type == SMI_ACC255_TYPE) {
+		if (abs(diff) < 409)
+			result |= test_result_branch;
+	}
+	if (smi130_acc->sensor_type == SMI_ACC250E_TYPE) {
+		if (abs(diff) < 51)
+			result |= test_result_branch;
+	}
+	if (smi130_acc->sensor_type == SMI_ACC222E_TYPE) {
+		if (abs(diff) < 12)
+			result |= test_result_branch;
+	}
+
+
+	smi130_acc_set_selftest_st(smi130_acc->smi130_acc_client, 3); /* 3 for z-axis*/
+	smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 0);
+	smi130_acc_delay(10);
+	smi130_acc_read_accel_z(smi130_acc->smi130_acc_client,
+					smi130_acc->sensor_type, &value1);
+	smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 1);
+	smi130_acc_delay(10);
+	smi130_acc_read_accel_z(smi130_acc->smi130_acc_client,
+					smi130_acc->sensor_type, &value2);
+	diff = value1-value2;
+
+	PINFO("diff z is %d,value1 is %d, value2 is %d\n", diff,
+			value1, value2);
+
+	if (smi130_acc->sensor_type == SMI_ACC280_TYPE) {
+#ifdef CONFIG_SENSORS_BMI058
+			if (abs(diff) < 409)
+				result |= 4;
+#else
+			if (abs(diff) < 819)
+				result |= 4;
+#endif
+	}
+	if (smi130_acc->sensor_type == SMI_ACC255_TYPE) {
+		if (abs(diff) < 204)
+			result |= 4;
+	}
+	if (smi130_acc->sensor_type == SMI_ACC250E_TYPE) {
+		if (abs(diff) < 25)
+			result |= 4;
+	}
+	if (smi130_acc->sensor_type == SMI_ACC222E_TYPE) {
+		if (abs(diff) < 6)
+			result |= 4;
+	}
+
+	/* self test for smi_acc254 */
+	if ((smi130_acc->sensor_type == SMI_ACC255_TYPE) && (result > 0)) {
+		result = 0;
+		smi130_acc_soft_reset(smi130_acc->smi130_acc_client);
+		smi130_acc_delay(5);
+		smi130_acc_write_reg(smi130_acc->smi130_acc_client, 0x32, &clear_value);
+		/* set to 8 G range */
+		if (smi130_acc_set_range(smi130_acc->smi130_acc_client, 8) < 0)
+			return -EINVAL;
+		if (smi130_acc_set_selftest_amp(smi130_acc->smi130_acc_client, 1) < 0)
+			return -EINVAL;
+
+		/* 1 selects the x-axis, positive direction first */
+		smi130_acc_set_selftest_st(smi130_acc->smi130_acc_client, 1);
+		smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 0);
+		smi130_acc_delay(10);
+		smi130_acc_read_accel_x(smi130_acc->smi130_acc_client,
+						smi130_acc->sensor_type, &value1);
+		/* negative direction */
+		smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 1);
+		smi130_acc_delay(10);
+		smi130_acc_read_accel_x(smi130_acc->smi130_acc_client,
+						smi130_acc->sensor_type, &value2);
+		diff = value1-value2;
+
+		PINFO("diff x is %d,value1 is %d, value2 is %d\n",
+						diff, value1, value2);
+		if (abs(diff) < 204)
+			result |= 1;
+
+		/* 2 selects the y-axis, positive direction first */
+		smi130_acc_set_selftest_st(smi130_acc->smi130_acc_client, 2);
+		smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 0);
+		smi130_acc_delay(10);
+		smi130_acc_read_accel_y(smi130_acc->smi130_acc_client,
+						smi130_acc->sensor_type, &value1);
+		/* negative direction */
+		smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 1);
+		smi130_acc_delay(10);
+		smi130_acc_read_accel_y(smi130_acc->smi130_acc_client,
+						smi130_acc->sensor_type, &value2);
+		diff = value1-value2;
+		PINFO("diff y is %d,value1 is %d, value2 is %d\n",
+						diff, value1, value2);
+
+		if (abs(diff) < 204)
+			result |= 2;
+
+		/* 3 selects the z-axis, positive direction first */
+		smi130_acc_set_selftest_st(smi130_acc->smi130_acc_client, 3);
+		smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 0);
+		smi130_acc_delay(10);
+		smi130_acc_read_accel_z(smi130_acc->smi130_acc_client,
+						smi130_acc->sensor_type, &value1);
+		/* negative direction */
+		smi130_acc_set_selftest_stn(smi130_acc->smi130_acc_client, 1);
+		smi130_acc_delay(10);
+		smi130_acc_read_accel_z(smi130_acc->smi130_acc_client,
+						smi130_acc->sensor_type, &value2);
+		diff = value1-value2;
+
+		PINFO("diff z is %d,value1 is %d, value2 is %d\n",
+						diff, value1, value2);
+		if (abs(diff) < 102)
+			result |= 4;
+	}
+
+	atomic_set(&smi130_acc->selftest_result, (unsigned int)result);
+
+	smi130_acc_soft_reset(smi130_acc->smi130_acc_client);
+	smi130_acc_delay(5);
+	PINFO("self test finished\n");
+
+	return count;
+}
+
+static ssize_t smi130_acc_flat_hold_time_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_flat_hold_time(smi130_acc->smi130_acc_client, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
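+/* Native data width in bits, indexed by sensor_type (0..3). */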
+const int smi130_acc_sensor_bitwidth[] = {
+	12,  10,  8, 14
+};
+
+static int smi130_acc_read_accel_xyz(struct i2c_client *client,
+		signed char sensor_type, struct smi130_accacc *acc)
+{
+	int comres = 0;
+	unsigned char data[6];
+	struct smi130_acc_data *client_data = i2c_get_clientdata(client);
+#ifndef SMI_ACC2X2_SENSOR_IDENTIFICATION_ENABLE
+	int bitwidth;
+#endif
+	comres = smi130_acc_smbus_read_byte_block(client,
+				SMI_ACC2X2_ACC_X12_LSB__REG, data, 6);
+	if (sensor_type >= 4)
+		return -EINVAL;
+
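+	/* Combine the LSB/MSB register pairs; without sensor identification the
+	 * raw values are shifted down to the sensor's native bit width below. */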
+	acc->x = (data[1]<<8)|data[0];
+	acc->y = (data[3]<<8)|data[2];
+	acc->z = (data[5]<<8)|data[4];
+
+#ifndef SMI_ACC2X2_SENSOR_IDENTIFICATION_ENABLE
+	bitwidth = smi130_acc_sensor_bitwidth[sensor_type];
+
+	acc->x = (acc->x >> (16 - bitwidth));
+	acc->y = (acc->y >> (16 - bitwidth));
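+	/* expected input: "<interrupt type> <0|1 enable flag>" */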
+	acc->z = (acc->z >> (16 - bitwidth));
+#endif
+
+	smi130_acc_remap_sensor_data(acc, client_data);
+	return comres;
+}
+
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
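+/*
+ * Polling mode: read one x/y/z sample, report it through the input device
+ * and re-schedule the delayed work with the configured delay.
+ */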
+static void smi130_acc_work_func(struct work_struct *work)
+{
+	struct smi130_acc_data *smi130_acc = container_of((struct delayed_work *)work,
+			struct smi130_acc_data, work);
+	static struct smi130_accacc acc;
+	unsigned long delay = msecs_to_jiffies(atomic_read(&smi130_acc->delay));
+
+	smi130_acc_read_accel_xyz(smi130_acc->smi130_acc_client, smi130_acc->sensor_type, &acc);
+	input_report_abs(smi130_acc->input, ABS_X, acc.x);
+	input_report_abs(smi130_acc->input, ABS_Y, acc.y);
+	input_report_abs(smi130_acc->input, ABS_Z, acc.z);
+	input_sync(smi130_acc->input);
+	mutex_lock(&smi130_acc->value_mutex);
+	smi130_acc->value = acc;
+	mutex_unlock(&smi130_acc->value_mutex);
+	schedule_delayed_work(&smi130_acc->work, delay);
+}
+#endif
+static struct workqueue_struct *reportdata_wq;
+
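+/* Return the current boottime clock value in nanoseconds. */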
+uint64_t smi130_acc_get_alarm_timestamp(void)
+{
+	uint64_t ts_ap;
+	struct timespec tmp_time;
+	get_monotonic_boottime(&tmp_time);
+	ts_ap = (uint64_t)tmp_time.tv_sec * 1000000000 + tmp_time.tv_nsec;
+	return ts_ap;
+}
+
+#define ABS(x) ((x) > 0 ? (x) : -(x))
+
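+/*
+ * FIFO drain worker: read all buffered frames in one burst, reconstruct a
+ * per-frame timestamp from the FIFO read time and the averaged frame
+ * interval, and report every frame through the input device.
+ * sample_drift_offset is re-estimated every 20 runs to keep the
+ * reconstructed timestamps close to the observed FIFO read time.
+ */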
+static void smi130_acc_timer_work_fun(struct work_struct *work)
+{
+	struct  smi130_acc_data *smi130_acc =
+		container_of(work,
+				struct smi130_acc_data, report_data_work);
+	int i;
+	unsigned char count = 0;
+	unsigned char mode = 0;
+	signed char fifo_data_out[MAX_FIFO_F_LEVEL * MAX_FIFO_F_BYTES] = {0};
+	unsigned char f_len = 0;
+	uint64_t del = 0;
+	uint64_t time_internal = 0;
+	int64_t drift_time = 0;
+	static uint64_t time_odr;
+	struct smi130_accacc acc_lsb;
+	struct timespec ts;
+	static uint32_t data_cnt;
+	static uint32_t pre_data_cnt;
+	static int64_t sample_drift_offset;
+
+	if (smi130_acc->fifo_datasel) {
+		/* Select one axis data output for every fifo frame */
+		f_len = 2;
+	} else {
+		/* Select X, Y and Z axis data output for every fifo frame */
+		f_len = 6;
+	}
+	if (smi130_acc_get_fifo_framecount(smi130_acc->smi130_acc_client, &count) < 0) {
+		PERR("smi130_acc_get_fifo_framecount err\n");
+		return;
+	}
+	if (count == 0) {
+		PERR("smi130_acc_get_fifo_framecount zero\n");
+		return;
+	}
+	if (count > MAX_FIFO_F_LEVEL) {
+		if (smi130_acc_get_mode(smi130_acc->smi130_acc_client, &mode) < 0) {
+			PERR("smi130_acc_get_mode err\n");
+			return;
+		}
+		if (SMI_ACC2X2_MODE_NORMAL == mode) {
+			PERR("smi130_acc fifo_count: %d abnormal, op_mode: %d\n",
+					count, mode);
+			count = MAX_FIFO_F_LEVEL;
+		} else {
+			/*chip already suspend or shutdown*/
+			count = 0;
+			return;
+		}
+	}
+	if (smi_acc_i2c_burst_read(smi130_acc->smi130_acc_client,
+			SMI_ACC2X2_FIFO_DATA_OUTPUT_REG, fifo_data_out,
+						count * f_len) < 0) {
+		PERR("smi130_acc read fifo err\n");
+		return;
+	}
+	smi130_acc->fifo_time = smi130_acc_get_alarm_timestamp();
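+	/* On the first batch, back-date base_time so the oldest frame sits
+	 * (count - 1) ODR periods before the FIFO read time. */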
+	if (smi130_acc->acc_count == 0)
+		smi130_acc->base_time = smi130_acc->timestamp =
+		smi130_acc->fifo_time - (count-1) * smi130_acc->time_odr;
+
+	smi130_acc->acc_count += count;
+	del = smi130_acc->fifo_time - smi130_acc->base_time;
+	time_internal = div64_u64(del, smi130_acc->acc_count);
+
+	data_cnt++;
+	if (data_cnt == 1)
+		time_odr = smi130_acc->time_odr;
+
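+	/* Clamp the averaged frame interval to within 0.5% of the tracked ODR period */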
+	if (time_internal > time_odr) {
+		if (time_internal - time_odr > div64_u64 (time_odr, 200))
+			time_internal = time_odr + div64_u64(time_odr, 200);
+	} else {
+		if (time_odr - time_internal > div64_u64(time_odr, 200))
+			time_internal = time_odr - div64_u64(time_odr, 200);
+	}
+	/* pay attention to the FIFO output data format */
+	if (f_len == 6) {
+		/* Select X Y Z axis data output for every frame */
+		for (i = 0; i < count; i++) {
+			if (smi130_acc->debug_level & 0x01)
+				printk(KERN_INFO "smi_acc time =%llu fifo_time =%llu  smi_acc->count=%llu time_internal =%lld time_odr = %lld ",
+				smi130_acc->timestamp, smi130_acc->fifo_time,
+				smi130_acc->acc_count, time_internal, time_odr);
+
+			ts = ns_to_timespec(smi130_acc->timestamp);
+			acc_lsb.x =
+			((unsigned char)fifo_data_out[i * f_len + 1] << 8 |
+				(unsigned char)fifo_data_out[i * f_len + 0]);
+			acc_lsb.y =
+			((unsigned char)fifo_data_out[i * f_len + 3] << 8 |
+				(unsigned char)fifo_data_out[i * f_len + 2]);
+			acc_lsb.z =
+			((unsigned char)fifo_data_out[i * f_len + 5] << 8 |
+				(unsigned char)fifo_data_out[i * f_len + 4]);
+#ifndef SMI_ACC2X2_SENSOR_IDENTIFICATION_ENABLE
+			acc_lsb.x >>=
+			(16 - smi130_acc_sensor_bitwidth[smi130_acc->sensor_type]);
+			acc_lsb.y >>=
+			(16 - smi130_acc_sensor_bitwidth[smi130_acc->sensor_type]);
+			acc_lsb.z >>=
+			(16 - smi130_acc_sensor_bitwidth[smi130_acc->sensor_type]);
+#endif
+			smi130_acc_remap_sensor_data(&acc_lsb, smi130_acc);
+			input_event(smi130_acc->input, EV_MSC, MSC_TIME,
+			ts.tv_sec);
+			input_event(smi130_acc->input, EV_MSC, MSC_TIME,
+			ts.tv_nsec);
+			input_event(smi130_acc->input, EV_MSC,
+				MSC_GESTURE, acc_lsb.x);
+			input_event(smi130_acc->input, EV_MSC,
+				MSC_RAW, acc_lsb.y);
+			input_event(smi130_acc->input, EV_MSC,
+				MSC_SCAN, acc_lsb.z);
+			input_sync(smi130_acc->input);
+			smi130_acc->timestamp +=
+				time_internal - sample_drift_offset;
+		}
+	}
+	drift_time = smi130_acc->timestamp - smi130_acc->fifo_time;
+	if (data_cnt % 20 == 0) {
+		if (ABS(drift_time) > div64_u64(time_odr, 5)) {
+			sample_drift_offset =
+			div64_s64(drift_time, smi130_acc->acc_count - pre_data_cnt);
+			pre_data_cnt = smi130_acc->acc_count;
+			time_odr = time_internal;
+		}
+	}
+
+}
+
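+/*
+ * hrtimer callback: queue the FIFO report work and re-arm the timer with an
+ * 8 ms period.
+ */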
+static enum hrtimer_restart reportdata_timer_fun(
+	struct hrtimer *hrtimer)
+{
+	struct smi130_acc_data *client_data =
+		container_of(hrtimer, struct smi130_acc_data, timer);
+	int32_t delay = 8;
+
+	queue_work(reportdata_wq, &(client_data->report_data_work));
+	/*set delay 8ms*/
+	client_data->work_delay_kt = ns_to_ktime(delay*1000000);
+	hrtimer_forward(hrtimer, ktime_get(), client_data->work_delay_kt);
+
+	return HRTIMER_RESTART;
+}
+
+static ssize_t smi130_acc_enable_timer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%d\n", smi130_acc->is_timer_running);
+}
+
+static ssize_t smi130_acc_enable_timer_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (data) {
+		if (0 == smi130_acc->is_timer_running) {
+			hrtimer_start(&smi130_acc->timer,
+			ns_to_ktime(1000000),
+			HRTIMER_MODE_REL);
+			smi130_acc->base_time = 0;
+			smi130_acc->timestamp = 0;
+			smi130_acc->is_timer_running = 1;
+		}
+	} else {
+		if (1 == smi130_acc->is_timer_running) {
+			hrtimer_cancel(&smi130_acc->timer);
+			smi130_acc->is_timer_running = 0;
+			smi130_acc->base_time = 0;
+			smi130_acc->timestamp = 0;
+			smi130_acc->fifo_time = 0;
+			smi130_acc->acc_count = 0;
+		}
+	}
+	return count;
+}
+
+static ssize_t smi130_acc_debug_level_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	err = snprintf(buf, 8, "%d\n", smi130_acc->debug_level);
+	return err;
+}
+static ssize_t smi130_acc_debug_level_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int32_t ret = 0;
+	unsigned long data;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	ret = kstrtoul(buf, 16, &data);
+	if (ret)
+		return ret;
+	smi130_acc->debug_level = (uint8_t)data;
+	return count;
+}
+
+static ssize_t smi130_acc_register_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int address, value;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
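+	/* expected input: "<register address> <value>", both decimal */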
+	sscanf(buf, "%3d %3d", &address, &value);
+	if (smi130_acc_write_reg(smi130_acc->smi130_acc_client, (unsigned char)address,
+				(unsigned char *)&value) < 0)
+		return -EINVAL;
+	return count;
+}
+static ssize_t smi130_acc_register_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	size_t count = 0;
+	u8 reg[0x40];
+	int i;
+
+	for (i = 0; i < 0x40; i++) {
+		smi130_acc_smbus_read_byte(smi130_acc->smi130_acc_client, i, reg+i);
+
+		count += snprintf(&buf[count], 32, "0x%x: %d\n", i, reg[i]);
+	}
+	return count;
+
+}
+
+static ssize_t smi130_acc_range_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_range(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_range_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_range(smi130_acc->smi130_acc_client, (unsigned char) data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_bandwidth_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_bandwidth(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_bandwidth_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc->sensor_type == SMI_ACC280_TYPE)
+		if ((unsigned char) data > 14)
+			return -EINVAL;
+
+	if (smi130_acc_set_bandwidth(smi130_acc->smi130_acc_client,
+				(unsigned char) data) < 0)
+		return -EINVAL;
+	smi130_acc->base_time = 0;
+	smi130_acc->acc_count = 0;
+
+	return count;
+}
+
+static ssize_t smi130_acc_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_mode(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 32, "%d %d\n", data, smi130_acc->smi_acc_mode_enabled);
+}
+
+static ssize_t smi130_acc_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_mode(smi130_acc->smi130_acc_client,
+		(unsigned char) data, SMI_ACC_ENABLED_BSX) < 0)
+			return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_value_cache_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi130_acc_data *smi130_acc = input_get_drvdata(input);
+	struct smi130_accacc acc_value;
+
+	mutex_lock(&smi130_acc->value_mutex);
+	acc_value = smi130_acc->value;
+	mutex_unlock(&smi130_acc->value_mutex);
+
+	return snprintf(buf, 96, "%d %d %d\n", acc_value.x, acc_value.y,
+			acc_value.z);
+}
+
+static ssize_t smi130_acc_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi130_acc_data *smi130_acc = input_get_drvdata(input);
+	struct smi130_accacc acc_value;
+
+	smi130_acc_read_accel_xyz(smi130_acc->smi130_acc_client, smi130_acc->sensor_type,
+								&acc_value);
+
+	return snprintf(buf, 96, "%d %d %d\n", acc_value.x, acc_value.y,
+			acc_value.z);
+}
+
+static ssize_t smi130_acc_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&smi130_acc->delay));
+
+}
+
+static ssize_t smi130_acc_chip_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%u\n", smi130_acc->chip_id);
+
+}
+
+
+static ssize_t smi130_acc_place_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	int place = BOSCH_SENSOR_PLACE_UNKNOWN;
+
+	if (NULL != smi130_acc->bosch_pd)
+		place = smi130_acc->bosch_pd->place;
+
+	return snprintf(buf, 16, "%d\n", place);
+}
+
+
+static ssize_t smi130_acc_delay_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (data > SMI_ACC2X2_MAX_DELAY)
+		data = SMI_ACC2X2_MAX_DELAY;
+	atomic_set(&smi130_acc->delay, (unsigned int) data);
+
+	return count;
+}
+
+
+static ssize_t smi130_acc_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&smi130_acc->enable));
+
+}
+
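+/*
+ * Switch the chip between NORMAL and SUSPEND power mode and, in polling
+ * mode, start or stop the delayed work; enable_mutex keeps the enable flag
+ * and the power mode consistent.
+ */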
+static void smi130_acc_set_enable(struct device *dev, int enable)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	int pre_enable = atomic_read(&smi130_acc->enable);
+
+	mutex_lock(&smi130_acc->enable_mutex);
+	if (enable) {
+		if (pre_enable == 0) {
+			smi130_acc_set_mode(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_NORMAL, SMI_ACC_ENABLED_INPUT);
+
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+			schedule_delayed_work(&smi130_acc->work,
+				msecs_to_jiffies(atomic_read(&smi130_acc->delay)));
+#endif
+			atomic_set(&smi130_acc->enable, 1);
+		}
+
+	} else {
+		if (pre_enable == 1) {
+			smi130_acc_set_mode(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_INPUT);
+
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+			cancel_delayed_work_sync(&smi130_acc->work);
+#endif
+			atomic_set(&smi130_acc->enable, 0);
+		}
+	}
+	mutex_unlock(&smi130_acc->enable_mutex);
+
+}
+
+static ssize_t smi130_acc_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if ((data == 0) || (data == 1))
+		smi130_acc_set_enable(dev, data);
+
+	return count;
+}
+static ssize_t smi130_acc_fast_calibration_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+#ifdef CONFIG_SENSORS_BMI058
+	if (smi130_acc_get_offset_target(smi130_acc->smi130_acc_client,
+				BMI058_OFFSET_TRIGGER_X, &data) < 0)
+		return -EINVAL;
+#else
+	if (smi130_acc_get_offset_target(smi130_acc->smi130_acc_client,
+				SMI_ACC2X2_OFFSET_TRIGGER_X, &data) < 0)
+		return -EINVAL;
+#endif
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
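+/*
+ * Fast calibration: program the offset target for the axis, trigger the
+ * compensation and poll the cal_ready flag every 2 ms (up to 50 tries)
+ * before reporting completion.
+ */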
+static ssize_t smi130_acc_fast_calibration_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	signed char tmp;
+	unsigned char timeout = 0;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+#ifdef CONFIG_SENSORS_BMI058
+	if (smi130_acc_set_offset_target(smi130_acc->smi130_acc_client,
+			BMI058_OFFSET_TRIGGER_X, (unsigned char)data) < 0)
+		return -EINVAL;
+#else
+	if (smi130_acc_set_offset_target(smi130_acc->smi130_acc_client,
+			SMI_ACC2X2_OFFSET_TRIGGER_X, (unsigned char)data) < 0)
+		return -EINVAL;
+#endif
+
+	if (smi130_acc_set_cal_trigger(smi130_acc->smi130_acc_client, 1) < 0)
+		return -EINVAL;
+
+	do {
+		smi130_acc_delay(2);
+		smi130_acc_get_cal_ready(smi130_acc->smi130_acc_client, &tmp);
+
+		/*PINFO("wait 2ms cal ready flag is %d\n", tmp); */
+		timeout++;
+		if (timeout == 50) {
+			PINFO("get fast calibration ready error\n");
+			return -EINVAL;
+		}
+
+	} while (tmp == 0);
+
+	PINFO("x axis fast calibration finished\n");
+	return count;
+}
+
+static ssize_t smi130_acc_fast_calibration_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+#ifdef CONFIG_SENSORS_BMI058
+	if (smi130_acc_get_offset_target(smi130_acc->smi130_acc_client,
+					BMI058_OFFSET_TRIGGER_Y, &data) < 0)
+		return -EINVAL;
+#else
+	if (smi130_acc_get_offset_target(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_OFFSET_TRIGGER_Y, &data) < 0)
+		return -EINVAL;
+#endif
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_fast_calibration_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	signed char tmp;
+	unsigned char timeout = 0;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+#ifdef CONFIG_SENSORS_BMI058
+	if (smi130_acc_set_offset_target(smi130_acc->smi130_acc_client,
+			BMI058_OFFSET_TRIGGER_Y, (unsigned char)data) < 0)
+		return -EINVAL;
+#else
+	if (smi130_acc_set_offset_target(smi130_acc->smi130_acc_client,
+			SMI_ACC2X2_OFFSET_TRIGGER_Y, (unsigned char)data) < 0)
+		return -EINVAL;
+#endif
+
+	if (smi130_acc_set_cal_trigger(smi130_acc->smi130_acc_client, 2) < 0)
+		return -EINVAL;
+
+	do {
+		smi130_acc_delay(2);
+		smi130_acc_get_cal_ready(smi130_acc->smi130_acc_client, &tmp);
+
+		/*PINFO("wait 2ms cal ready flag is %d\n", tmp);*/
+		timeout++;
+		if (timeout == 50) {
+			PINFO("get fast calibration ready error\n");
+			return -EINVAL;
+		}
+
+	} while (tmp == 0);
+
+	PINFO("y axis fast calibration finished\n");
+	return count;
+}
+
+static ssize_t smi130_acc_fast_calibration_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_offset_target(smi130_acc->smi130_acc_client, 3, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_fast_calibration_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	signed char tmp;
+	unsigned char timeout = 0;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_offset_target(smi130_acc->smi130_acc_client, 3, (unsigned
+					char)data) < 0)
+		return -EINVAL;
+
+	if (smi130_acc_set_cal_trigger(smi130_acc->smi130_acc_client, 3) < 0)
+		return -EINVAL;
+
+	do {
+		smi130_acc_delay(2);
+		smi130_acc_get_cal_ready(smi130_acc->smi130_acc_client, &tmp);
+
+		/*PINFO("wait 2ms cal ready flag is %d\n", tmp);*/
+		timeout++;
+		if (timeout == 50) {
+			PINFO("get fast calibration ready error\n");
+			return -EINVAL;
+		}
+
+	} while (tmp == 0);
+
+	PINFO("z axis fast calibration finished\n");
+	return count;
+}
+
+
+static ssize_t smi130_acc_SleepDur_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_sleep_duration(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_SleepDur_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_sleep_duration(smi130_acc->smi130_acc_client,
+				(unsigned char) data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_fifo_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_fifo_mode(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_fifo_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_fifo_mode(smi130_acc->smi130_acc_client,
+				(unsigned char) data) < 0)
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t smi130_acc_fifo_trig_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_fifo_trig(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_fifo_trig_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_fifo_trig(smi130_acc->smi130_acc_client,
+				(unsigned char) data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+
+
+static ssize_t smi130_acc_fifo_trig_src_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_fifo_trig_src(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_fifo_trig_src_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (smi130_acc_set_fifo_trig_src(smi130_acc->smi130_acc_client,
+				(unsigned char) data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+
+/*!
+ * @brief show fifo_data_sel axis definition (Android definition, not the sensor HW register).
+ * 0--> x, y, z axis fifo data for every frame
+ * 1--> only x axis fifo data for every frame
+ * 2--> only y axis fifo data for every frame
+ * 3--> only z axis fifo data for every frame
+ */
+static ssize_t smi130_acc_fifo_data_sel_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	signed char place = BOSCH_SENSOR_PLACE_UNKNOWN;
+	if (smi130_acc_get_fifo_data_sel(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+#ifdef CONFIG_SENSORS_BMI058
+/*Update BMI058 fifo_data_sel to the SMI130_ACC common definition*/
+	if (BMI058_FIFO_DAT_SEL_X == data)
+		data = SMI_ACC2X2_FIFO_DAT_SEL_X;
+	else if (BMI058_FIFO_DAT_SEL_Y == data)
+		data = SMI_ACC2X2_FIFO_DAT_SEL_Y;
+#endif
+
+	/* remap fifo_data_sel if a virtual place is defined in the BSP files */
+	if ((NULL != smi130_acc->bosch_pd) &&
+		(BOSCH_SENSOR_PLACE_UNKNOWN != smi130_acc->bosch_pd->place)) {
+		place = smi130_acc->bosch_pd->place;
+		/* a sensor with place 0 does not need to be remapped */
+		if ((place > 0) && (place < MAX_AXIS_REMAP_TAB_SZ)) {
+			/* SMI_ACC2X2_FIFO_DAT_SEL_X: 1, Y: 2, Z: 3;
+			 * bosch_axis_remap_tab_dft[i].src_x: 0, y: 1, z: 2,
+			 * so add 1 to convert
+			 */
+			if (SMI_ACC2X2_FIFO_DAT_SEL_X == data)
+				data = bosch_axis_remap_tab_dft[place].src_x + 1;
+			else if (SMI_ACC2X2_FIFO_DAT_SEL_Y == data)
+				data = bosch_axis_remap_tab_dft[place].src_y + 1;
+		}
+	}
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_fifo_framecount_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	unsigned char mode;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_fifo_framecount(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	if (data > MAX_FIFO_F_LEVEL) {
+
+		if (smi130_acc_get_mode(smi130_acc->smi130_acc_client, &mode) < 0)
+			return -EINVAL;
+
+		if (SMI_ACC2X2_MODE_NORMAL == mode) {
+			PERR("smi130_acc fifo_count: %d abnormal, op_mode: %d",
+					data, mode);
+			data = MAX_FIFO_F_LEVEL;
+		} else {
+			/*chip already suspend or shutdown*/
+			data = 0;
+		}
+	}
+
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_fifo_framecount_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	smi130_acc->fifo_count = (unsigned int) data;
+
+	return count;
+}
+
+static ssize_t smi130_acc_temperature_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_read_temperature(smi130_acc->smi130_acc_client, &data) < 0)
+		return -EINVAL;
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+/*!
+ * @brief store fifo_data_sel axis definition (Android definition, not the sensor HW register).
+ * 0--> x, y, z axis fifo data for every frame
+ * 1--> only x axis fifo data for every frame
+ * 2--> only y axis fifo data for every frame
+ * 3--> only z axis fifo data for every frame
+ */
+static ssize_t smi130_acc_fifo_data_sel_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	signed char place;
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	/* save fifo_data_sel (Android definition) */
+	smi130_acc->fifo_datasel = (unsigned char) data;
+
+	/* remap fifo_data_sel if a virtual place is defined */
+	if ((NULL != smi130_acc->bosch_pd) &&
+		(BOSCH_SENSOR_PLACE_UNKNOWN != smi130_acc->bosch_pd->place)) {
+		place = smi130_acc->bosch_pd->place;
+		/* a sensor with place 0 does not need to be remapped */
+		if ((place > 0) && (place < MAX_AXIS_REMAP_TAB_SZ)) {
+			/* X/Y axis reversal is needed for sensor places P1, P3, P5, P7 */
+			/* SMI_ACC2X2_FIFO_DAT_SEL_X: 1, Y: 2, Z: 3;
+			 * bosch_axis_remap_tab_dft[i].src_x: 0, y: 1, z: 2,
+			 * so add 1 to convert
+			 */
+			if (SMI_ACC2X2_FIFO_DAT_SEL_X == data)
+				data =  bosch_axis_remap_tab_dft[place].src_x + 1;
+			else if (SMI_ACC2X2_FIFO_DAT_SEL_Y == data)
+				data =  bosch_axis_remap_tab_dft[place].src_y + 1;
+		}
+	}
+#ifdef CONFIG_SENSORS_BMI058
+	/*Update BMI058 fifo_data_sel to the SMI130_ACC common definition*/
+		if (SMI_ACC2X2_FIFO_DAT_SEL_X == data)
+			data = BMI058_FIFO_DAT_SEL_X;
+		else if (SMI_ACC2X2_FIFO_DAT_SEL_Y == data)
+			data = BMI058_FIFO_DAT_SEL_Y;
+
+#endif
+	if (smi130_acc_set_fifo_data_sel(smi130_acc->smi130_acc_client,
+				(unsigned char) data) < 0)
+		return -EINVAL;
+
+	return count;
+}
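+
+/*
+ * Worked example of the remapping above (the numbers are illustrative; the
+ * real entries live in bosch_axis_remap_tab_dft): if the board file sets
+ * place = P1 and that entry maps src_x to the hardware Y axis (src_x == 1),
+ * a userspace request for SMI_ACC2X2_FIFO_DAT_SEL_X (1) is translated to
+ * src_x + 1 == 2, i.e. the hardware Y axis is selected in the FIFO.
+ */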
+
+static ssize_t smi130_acc_fifo_data_out_frame_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char f_len = 0;
+	unsigned char count = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	if (smi130_acc->fifo_datasel) {
+		/*Select one axis data output for every fifo frame*/
+		f_len = 2;
+	} else	{
+		/*Select X Y Z axis data output for every fifo frame*/
+		f_len = 6;
+	}
+	if (smi130_acc_get_fifo_framecount(smi130_acc->smi130_acc_client, &count) < 0) {
+		PERR("smi130_acc_get_fifo_framecount err\n");
+		return -EINVAL;
+	}
+	if (count == 0)
+		return 0;
+	if (smi_acc_i2c_burst_read(smi130_acc->smi130_acc_client,
+			SMI_ACC2X2_FIFO_DATA_OUTPUT_REG, buf,
+						count * f_len) < 0)
+		return -EINVAL;
+
+	return count * f_len;
+}
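+
+/*
+ * Sketch of how userspace could interpret the fifo_data_frame output above
+ * (illustrative only, assuming the usual LSB-first layout of the FIFO data
+ * register): the read returns count * f_len bytes, i.e. frames of 6 bytes
+ * (x, y, z) or 2 bytes (single axis), each axis occupying two bytes that
+ * combine into one raw 16-bit word before any resolution-dependent shift:
+ *
+ *	s16 x = (s16)((buf[1] << 8) | buf[0]);
+ *	s16 y = (s16)((buf[3] << 8) | buf[2]);
+ *	s16 z = (s16)((buf[5] << 8) | buf[4]);
+ */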
+
+static ssize_t smi130_acc_offset_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_offset_x(smi130_acc->smi130_acc_client, &data) < 0)
+		return snprintf(buf, 48, "Read error\n");
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_offset_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_offset_x(smi130_acc->smi130_acc_client,
+					(unsigned char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_offset_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_offset_y(smi130_acc->smi130_acc_client, &data) < 0)
+		return snprintf(buf, 48, "Read error\n");
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_offset_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_offset_y(smi130_acc->smi130_acc_client,
+					(unsigned char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_offset_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data = 0;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	if (smi130_acc_get_offset_z(smi130_acc->smi130_acc_client, &data) < 0)
+		return snprintf(buf, 48, "Read error\n");
+
+	return snprintf(buf, 16, "%d\n", data);
+
+}
+
+static ssize_t smi130_acc_offset_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if (smi130_acc_set_offset_z(smi130_acc->smi130_acc_client,
+					(unsigned char)data) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi130_acc_driver_version_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+	int ret;
+
+	if (smi130_acc == NULL) {
+		PERR("Invalid client_data pointer\n");
+		return -ENODEV;
+	}
+
+	ret = snprintf(buf, 128, "Driver version: %s\n",
+			DRIVER_VERSION);
+	return ret;
+}
+
+#ifdef CONFIG_SIG_MOTION
+static int smi130_acc_set_en_slope_int(struct smi130_acc_data *smi130_acc,
+		int en)
+{
+	int err;
+	struct i2c_client *client = smi130_acc->smi130_acc_client;
+
+	if (en) {
+		/* Set the related parameters, which need to be fine-tuned via
+		 * the slope_threshold and slope_duration interfaces.
+		 */
+		/* dur: 192 samples ~= 3s */
+		err = smi130_acc_set_slope_duration(client, 0x0);
+		err += smi130_acc_set_slope_threshold(client, 0x16);
+
+		/*Enable the interrupts*/
+		err += smi130_acc_set_Int_Enable(client, 5, 1);/*Slope X*/
+		err += smi130_acc_set_Int_Enable(client, 6, 1);/*Slope Y*/
+		err += smi130_acc_set_Int_Enable(client, 7, 1);/*Slope Z*/
+	#ifdef SMI_ACC2X2_ENABLE_INT1
+		/* TODO: SLOPE can now only be routed to INT1 pin*/
+		err += smi130_acc_set_int1_pad_sel(client, PAD_SLOP);
+	#else
+		/* err += smi130_acc_set_int2_pad_sel(client, PAD_SLOP); */
+	#endif
+	} else {
+		err = smi130_acc_set_Int_Enable(client, 5, 0);/*Slope X*/
+		err += smi130_acc_set_Int_Enable(client, 6, 0);/*Slope Y*/
+		err += smi130_acc_set_Int_Enable(client, 7, 0);/*Slope Z*/
+	}
+	return err;
+}
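+
+/*
+ * Significant motion is implemented on top of the slope (any-motion)
+ * interrupt: enabling it programs the slope duration/threshold and unmasks
+ * the slope interrupts on all three axes, while the one-shot behaviour is
+ * handled in the IRQ work function, which disables the feature again after
+ * reporting.
+ */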
+
+static ssize_t smi130_acc_en_sig_motion_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&smi130_acc->en_sig_motion));
+}
+
+static int smi130_acc_set_en_sig_motion(struct smi130_acc_data *smi130_acc,
+		int en)
+{
+	int err = 0;
+
+	en = (en >= 1) ? 1 : 0;  /* set sig motion sensor status */
+
+	if (atomic_read(&smi130_acc->en_sig_motion) != en) {
+		if (en) {
+			err = smi130_acc_set_mode(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_NORMAL, SMI_ACC_ENABLED_SGM);
+			err = smi130_acc_set_en_slope_int(smi130_acc, en);
+			enable_irq_wake(smi130_acc->IRQ);
+		} else {
+			disable_irq_wake(smi130_acc->IRQ);
+			err = smi130_acc_set_en_slope_int(smi130_acc, en);
+			err = smi130_acc_set_mode(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_SGM);
+		}
+		atomic_set(&smi130_acc->en_sig_motion, en);
+	}
+	return err;
+}
+
+static ssize_t smi130_acc_en_sig_motion_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if ((data == 0) || (data == 1))
+		smi130_acc_set_en_sig_motion(smi130_acc, data);
+
+	return count;
+}
+#endif
+
+#ifdef CONFIG_DOUBLE_TAP
+static int smi130_acc_set_en_single_tap_int(struct smi130_acc_data *smi130_acc, int en)
+{
+	int err;
+	struct i2c_client *client = smi130_acc->smi130_acc_client;
+
+	if (en) {
+		/* Set tap interrupt parameters here if needed:
+		 * smi130_acc_set_tap_duration(client, 0xc0);
+		 * smi130_acc_set_tap_threshold(client, 0x16);
+		 */
+
+		/*Enable the single tap interrupts*/
+		err = smi130_acc_set_Int_Enable(client, 8, 1);
+	#ifdef SMI_ACC2X2_ENABLE_INT1
+		err += smi130_acc_set_int1_pad_sel(client, PAD_SINGLE_TAP);
+	#else
+		err += smi130_acc_set_int2_pad_sel(client, PAD_SINGLE_TAP);
+	#endif
+	} else {
+		err = smi130_acc_set_Int_Enable(client, 8, 0);
+	}
+	return err;
+}
+
+static ssize_t smi130_acc_tap_time_period_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%d\n", smi130_acc->tap_time_period);
+}
+
+static ssize_t smi130_acc_tap_time_period_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	smi130_acc->tap_time_period = data;
+
+	return count;
+}
+
+static ssize_t smi130_acc_en_double_tap_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&smi130_acc->en_double_tap));
+}
+
+static int smi130_acc_set_en_double_tap(struct smi130_acc_data *smi130_acc,
+		int en)
+{
+	int err = 0;
+
+	en = (en >= 1) ? 1 : 0;
+
+	if (atomic_read(&smi130_acc->en_double_tap) != en) {
+		if (en) {
+			err = smi130_acc_set_mode(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_NORMAL, SMI_ACC_ENABLED_DTAP);
+			err = smi130_acc_set_en_single_tap_int(smi130_acc, en);
+		} else {
+			err = smi130_acc_set_en_single_tap_int(smi130_acc, en);
+			err = smi130_acc_set_mode(smi130_acc->smi130_acc_client,
+					SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_DTAP);
+		}
+		atomic_set(&smi130_acc->en_double_tap, en);
+	}
+	return err;
+}
+
+static ssize_t smi130_acc_en_double_tap_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
+
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+
+	if ((data == 0) || (data == 1))
+		smi130_acc_set_en_double_tap(smi130_acc, data);
+
+	return count;
+}
+
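+/*
+ * Timer callback for the software double-tap detection: when a first single
+ * tap arrives, smi130_acc_irq_work_func() arms tap_timer for tap_time_period
+ * milliseconds. If this callback fires before a second tap is seen, the
+ * pending tap count is cleared (and, if the define below is enabled, a
+ * single tap is reported instead of a double tap).
+ */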
+static void smi130_acc_tap_timeout_handle(unsigned long data)
+{
+	struct smi130_acc_data *smi130_acc = (struct smi130_acc_data *)data;
+
+	PINFO("tap timer expired\n");
+	mutex_lock(&smi130_acc->tap_mutex);
+	smi130_acc->tap_times = 0;
+	mutex_unlock(&smi130_acc->tap_mutex);
+
+	/* to also report a single tap, enable the define below */
+#ifdef REPORT_SINGLE_TAP_WHEN_DOUBLE_TAP_SENSOR_ENABLED
+	input_report_rel(smi130_acc->dev_interrupt,
+		SINGLE_TAP_INTERRUPT,
+		SINGLE_TAP_INTERRUPT_HAPPENED);
+	input_sync(smi130_acc->dev_interrupt);
+#endif
+
+}
+#endif
+
+static DEVICE_ATTR(range, S_IRUGO | S_IWUSR,
+		smi130_acc_range_show, smi130_acc_range_store);
+static DEVICE_ATTR(bandwidth, S_IRUGO | S_IWUSR,
+		smi130_acc_bandwidth_show, smi130_acc_bandwidth_store);
+static DEVICE_ATTR(op_mode, S_IRUGO | S_IWUSR,
+		smi130_acc_mode_show, smi130_acc_mode_store);
+static DEVICE_ATTR(value, S_IRUSR,
+		smi130_acc_value_show, NULL);
+static DEVICE_ATTR(value_cache, S_IRUSR,
+		smi130_acc_value_cache_show, NULL);
+static DEVICE_ATTR(delay, S_IRUGO | S_IWUSR,
+		smi130_acc_delay_show, smi130_acc_delay_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+		smi130_acc_enable_show, smi130_acc_enable_store);
+static DEVICE_ATTR(SleepDur, S_IRUGO | S_IWUSR,
+		smi130_acc_SleepDur_show, smi130_acc_SleepDur_store);
+static DEVICE_ATTR(fast_calibration_x, S_IRUGO | S_IWUSR,
+		smi130_acc_fast_calibration_x_show,
+		smi130_acc_fast_calibration_x_store);
+static DEVICE_ATTR(fast_calibration_y, S_IRUGO | S_IWUSR,
+		smi130_acc_fast_calibration_y_show,
+		smi130_acc_fast_calibration_y_store);
+static DEVICE_ATTR(fast_calibration_z, S_IRUGO | S_IWUSR,
+		smi130_acc_fast_calibration_z_show,
+		smi130_acc_fast_calibration_z_store);
+static DEVICE_ATTR(fifo_mode, S_IRUGO | S_IWUSR,
+		smi130_acc_fifo_mode_show, smi130_acc_fifo_mode_store);
+static DEVICE_ATTR(fifo_framecount, S_IRUGO | S_IWUSR,
+		smi130_acc_fifo_framecount_show, smi130_acc_fifo_framecount_store);
+static DEVICE_ATTR(fifo_trig, S_IRUGO | S_IWUSR,
+		smi130_acc_fifo_trig_show, smi130_acc_fifo_trig_store);
+static DEVICE_ATTR(fifo_trig_src, S_IRUGO | S_IWUSR,
+		smi130_acc_fifo_trig_src_show, smi130_acc_fifo_trig_src_store);
+static DEVICE_ATTR(fifo_data_sel, S_IRUGO | S_IWUSR,
+		smi130_acc_fifo_data_sel_show, smi130_acc_fifo_data_sel_store);
+static DEVICE_ATTR(fifo_data_frame, S_IRUGO,
+		smi130_acc_fifo_data_out_frame_show, NULL);
+static DEVICE_ATTR(reg, S_IRUGO | S_IWUSR,
+		smi130_acc_register_show, smi130_acc_register_store);
+static DEVICE_ATTR(chip_id, S_IRUSR,
+		smi130_acc_chip_id_show, NULL);
+static DEVICE_ATTR(offset_x, S_IRUGO | S_IWUSR,
+		smi130_acc_offset_x_show,
+		smi130_acc_offset_x_store);
+static DEVICE_ATTR(offset_y, S_IRUGO | S_IWUSR,
+		smi130_acc_offset_y_show,
+		smi130_acc_offset_y_store);
+static DEVICE_ATTR(offset_z, S_IRUGO | S_IWUSR,
+		smi130_acc_offset_z_show,
+		smi130_acc_offset_z_store);
+static DEVICE_ATTR(enable_int, S_IWUSR,
+		NULL, smi130_acc_enable_int_store);
+static DEVICE_ATTR(int_mode, S_IRUGO | S_IWUSR,
+		smi130_acc_int_mode_show, smi130_acc_int_mode_store);
+static DEVICE_ATTR(slope_duration, S_IRUGO | S_IWUSR,
+		smi130_acc_slope_duration_show, smi130_acc_slope_duration_store);
+static DEVICE_ATTR(slope_threshold, S_IRUGO | S_IWUSR,
+		smi130_acc_slope_threshold_show, smi130_acc_slope_threshold_store);
+static DEVICE_ATTR(slope_no_mot_duration, S_IRUGO | S_IWUSR,
+		smi130_acc_slope_no_mot_duration_show,
+			smi130_acc_slope_no_mot_duration_store);
+static DEVICE_ATTR(slope_no_mot_threshold, S_IRUGO | S_IWUSR,
+		smi130_acc_slope_no_mot_threshold_show,
+			smi130_acc_slope_no_mot_threshold_store);
+static DEVICE_ATTR(high_g_duration, S_IRUGO | S_IWUSR,
+		smi130_acc_high_g_duration_show, smi130_acc_high_g_duration_store);
+static DEVICE_ATTR(high_g_threshold, S_IRUGO | S_IWUSR,
+		smi130_acc_high_g_threshold_show, smi130_acc_high_g_threshold_store);
+static DEVICE_ATTR(low_g_duration, S_IRUGO | S_IWUSR,
+		smi130_acc_low_g_duration_show, smi130_acc_low_g_duration_store);
+static DEVICE_ATTR(low_g_threshold, S_IRUGO | S_IWUSR,
+		smi130_acc_low_g_threshold_show, smi130_acc_low_g_threshold_store);
+static DEVICE_ATTR(tap_duration, S_IRUGO | S_IWUSR,
+		smi130_acc_tap_duration_show, smi130_acc_tap_duration_store);
+static DEVICE_ATTR(tap_threshold, S_IRUGO | S_IWUSR,
+		smi130_acc_tap_threshold_show, smi130_acc_tap_threshold_store);
+static DEVICE_ATTR(tap_quiet, S_IRUGO | S_IWUSR,
+		smi130_acc_tap_quiet_show, smi130_acc_tap_quiet_store);
+static DEVICE_ATTR(tap_shock, S_IRUGO | S_IWUSR,
+		smi130_acc_tap_shock_show, smi130_acc_tap_shock_store);
+static DEVICE_ATTR(tap_samp, S_IRUGO | S_IWUSR,
+		smi130_acc_tap_samp_show, smi130_acc_tap_samp_store);
+static DEVICE_ATTR(orient_mbl_mode, S_IRUGO | S_IWUSR,
+		smi130_acc_orient_mbl_mode_show, smi130_acc_orient_mbl_mode_store);
+static DEVICE_ATTR(orient_mbl_blocking, S_IRUGO | S_IWUSR,
+		smi130_acc_orient_mbl_blocking_show, smi130_acc_orient_mbl_blocking_store);
+static DEVICE_ATTR(orient_mbl_hyst, S_IRUGO | S_IWUSR,
+		smi130_acc_orient_mbl_hyst_show, smi130_acc_orient_mbl_hyst_store);
+static DEVICE_ATTR(orient_mbl_theta, S_IRUGO | S_IWUSR,
+		smi130_acc_orient_mbl_theta_show, smi130_acc_orient_mbl_theta_store);
+static DEVICE_ATTR(flat_theta, S_IRUGO | S_IWUSR,
+		smi130_acc_flat_theta_show, smi130_acc_flat_theta_store);
+static DEVICE_ATTR(flat_hold_time, S_IRUGO | S_IWUSR,
+		smi130_acc_flat_hold_time_show, smi130_acc_flat_hold_time_store);
+static DEVICE_ATTR(selftest, S_IRUGO | S_IWUSR,
+		smi130_acc_selftest_show, smi130_acc_selftest_store);
+static DEVICE_ATTR(softreset, S_IWUSR,
+		NULL, smi130_acc_softreset_store);
+static DEVICE_ATTR(enable_timer, S_IRUGO | S_IWUSR,
+		smi130_acc_enable_timer_show, smi130_acc_enable_timer_store);
+static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR,
+		smi130_acc_debug_level_show, smi130_acc_debug_level_store);
+static DEVICE_ATTR(temperature, S_IRUSR,
+		smi130_acc_temperature_show, NULL);
+static DEVICE_ATTR(place, S_IRUSR,
+		smi130_acc_place_show, NULL);
+static DEVICE_ATTR(driver_version, S_IRUSR,
+		smi130_acc_driver_version_show, NULL);
+
+#ifdef CONFIG_SIG_MOTION
+static DEVICE_ATTR(en_sig_motion, S_IRUGO|S_IWUSR|S_IWGRP|S_IWOTH,
+		smi130_acc_en_sig_motion_show, smi130_acc_en_sig_motion_store);
+#endif
+#ifdef CONFIG_DOUBLE_TAP
+static DEVICE_ATTR(tap_time_period, S_IRUGO|S_IWUSR|S_IWGRP|S_IWOTH,
+		smi130_acc_tap_time_period_show, smi130_acc_tap_time_period_store);
+static DEVICE_ATTR(en_double_tap, S_IRUGO|S_IWUSR|S_IWGRP|S_IWOTH,
+		smi130_acc_en_double_tap_show, smi130_acc_en_double_tap_store);
+#endif
+
+static struct attribute *smi130_acc_attributes[] = {
+	&dev_attr_range.attr,
+	&dev_attr_bandwidth.attr,
+	&dev_attr_op_mode.attr,
+	&dev_attr_value.attr,
+	&dev_attr_value_cache.attr,
+	&dev_attr_delay.attr,
+	&dev_attr_enable.attr,
+	&dev_attr_SleepDur.attr,
+	&dev_attr_reg.attr,
+	&dev_attr_fast_calibration_x.attr,
+	&dev_attr_fast_calibration_y.attr,
+	&dev_attr_fast_calibration_z.attr,
+	&dev_attr_fifo_mode.attr,
+	&dev_attr_fifo_framecount.attr,
+	&dev_attr_fifo_trig.attr,
+	&dev_attr_fifo_trig_src.attr,
+	&dev_attr_fifo_data_sel.attr,
+	&dev_attr_fifo_data_frame.attr,
+	&dev_attr_chip_id.attr,
+	&dev_attr_offset_x.attr,
+	&dev_attr_offset_y.attr,
+	&dev_attr_offset_z.attr,
+	&dev_attr_enable_int.attr,
+	&dev_attr_enable_timer.attr,
+	&dev_attr_debug_level.attr,
+	&dev_attr_int_mode.attr,
+	&dev_attr_slope_duration.attr,
+	&dev_attr_slope_threshold.attr,
+	&dev_attr_slope_no_mot_duration.attr,
+	&dev_attr_slope_no_mot_threshold.attr,
+	&dev_attr_high_g_duration.attr,
+	&dev_attr_high_g_threshold.attr,
+	&dev_attr_low_g_duration.attr,
+	&dev_attr_low_g_threshold.attr,
+	&dev_attr_tap_threshold.attr,
+	&dev_attr_tap_duration.attr,
+	&dev_attr_tap_quiet.attr,
+	&dev_attr_tap_shock.attr,
+	&dev_attr_tap_samp.attr,
+	&dev_attr_orient_mbl_mode.attr,
+	&dev_attr_orient_mbl_blocking.attr,
+	&dev_attr_orient_mbl_hyst.attr,
+	&dev_attr_orient_mbl_theta.attr,
+	&dev_attr_flat_theta.attr,
+	&dev_attr_flat_hold_time.attr,
+	&dev_attr_selftest.attr,
+	&dev_attr_softreset.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_place.attr,
+	&dev_attr_driver_version.attr,
+#ifdef CONFIG_SIG_MOTION
+	&dev_attr_en_sig_motion.attr,
+#endif
+#ifdef CONFIG_DOUBLE_TAP
+	&dev_attr_en_double_tap.attr,
+#endif
+
+	NULL
+};
+
+static struct attribute_group smi130_acc_attribute_group = {
+	.attrs = smi130_acc_attributes
+};
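+
+/*
+ * The group above is registered in probe on the input device's kobject (and
+ * again on the bosch_dev), so the attributes typically appear under the
+ * corresponding /sys/class/input/input<N>/ directory; the exact index is
+ * assigned at runtime.
+ */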
+
+#ifdef CONFIG_SIG_MOTION
+static struct attribute *smi130_acc_sig_motion_attributes[] = {
+	&dev_attr_slope_duration.attr,
+	&dev_attr_slope_threshold.attr,
+	&dev_attr_en_sig_motion.attr,
+	NULL
+};
+static struct attribute_group smi130_acc_sig_motion_attribute_group = {
+	.attrs = smi130_acc_sig_motion_attributes
+};
+#endif
+
+#ifdef CONFIG_DOUBLE_TAP
+static struct attribute *smi130_acc_double_tap_attributes[] = {
+	&dev_attr_tap_threshold.attr,
+	&dev_attr_tap_duration.attr,
+	&dev_attr_tap_quiet.attr,
+	&dev_attr_tap_shock.attr,
+	&dev_attr_tap_samp.attr,
+	&dev_attr_tap_time_period.attr,
+	&dev_attr_en_double_tap.attr,
+	NULL
+};
+static struct attribute_group smi130_acc_double_tap_attribute_group = {
+	.attrs = smi130_acc_double_tap_attributes
+};
+#endif
+
+
+#if defined(SMI_ACC2X2_ENABLE_INT1) || defined(SMI_ACC2X2_ENABLE_INT2)
+static const char *orient_mbl[] = {"upward looking portrait upright",
+	"upward looking portrait upside-down",
+		"upward looking landscape left",
+		"upward looking landscape right",
+		"downward looking portrait upright",
+		"downward looking portrait upside-down",
+		"downward looking landscape left",
+		"downward looking landscape right"};
+
+
+static void smi130_acc_high_g_interrupt_handle(struct smi130_acc_data *smi130_acc)
+{
+	unsigned char first_value = 0;
+	unsigned char sign_value = 0;
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		smi130_acc_get_HIGH_first(smi130_acc->smi130_acc_client, i, &first_value);
+		if (first_value == 1) {
+			smi130_acc_get_HIGH_sign(smi130_acc->smi130_acc_client,
+								&sign_value);
+			if (sign_value == 1) {
+				if (i == 0)
+					input_report_rel(smi130_acc->dev_interrupt,
+							HIGH_G_INTERRUPT,
+							HIGH_G_INTERRUPT_X_N);
+				if (i == 1)
+					input_report_rel(smi130_acc->dev_interrupt,
+							HIGH_G_INTERRUPT,
+							HIGH_G_INTERRUPT_Y_N);
+				if (i == 2)
+					input_report_rel(smi130_acc->dev_interrupt,
+							HIGH_G_INTERRUPT,
+							HIGH_G_INTERRUPT_Z_N);
+			} else {
+				if (i == 0)
+					input_report_rel(smi130_acc->dev_interrupt,
+							HIGH_G_INTERRUPT,
+							HIGH_G_INTERRUPT_X);
+				if (i == 1)
+					input_report_rel(smi130_acc->dev_interrupt,
+							HIGH_G_INTERRUPT,
+							HIGH_G_INTERRUPT_Y);
+				if (i == 2)
+					input_report_rel(smi130_acc->dev_interrupt,
+							HIGH_G_INTERRUPT,
+							HIGH_G_INTERRUPT_Z);
+			}
+		}
+
+		PINFO("High G interrupt happened, axis is %d, "
+					"first is %d, sign is %d\n", i,
+						first_value, sign_value);
+	}
+
+
+}
+
+#ifndef CONFIG_SIG_MOTION
+static void smi130_acc_slope_interrupt_handle(struct smi130_acc_data *smi130_acc)
+{
+	unsigned char first_value = 0;
+	unsigned char sign_value = 0;
+	int i;
+	for (i = 0; i < 3; i++) {
+		smi130_acc_get_slope_first(smi130_acc->smi130_acc_client, i, &first_value);
+		if (first_value == 1) {
+			smi130_acc_get_slope_sign(smi130_acc->smi130_acc_client,
+								&sign_value);
+			if (sign_value == 1) {
+				if (i == 0)
+					input_report_rel(smi130_acc->dev_interrupt,
+							SLOP_INTERRUPT,
+							SLOPE_INTERRUPT_X_N);
+				if (i == 1)
+					input_report_rel(smi130_acc->dev_interrupt,
+							SLOP_INTERRUPT,
+							SLOPE_INTERRUPT_Y_N);
+				if (i == 2)
+					input_report_rel(smi130_acc->dev_interrupt,
+							SLOP_INTERRUPT,
+							SLOPE_INTERRUPT_Z_N);
+			} else {
+				if (i == 0)
+					input_report_rel(smi130_acc->dev_interrupt,
+							SLOP_INTERRUPT,
+							SLOPE_INTERRUPT_X);
+				if (i == 1)
+					input_report_rel(smi130_acc->dev_interrupt,
+							SLOP_INTERRUPT,
+							SLOPE_INTERRUPT_Y);
+				if (i == 2)
+					input_report_rel(smi130_acc->dev_interrupt,
+							SLOP_INTERRUPT,
+							SLOPE_INTERRUPT_Z);
+
+			}
+		}
+
+		PINFO("Slope interrupt happened, axis is %d, "
+					"first is %d, sign is %d\n", i,
+						first_value, sign_value);
+	}
+}
+#endif
+
+static void smi130_acc_irq_work_func(struct work_struct *work)
+{
+	struct smi130_acc_data *smi130_acc = container_of((struct work_struct *)work,
+			struct smi130_acc_data, irq_work);
+#ifdef CONFIG_DOUBLE_TAP
+	struct i2c_client *client = smi130_acc->smi130_acc_client;
+#endif
+
+	unsigned char status = 0;
+	unsigned char first_value = 0;
+	unsigned char sign_value = 0;
+
+#ifdef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+	static struct smi130_accacc acc;
+	struct timespec ts;
+	/*
+	 * Do not use smi130_acc_get_interruptstatus2() here to detect the
+	 * new data interrupt; instead, the new_data_x/new_data_y/new_data_z
+	 * bits carried in the x/y/z axis values indicate whether this is a
+	 * fresh sample.
+	 */
+	/* PINFO("New data interrupt happened\n");*/
+	smi130_acc_read_accel_xyz(smi130_acc->smi130_acc_client,
+				smi130_acc->sensor_type, &acc);
+	ts = ns_to_timespec(smi130_acc->timestamp);
+	//if ((acc.x & SMI_ACC2X2_NEW_DATA_X__MSK) &&
+	//	(acc.y & SMI_ACC2X2_NEW_DATA_Y__MSK) &&
+	//	(acc.x & SMI_ACC2X2_NEW_DATA_Z__MSK))
+	{
+		input_event(smi130_acc->input, EV_MSC, MSC_TIME,
+			ts.tv_sec);
+		input_event(smi130_acc->input, EV_MSC, MSC_TIME,
+			ts.tv_nsec);
+		input_event(smi130_acc->input, EV_MSC,
+			MSC_GESTURE, acc.x);
+		input_event(smi130_acc->input, EV_MSC,
+			MSC_RAW, acc.y);
+		input_event(smi130_acc->input, EV_MSC,
+			MSC_SCAN, acc.z);
+		input_sync(smi130_acc->input);
+		mutex_lock(&smi130_acc->value_mutex);
+		smi130_acc->value = acc;
+		mutex_unlock(&smi130_acc->value_mutex);
+	}
+#endif
+
+	smi130_acc_get_interruptstatus1(smi130_acc->smi130_acc_client, &status);
+	PINFO("smi130_acc_irq_work_func, status = 0x%x\n", status);
+
+#ifdef CONFIG_SIG_MOTION
+	if (status & 0x04)	{
+		if (atomic_read(&smi130_acc->en_sig_motion) == 1) {
+			PINFO("Significant motion interrupt happened\n");
+			/* disable the significant motion sensor;
+			 * it will be re-enabled when the app requests it */
+			smi130_acc_set_en_sig_motion(smi130_acc, 0);
+
+			input_report_rel(smi130_acc->dev_interrupt,
+				SLOP_INTERRUPT, 1);
+			input_sync(smi130_acc->dev_interrupt);
+		}
+	}
+#endif
+
+#ifdef CONFIG_DOUBLE_TAP
+	if (status & 0x20) {
+		if (atomic_read(&smi130_acc->en_double_tap) == 1) {
+			PINFO("single tap interrupt happened\n");
+			smi130_acc_set_Int_Enable(client, 8, 0);
+			if (smi130_acc->tap_times == 0)	{
+				mod_timer(&smi130_acc->tap_timer, jiffies +
+				msecs_to_jiffies(smi130_acc->tap_time_period));
+				smi130_acc->tap_times = 1;
+			} else {
+				/* second tap within the period: report a double tap */
+				PINFO("double tap\n");
+				mutex_lock(&smi130_acc->tap_mutex);
+				smi130_acc->tap_times = 0;
+				del_timer(&smi130_acc->tap_timer);
+				mutex_unlock(&smi130_acc->tap_mutex);
+				input_report_rel(smi130_acc->dev_interrupt,
+					DOUBLE_TAP_INTERRUPT,
+					DOUBLE_TAP_INTERRUPT_HAPPENED);
+				input_sync(smi130_acc->dev_interrupt);
+			}
+			smi130_acc_set_Int_Enable(client, 8, 1);
+		}
+	}
+#endif
+
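+	/*
+	 * Bit meanings of the status byte read above (the switch below only
+	 * matches single-bit values): 0x01 low-g, 0x02 high-g,
+	 * 0x04 slope/any-motion, 0x08 slow/no-motion, 0x10 double tap,
+	 * 0x20 single tap, 0x40 orientation, 0x80 flat.
+	 */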
+	switch (status) {
+
+	case 0x01:
+		PINFO("Low G interrupt happened\n");
+		input_report_rel(smi130_acc->dev_interrupt, LOW_G_INTERRUPT,
+				LOW_G_INTERRUPT_HAPPENED);
+		break;
+
+	case 0x02:
+		smi130_acc_high_g_interrupt_handle(smi130_acc);
+		break;
+
+#ifndef CONFIG_SIG_MOTION
+	case 0x04:
+		smi130_acc_slope_interrupt_handle(smi130_acc);
+		break;
+#endif
+
+	case 0x08:
+		PINFO("slow/no-motion interrupt happened\n");
+		input_report_rel(smi130_acc->dev_interrupt,
+			SLOW_NO_MOTION_INTERRUPT,
+			SLOW_NO_MOTION_INTERRUPT_HAPPENED);
+		break;
+
+#ifndef CONFIG_DOUBLE_TAP
+	case 0x10:
+		PINFO("double tap interrupt happened\n");
+		input_report_rel(smi130_acc->dev_interrupt,
+			DOUBLE_TAP_INTERRUPT,
+			DOUBLE_TAP_INTERRUPT_HAPPENED);
+		break;
+	case 0x20:
+		PINFO("single tap interrupt happened\n");
+		input_report_rel(smi130_acc->dev_interrupt,
+			SINGLE_TAP_INTERRUPT,
+			SINGLE_TAP_INTERRUPT_HAPPENED);
+		break;
+#endif
+
+	case 0x40:
+		smi130_acc_get_orient_mbl_status(smi130_acc->smi130_acc_client,
+				    &first_value);
+		PINFO("orient_mbl interrupt happened, %s\n",
+				orient_mbl[first_value]);
+		if (first_value == 0)
+			input_report_abs(smi130_acc->dev_interrupt,
+			ORIENT_INTERRUPT,
+			UPWARD_PORTRAIT_UP_INTERRUPT_HAPPENED);
+		else if (first_value == 1)
+			input_report_abs(smi130_acc->dev_interrupt,
+				ORIENT_INTERRUPT,
+				UPWARD_PORTRAIT_DOWN_INTERRUPT_HAPPENED);
+		else if (first_value == 2)
+			input_report_abs(smi130_acc->dev_interrupt,
+				ORIENT_INTERRUPT,
+				UPWARD_LANDSCAPE_LEFT_INTERRUPT_HAPPENED);
+		else if (first_value == 3)
+			input_report_abs(smi130_acc->dev_interrupt,
+				ORIENT_INTERRUPT,
+				UPWARD_LANDSCAPE_RIGHT_INTERRUPT_HAPPENED);
+		else if (first_value == 4)
+			input_report_abs(smi130_acc->dev_interrupt,
+				ORIENT_INTERRUPT,
+				DOWNWARD_PORTRAIT_UP_INTERRUPT_HAPPENED);
+		else if (first_value == 5)
+			input_report_abs(smi130_acc->dev_interrupt,
+				ORIENT_INTERRUPT,
+				DOWNWARD_PORTRAIT_DOWN_INTERRUPT_HAPPENED);
+		else if (first_value == 6)
+			input_report_abs(smi130_acc->dev_interrupt,
+				ORIENT_INTERRUPT,
+				DOWNWARD_LANDSCAPE_LEFT_INTERRUPT_HAPPENED);
+		else if (first_value == 7)
+			input_report_abs(smi130_acc->dev_interrupt,
+				ORIENT_INTERRUPT,
+				DOWNWARD_LANDSCAPE_RIGHT_INTERRUPT_HAPPENED);
+		break;
+	case 0x80:
+		smi130_acc_get_orient_mbl_flat_status(smi130_acc->smi130_acc_client,
+				    &sign_value);
+		PINFO("flat interrupt happened, flat status is %d\n",
+				    sign_value);
+		if (sign_value == 1) {
+			input_report_abs(smi130_acc->dev_interrupt,
+				FLAT_INTERRUPT,
+				FLAT_INTERRUPT_TURE_HAPPENED);
+		} else {
+			input_report_abs(smi130_acc->dev_interrupt,
+				FLAT_INTERRUPT,
+				FLAT_INTERRUPT_FALSE_HAPPENED);
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+static irqreturn_t smi130_acc_irq_handler(int irq, void *handle)
+{
+	struct smi130_acc_data *data = handle;
+
+	if (data == NULL)
+		return IRQ_HANDLED;
+	if (data->smi130_acc_client == NULL)
+		return IRQ_HANDLED;
+	data->timestamp = smi130_acc_get_alarm_timestamp();
+
+	schedule_work(&data->irq_work);
+
+	return IRQ_HANDLED;
+}
+#endif /* defined(SMI_ACC2X2_ENABLE_INT1)||defined(SMI_ACC2X2_ENABLE_INT2) */
+
+
+static int smi130_acc_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int err = 0;
+	struct smi130_acc_data *data;
+	struct input_dev *dev;
+	struct bosch_dev  *dev_acc;
+#if defined(SMI_ACC2X2_ENABLE_INT1) || defined(SMI_ACC2X2_ENABLE_INT2)
+	struct bosch_sensor_specific *pdata;
+#endif
+	struct input_dev *dev_interrupt;
+
+	PINFO("smi130_acc_probe start\n");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		PERR("i2c_check_functionality error\n");
+		err = -EIO;
+		goto exit;
+	}
+	data = kzalloc(sizeof(struct smi130_acc_data), GFP_KERNEL);
+	if (!data) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	/* read and check chip id */
+	if (smi130_acc_check_chip_id(client, data) < 0) {
+		err = -EINVAL;
+		goto kfree_exit;
+	}
+
+	/* do soft reset */
+	smi130_acc_delay(5);
+	if (smi130_acc_soft_reset(client) < 0) {
+		PERR("i2c bus write error, please check the HW connection\n");
+		err = -EINVAL;
+		goto kfree_exit;
+	}
+	smi130_acc_delay(20);
+
+	i2c_set_clientdata(client, data);
+	data->smi130_acc_client = client;
+	mutex_init(&data->value_mutex);
+	mutex_init(&data->mode_mutex);
+	mutex_init(&data->enable_mutex);
+	smi130_acc_set_bandwidth(client, SMI_ACC2X2_BW_SET);
+	smi130_acc_set_range(client, SMI_ACC2X2_RANGE_SET);
+
+#if defined(SMI_ACC2X2_ENABLE_INT1) || defined(SMI_ACC2X2_ENABLE_INT2)
+
+	pdata = client->dev.platform_data;
+	if (pdata) {
+		if (pdata->irq_gpio_cfg && (pdata->irq_gpio_cfg() < 0)) {
+			PERR("IRQ GPIO conf. error %d\n",
+				client->irq);
+		}
+	}
+
+#ifdef SMI_ACC2X2_ENABLE_INT1
+	/* maps interrupt to INT1 pin */
+	smi130_acc_set_int1_pad_sel(client, PAD_LOWG);
+	smi130_acc_set_int1_pad_sel(client, PAD_HIGHG);
+	smi130_acc_set_int1_pad_sel(client, PAD_SLOP);
+	smi130_acc_set_int1_pad_sel(client, PAD_DOUBLE_TAP);
+	smi130_acc_set_int1_pad_sel(client, PAD_SINGLE_TAP);
+	smi130_acc_set_int1_pad_sel(client, PAD_ORIENT);
+	smi130_acc_set_int1_pad_sel(client, PAD_FLAT);
+	smi130_acc_set_int1_pad_sel(client, PAD_SLOW_NO_MOTION);
+#ifdef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+	smi130_acc_set_newdata(client, SMI_ACC2X2_INT1_NDATA, 1);
+	smi130_acc_set_newdata(client, SMI_ACC2X2_INT2_NDATA, 0);
+#endif
+#endif
+
+#ifdef SMI_ACC2X2_ENABLE_INT2
+	/* maps interrupt to INT2 pin */
+	smi130_acc_set_int2_pad_sel(client, PAD_LOWG);
+	smi130_acc_set_int2_pad_sel(client, PAD_HIGHG);
+	smi130_acc_set_int2_pad_sel(client, PAD_SLOP);
+	smi130_acc_set_int2_pad_sel(client, PAD_DOUBLE_TAP);
+	smi130_acc_set_int2_pad_sel(client, PAD_SINGLE_TAP);
+	smi130_acc_set_int2_pad_sel(client, PAD_ORIENT);
+	smi130_acc_set_int2_pad_sel(client, PAD_FLAT);
+	smi130_acc_set_int2_pad_sel(client, PAD_SLOW_NO_MOTION);
+#ifdef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+	smi130_acc_set_newdata(client, SMI_ACC2X2_INT1_NDATA, 0);
+	smi130_acc_set_newdata(client, SMI_ACC2X2_INT2_NDATA, 1);
+#endif
+#endif
+
+	smi130_acc_set_Int_Mode(client, 1);/* latch interrupt for 250 ms */
+
+	/* do not enable any interrupt here */
+	/* 10: orient_mbl, 11: flat */
+	/* smi130_acc_set_Int_Enable(client, 10, 1); */
+	/* smi130_acc_set_Int_Enable(client, 11, 1); */
+
+#ifdef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+	/* enable new data interrupt */
+	smi130_acc_set_Int_Enable(client, 4, 1);
+#endif
+
+#ifdef CONFIG_SIG_MOTION
+	enable_irq_wake(data->IRQ);
+#endif
+
+	INIT_WORK(&data->irq_work, smi130_acc_irq_work_func);
+#endif
+
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+	INIT_DELAYED_WORK(&data->work, smi130_acc_work_func);
+#endif
+	atomic_set(&data->delay, SMI_ACC2X2_MAX_DELAY);
+	atomic_set(&data->enable, 0);
+
+	dev = input_allocate_device();
+	if (!dev) {
+		err = -ENOMEM;
+		goto kfree_exit;
+	}
+
+	dev_interrupt = input_allocate_device();
+	if (!dev_interrupt) {
+		kfree(data);
+		input_free_device(dev); /* free the successfully allocated dev and return */
+		return -ENOMEM;
+	}
+
+	/* only value events reported */
+	dev->name = SENSOR_NAME;
+	dev->id.bustype = BUS_I2C;
+	input_set_capability(dev, EV_ABS, ABS_MISC);
+	input_set_abs_params(dev, ABS_X, ABSMIN, ABSMAX, 0, 0);
+	input_set_abs_params(dev, ABS_Y, ABSMIN, ABSMAX, 0, 0);
+	input_set_abs_params(dev, ABS_Z, ABSMIN, ABSMAX, 0, 0);
+	input_set_capability(dev, EV_MSC, MSC_GESTURE);
+	input_set_capability(dev, EV_MSC, MSC_RAW);
+	input_set_capability(dev, EV_MSC, MSC_SCAN);
+	input_set_capability(dev, EV_MSC, MSC_TIME);
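+	/*
+	 * Note: the new-data interrupt path in smi130_acc_irq_work_func()
+	 * reuses these MSC codes as raw channels: MSC_TIME carries the
+	 * timestamp (seconds, then nanoseconds), while MSC_GESTURE, MSC_RAW
+	 * and MSC_SCAN carry the x, y and z samples respectively.
+	 */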
+	input_set_drvdata(dev, data);
+	err = input_register_device(dev);
+	if (err < 0)
+		goto err_register_input_device;
+
+	/* all interrupt generated events are moved to interrupt input devices*/
+	dev_interrupt->name = "smi_acc_interrupt";
+	dev_interrupt->id.bustype = BUS_I2C;
+	input_set_capability(dev_interrupt, EV_REL,
+		SLOW_NO_MOTION_INTERRUPT);
+	input_set_capability(dev_interrupt, EV_REL,
+		LOW_G_INTERRUPT);
+	input_set_capability(dev_interrupt, EV_REL,
+		HIGH_G_INTERRUPT);
+	input_set_capability(dev_interrupt, EV_REL,
+		SLOP_INTERRUPT);
+	input_set_capability(dev_interrupt, EV_REL,
+		DOUBLE_TAP_INTERRUPT);
+	input_set_capability(dev_interrupt, EV_REL,
+		SINGLE_TAP_INTERRUPT);
+	input_set_capability(dev_interrupt, EV_ABS,
+		ORIENT_INTERRUPT);
+	input_set_capability(dev_interrupt, EV_ABS,
+		FLAT_INTERRUPT);
+	input_set_drvdata(dev_interrupt, data);
+
+	err = input_register_device(dev_interrupt);
+	if (err < 0)
+		goto err_register_input_device_interrupt;
+
+	data->dev_interrupt = dev_interrupt;
+	data->input = dev;
+
+#ifdef CONFIG_SIG_MOTION
+	data->g_sensor_class = class_create(THIS_MODULE, "sig_sensor");
+	if (IS_ERR(data->g_sensor_class)) {
+		err = PTR_ERR(data->g_sensor_class);
+		data->g_sensor_class = NULL;
+		PERR("could not allocate g_sensor_class\n");
+		goto err_create_class;
+	}
+
+	data->g_sensor_dev = device_create(data->g_sensor_class,
+				NULL, 0, "%s", "g_sensor");
+	if (unlikely(IS_ERR(data->g_sensor_dev))) {
+		err = PTR_ERR(data->g_sensor_dev);
+		data->g_sensor_dev = NULL;
+
+		PERR("could not allocate g_sensor_dev\n");
+		goto err_create_g_sensor_device;
+	}
+
+	dev_set_drvdata(data->g_sensor_dev, data);
+
+	err = sysfs_create_group(&data->g_sensor_dev->kobj,
+			&smi130_acc_sig_motion_attribute_group);
+	if (err < 0)
+		goto error_sysfs;
+#endif
+
+#ifdef CONFIG_DOUBLE_TAP
+	data->g_sensor_class_doubletap =
+		class_create(THIS_MODULE, "dtap_sensor");
+	if (IS_ERR(data->g_sensor_class_doubletap)) {
+		err = PTR_ERR(data->g_sensor_class_doubletap);
+		data->g_sensor_class_doubletap = NULL;
+		PERR("could not allocate g_sensor_class_doubletap\n");
+		goto err_create_class;
+	}
+
+	data->g_sensor_dev_doubletap = device_create(
+				data->g_sensor_class_doubletap,
+				NULL, 0, "%s", "g_sensor");
+	if (unlikely(IS_ERR(data->g_sensor_dev_doubletap))) {
+		err = PTR_ERR(data->g_sensor_dev_doubletap);
+		data->g_sensor_dev_doubletap = NULL;
+
+		PERR("could not allocate g_sensor_dev_doubletap\n");
+		goto err_create_g_sensor_device_double_tap;
+	}
+
+	dev_set_drvdata(data->g_sensor_dev_doubletap, data);
+
+	err = sysfs_create_group(&data->g_sensor_dev_doubletap->kobj,
+			&smi130_acc_double_tap_attribute_group);
+	if (err < 0)
+		goto error_sysfs;
+#endif
+
+	err = sysfs_create_group(&data->input->dev.kobj,
+			&smi130_acc_attribute_group);
+	if (err < 0)
+		goto error_sysfs;
+
+	dev_acc = bosch_allocate_device();
+	if (!dev_acc) {
+		err = -ENOMEM;
+		goto error_sysfs;
+	}
+	dev_acc->name = ACC_NAME;
+
+	bosch_set_drvdata(dev_acc, data);
+
+	err = bosch_register_device(dev_acc);
+	if (err < 0)
+		goto bosch_free_acc_exit;
+
+	data->bosch_acc = dev_acc;
+	err = sysfs_create_group(&data->bosch_acc->dev.kobj,
+			&smi130_acc_attribute_group);
+
+	if (err < 0)
+		goto bosch_free_exit;
+
+	if (NULL != client->dev.platform_data) {
+		data->bosch_pd = kzalloc(sizeof(*data->bosch_pd),
+				GFP_KERNEL);
+
+		if (NULL != data->bosch_pd) {
+			memcpy(data->bosch_pd, client->dev.platform_data,
+					sizeof(*data->bosch_pd));
+			PINFO("%s sensor driver set place: p%d",
+				data->bosch_pd->name, data->bosch_pd->place);
+		}
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	data->early_suspend.suspend = smi130_acc_early_suspend;
+	data->early_suspend.resume = smi130_acc_late_resume;
+	register_early_suspend(&data->early_suspend);
+#endif
+	INIT_WORK(&data->report_data_work,
+	smi130_acc_timer_work_fun);
+	reportdata_wq = create_singlethread_workqueue("smi130_acc_wq");
+	if (NULL == reportdata_wq)
+		PERR("failed to create the reportdata_wq\n");
+	hrtimer_init(&data->timer, CLOCK_MONOTONIC,
+		HRTIMER_MODE_REL);
+	data->timer.function = reportdata_timer_fun;
+	data->work_delay_kt = ns_to_ktime(4000000);
+	data->is_timer_running = 0;
+	data->timestamp = 0;
+	data->time_odr = 4000000;/*default bandwidth 125HZ*/
+	data->smi_acc_mode_enabled = 0;
+	data->fifo_datasel = 0;
+	data->fifo_count = 0;
+	data->acc_count = 0;
+
+#ifdef CONFIG_SIG_MOTION
+	atomic_set(&data->en_sig_motion, 0);
+#endif
+#ifdef CONFIG_DOUBLE_TAP
+	atomic_set(&data->en_double_tap, 0);
+	data->tap_times = 0;
+	data->tap_time_period = DEFAULT_TAP_JUDGE_PERIOD;
+	mutex_init(&data->tap_mutex);
+	setup_timer(&data->tap_timer, smi130_acc_tap_timeout_handle,
+			(unsigned long)data);
+#endif
+	if (smi130_acc_set_mode(client, SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_ALL) < 0)
+		return -EINVAL;
+	data->IRQ = client->irq;
+	PDEBUG("data->IRQ = %d", data->IRQ);
+	err = request_irq(data->IRQ, smi130_acc_irq_handler, IRQF_TRIGGER_RISING,
+			"smi130_acc", data);
+	if (err)
+		PERR("could not request irq\n");
+	PINFO("SMI130_ACC driver probed successfully");
+
+	return 0;
+
+bosch_free_exit:
+	bosch_unregister_device(dev_acc);
+
+bosch_free_acc_exit:
+	bosch_free_device(dev_acc);
+
+error_sysfs:
+	input_unregister_device(data->input);
+
+#ifdef CONFIG_DOUBLE_TAP
+err_create_g_sensor_device_double_tap:
+	class_destroy(data->g_sensor_class_doubletap);
+#endif
+
+#ifdef CONFIG_SIG_MOTION
+err_create_g_sensor_device:
+	class_destroy(data->g_sensor_class);
+#endif
+
+#if defined(CONFIG_SIG_MOTION) || defined(CONFIG_DOUBLE_TAP)
+err_create_class:
+	input_unregister_device(data->dev_interrupt);
+#endif
+
+err_register_input_device_interrupt:
+	input_free_device(dev_interrupt);
+	input_unregister_device(data->input);
+
+err_register_input_device:
+	input_free_device(dev);
+
+kfree_exit:
+	if ((NULL != data) && (NULL != data->bosch_pd)) {
+		kfree(data->bosch_pd);
+		data->bosch_pd = NULL;
+	}
+	kfree(data);
+exit:
+	return err;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void smi130_acc_early_suspend(struct early_suspend *h)
+{
+	struct smi130_acc_data *data =
+		container_of(h, struct smi130_acc_data, early_suspend);
+
+	mutex_lock(&data->enable_mutex);
+	if (atomic_read(&data->enable) == 1) {
+		smi130_acc_set_mode(data->smi130_acc_client,
+			SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_INPUT);
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+		cancel_delayed_work_sync(&data->work);
+#endif
+	}
+	if (data->is_timer_running) {
+		/* disable fifo_mode when stopping the timer */
+		if (smi130_acc_set_fifo_mode(data->smi130_acc_client, 0) < 0)
+			PERR("set fifo_mode failed");
+		hrtimer_cancel(&data->timer);
+		data->base_time = 0;
+		data->timestamp = 0;
+		data->fifo_time = 0;
+		data->acc_count = 0;
+	}
+	mutex_unlock(&data->enable_mutex);
+}
+
+static void smi130_acc_late_resume(struct early_suspend *h)
+{
+	struct smi130_acc_data *data =
+		container_of(h, struct smi130_acc_data, early_suspend);
+	if (NULL == data)
+		return;
+
+	mutex_lock(&data->enable_mutex);
+	if (atomic_read(&data->enable) == 1) {
+		smi130_acc_set_mode(data->smi130_acc_client,
+			SMI_ACC2X2_MODE_NORMAL, SMI_ACC_ENABLED_INPUT);
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+		schedule_delayed_work(&data->work,
+				msecs_to_jiffies(atomic_read(&data->delay)));
+#endif
+	}
+	if (data->is_timer_running) {
+		hrtimer_start(&data->timer,
+					ns_to_ktime(data->time_odr),
+			HRTIMER_MODE_REL);
+		/* re-enable fifo_mode on resume */
+		if (smi130_acc_set_fifo_mode(data->smi130_acc_client, 2) < 0)
+			PERR("set fifo_mode failed");
+		data->base_time = 0;
+		data->timestamp = 0;
+		data->is_timer_running = 1;
+		data->acc_count = 0;
+	}
+	mutex_unlock(&data->enable_mutex);
+}
+#endif
+
+static int smi130_acc_remove(struct i2c_client *client)
+{
+	struct smi130_acc_data *data = i2c_get_clientdata(client);
+
+	if (NULL == data)
+		return 0;
+
+	smi130_acc_set_enable(&client->dev, 0);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&data->early_suspend);
+#endif
+	sysfs_remove_group(&data->input->dev.kobj, &smi130_acc_attribute_group);
+	input_unregister_device(data->input);
+
+	if (NULL != data->bosch_pd) {
+		kfree(data->bosch_pd);
+		data->bosch_pd = NULL;
+	}
+
+	kfree(data);
+	return 0;
+}
+
+void smi130_acc_shutdown(struct i2c_client *client)
+{
+	struct smi130_acc_data *data = i2c_get_clientdata(client);
+
+	mutex_lock(&data->enable_mutex);
+	smi130_acc_set_mode(data->smi130_acc_client,
+		SMI_ACC2X2_MODE_DEEP_SUSPEND, SMI_ACC_ENABLED_ALL);
+	mutex_unlock(&data->enable_mutex);
+}
+
+#ifdef CONFIG_PM
+static int smi130_acc_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct smi130_acc_data *data = i2c_get_clientdata(client);
+
+	mutex_lock(&data->enable_mutex);
+	if (atomic_read(&data->enable) == 1) {
+		smi130_acc_set_mode(data->smi130_acc_client,
+			SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_INPUT);
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+		cancel_delayed_work_sync(&data->work);
+#endif
+	}
+	if (data->is_timer_running) {
+		hrtimer_cancel(&data->timer);
+		data->base_time = 0;
+		data->timestamp = 0;
+		data->fifo_time = 0;
+		data->acc_count = 0;
+	}
+	mutex_unlock(&data->enable_mutex);
+
+	return 0;
+}
+
+static int smi130_acc_resume(struct i2c_client *client)
+{
+	struct smi130_acc_data *data = i2c_get_clientdata(client);
+
+	mutex_lock(&data->enable_mutex);
+	if (atomic_read(&data->enable) == 1) {
+		smi130_acc_set_mode(data->smi130_acc_client,
+			SMI_ACC2X2_MODE_NORMAL, SMI_ACC_ENABLED_INPUT);
+#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
+		schedule_delayed_work(&data->work,
+				msecs_to_jiffies(atomic_read(&data->delay)));
+#endif
+	}
+	if (data->is_timer_running) {
+		hrtimer_start(&data->timer,
+					ns_to_ktime(data->time_odr),
+			HRTIMER_MODE_REL);
+		data->base_time = 0;
+		data->timestamp = 0;
+		data->is_timer_running = 1;
+	}
+	mutex_unlock(&data->enable_mutex);
+
+	return 0;
+}
+
+#else
+
+#define smi130_acc_suspend      NULL
+#define smi130_acc_resume       NULL
+
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id smi130_acc_id[] = {
+	{ SENSOR_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, smi130_acc_id);
+static const struct of_device_id smi130_acc_of_match[] = {
+	{ .compatible = "smi130_acc", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, smi130_acc_of_match);
+
+static struct i2c_driver smi130_acc_driver = {
+	.driver = {
+		.owner  = THIS_MODULE,
+		.name   = SENSOR_NAME,
+		.of_match_table = smi130_acc_of_match,
+	},
+	/* .suspend    = smi130_acc_suspend, */
+	/* .resume     = smi130_acc_resume, */
+	.id_table   = smi130_acc_id,
+	.probe      = smi130_acc_probe,
+	.remove     = smi130_acc_remove,
+	.shutdown   = smi130_acc_shutdown,
+};
+
+static int __init SMI_ACC2X2_init(void)
+{
+	return i2c_add_driver(&smi130_acc_driver);
+}
+
+static void __exit SMI_ACC2X2_exit(void)
+{
+	i2c_del_driver(&smi130_acc_driver);
+}
+
+MODULE_AUTHOR("contact@bosch-sensortec.com");
+MODULE_DESCRIPTION("SMI_ACC2X2 ACCELEROMETER SENSOR DRIVER");
+MODULE_LICENSE("GPL v2");
+
+module_init(SMI_ACC2X2_init);
+module_exit(SMI_ACC2X2_exit);
+
diff --git a/drivers/input/sensors/smi130/smi130_driver.c b/drivers/input/sensors/smi130/smi130_driver.c
new file mode 100644
index 0000000..42a0a57
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_driver.c
@@ -0,0 +1,4121 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename smi130_driver.c
+ * @date     2016/08/01 14:40
+ * @Modification Date 2018/08/28 18:20
+ * @id       "b5ff23a"
+ * @version  1.3
+ *
+ * @brief
+ * The core code of SMI130 device driver
+ *
+ * @detail
+ * This file implements the core code of SMI130 device driver,
+ * which includes hardware related functions, input device register,
+ * device attribute files, etc.
+*/
+
+#include "smi130.h"
+#include "smi130_driver.h"
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+
+
+#define DRIVER_VERSION "0.0.53.0"
+#define I2C_BURST_READ_MAX_LEN      (256)
+#define SMI130_STORE_COUNT  (6000)
+#define LMADA     (1)
+uint64_t g_current_apts_us_mbl;
+
+
+enum SMI_SENSOR_INT_T {
+	/* Interrupt enable0*/
+	SMI_ANYMO_X_INT = 0,
+	SMI_ANYMO_Y_INT,
+	SMI_ANYMO_Z_INT,
+	SMI_D_TAP_INT,
+	SMI_S_TAP_INT,
+	SMI_ORIENT_INT,
+	SMI_FLAT_INT,
+	/* Interrupt enable1*/
+	SMI_HIGH_X_INT,
+	SMI_HIGH_Y_INT,
+	SMI_HIGH_Z_INT,
+	SMI_LOW_INT,
+	SMI_DRDY_INT,
+	SMI_FFULL_INT,
+	SMI_FWM_INT,
+	/* Interrupt enable2 */
+	SMI_NOMOTION_X_INT,
+	SMI_NOMOTION_Y_INT,
+	SMI_NOMOTION_Z_INT,
+	SMI_STEP_DETECTOR_INT,
+	INT_TYPE_MAX
+};
+
+/*smi fifo sensor type combination*/
+enum SMI_SENSOR_FIFO_COMBINATION {
+	SMI_FIFO_A = 0,
+	SMI_FIFO_G,
+	SMI_FIFO_M,
+	SMI_FIFO_G_A,
+	SMI_FIFO_M_A,
+	SMI_FIFO_M_G,
+	SMI_FIFO_M_G_A,
+	SMI_FIFO_COM_MAX
+};
+
+/*smi fifo analyse return err status*/
+enum SMI_FIFO_ANALYSE_RETURN_T {
+	FIFO_OVER_READ_RETURN = -10,
+	FIFO_SENSORTIME_RETURN = -9,
+	FIFO_SKIP_OVER_LEN = -8,
+	FIFO_M_G_A_OVER_LEN = -7,
+	FIFO_M_G_OVER_LEN = -6,
+	FIFO_M_A_OVER_LEN = -5,
+	FIFO_G_A_OVER_LEN = -4,
+	FIFO_M_OVER_LEN = -3,
+	FIFO_G_OVER_LEN = -2,
+	FIFO_A_OVER_LEN = -1
+};
+
+/*!smi sensor generic power mode enum */
+enum SMI_DEV_OP_MODE {
+	SENSOR_PM_NORMAL = 0,
+	SENSOR_PM_LP1,
+	SENSOR_PM_SUSPEND,
+	SENSOR_PM_LP2
+};
+
+/*! smi acc sensor power mode enum */
+enum SMI_ACC_PM_TYPE {
+	SMI_ACC_PM_NORMAL = 0,
+	SMI_ACC_PM_LP1,
+	SMI_ACC_PM_SUSPEND,
+	SMI_ACC_PM_LP2,
+	SMI_ACC_PM_MAX
+};
+
+/*! smi gyro sensor power mode enum */
+enum SMI_GYRO_PM_TYPE {
+	SMI_GYRO_PM_NORMAL = 0,
+	SMI_GYRO_PM_FAST_START,
+	SMI_GYRO_PM_SUSPEND,
+	SMI_GYRO_PM_MAX
+};
+
+/*! smi mag sensor power mode enum */
+enum SMI_MAG_PM_TYPE {
+	SMI_MAG_PM_NORMAL = 0,
+	SMI_MAG_PM_LP1,
+	SMI_MAG_PM_SUSPEND,
+	SMI_MAG_PM_LP2,
+	SMI_MAG_PM_MAX
+};
+
+
+/*! smi sensor support type*/
+enum SMI_SENSOR_TYPE {
+	SMI_ACC_SENSOR,
+	SMI_GYRO_SENSOR,
+	SMI_MAG_SENSOR,
+	SMI_SENSOR_TYPE_MAX
+};
+
+/*!smi sensor axis enum */
+enum SMI_AXIS_TYPE {
+	X_AXIS = 0,
+	Y_AXIS,
+	Z_AXIS,
+	AXIS_MAX
+};
+
+/*!smi sensor generic interrupt enum */
+enum SMI_INT_TYPE {
+	SMI130_INT0 = 0,
+	SMI130_INT1,
+	SMI130_INT_MAX
+};
+
+/*! smi sensor time resolution definition*/
+enum SMI_SENSOR_TIME_RS_TYPE {
+	TS_0_78_HZ = 1,/*0.78HZ*/
+	TS_1_56_HZ,/*1.56HZ*/
+	TS_3_125_HZ,/*3.125HZ*/
+	TS_6_25_HZ,/*6.25HZ*/
+	TS_12_5_HZ,/*12.5HZ*/
+	TS_25_HZ,/*25HZ, odr=6*/
+	TS_50_HZ,/*50HZ*/
+	TS_100_HZ,/*100HZ*/
+	TS_200_HZ,/*200HZ*/
+	TS_400_HZ,/*400HZ*/
+	TS_800_HZ,/*800HZ*/
+	TS_1600_HZ,/*1600HZ*/
+	TS_MAX_HZ
+};
+
+/*! smi sensor interface mode */
+enum SMI_SENSOR_IF_MODE_TYPE {
+	/*primary interface:autoconfig/secondary interface off*/
+	P_AUTO_S_OFF = 0,
+	/*primary interface:I2C/secondary interface:OIS*/
+	P_I2C_S_OIS,
+	/*primary interface:autoconfig/secondary interface:Magnetometer*/
+	P_AUTO_S_MAG,
+	/*interface mode reserved*/
+	IF_MODE_RESEVED
+
+};
+
+/*! smi130 acc/gyro calibration status in H/W layer */
+enum SMI_CALIBRATION_STATUS_TYPE {
+	/*SMI FAST Calibration ready x/y/z status*/
+	SMI_ACC_X_FAST_CALI_RDY = 0,
+	SMI_ACC_Y_FAST_CALI_RDY,
+	SMI_ACC_Z_FAST_CALI_RDY
+};
+
+unsigned int reg_op_addr_mbl;
+
+static const int smi_pmu_cmd_acc_arr[SMI_ACC_PM_MAX] = {
+	/*!smi pmu for acc normal, low power1,
+	 * suspend, low power2 mode command */
+	CMD_PMU_ACC_NORMAL,
+	CMD_PMU_ACC_LP1,
+	CMD_PMU_ACC_SUSPEND,
+	CMD_PMU_ACC_LP2
+};
+
+static const int smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_MAX] = {
+	/*!smi pmu for gyro normal, fast startup,
+	 * suspend mode command */
+	CMD_PMU_GYRO_NORMAL,
+	CMD_PMU_GYRO_FASTSTART,
+	CMD_PMU_GYRO_SUSPEND
+};
+
+static const int smi_pmu_cmd_mag_arr[SMI_MAG_PM_MAX] = {
+	/*!smi pmu for mag normal, low power1,
+	 * suspend, low power2 mode command */
+	CMD_PMU_MAG_NORMAL,
+	CMD_PMU_MAG_LP1,
+	CMD_PMU_MAG_SUSPEND,
+	CMD_PMU_MAG_LP2
+};
+
+static const char *smi_axis_name[AXIS_MAX] = {"x", "y", "z"};
+
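+/*
+ * smi_interrupt_type[] is indexed by enum SMI_SENSOR_INT_T above, e.g.
+ * smi_interrupt_type[SMI_DRDY_INT] == SMI130_DATA_RDY_ENABLE.
+ */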
+static const int smi_interrupt_type[] = {
+	/*!smi interrupt type */
+	/* Interrupt enable0 , index=0~6*/
+	SMI130_ANY_MOTION_X_ENABLE,
+	SMI130_ANY_MOTION_Y_ENABLE,
+	SMI130_ANY_MOTION_Z_ENABLE,
+	SMI130_DOUBLE_TAP_ENABLE,
+	SMI130_SINGLE_TAP_ENABLE,
+	SMI130_ORIENT_ENABLE,
+	SMI130_FLAT_ENABLE,
+	/* Interrupt enable1, index=7~13*/
+	SMI130_HIGH_G_X_ENABLE,
+	SMI130_HIGH_G_Y_ENABLE,
+	SMI130_HIGH_G_Z_ENABLE,
+	SMI130_LOW_G_ENABLE,
+	SMI130_DATA_RDY_ENABLE,
+	SMI130_FIFO_FULL_ENABLE,
+	SMI130_FIFO_WM_ENABLE,
+	/* Interrupt enable2, index = 14~17*/
+	SMI130_NOMOTION_X_ENABLE,
+	SMI130_NOMOTION_Y_ENABLE,
+	SMI130_NOMOTION_Z_ENABLE,
+	SMI130_STEP_DETECTOR_EN
+};
+
+/*! smi sensor time depend on ODR*/
+struct smi_sensor_time_odr_tbl {
+	u32 ts_duration_lsb;
+	u32 ts_duration_us;
+	u32 ts_delat;/*sub current delta fifo_time*/
+};
+
+struct smi130_axis_data_t {
+	s16 x;
+	s16 y;
+	s16 z;
+};
+
+struct smi130_type_mapping_type {
+
+	/*! smi16x sensor chip id */
+	uint16_t chip_id;
+
+	/*! smi16x chip revision code */
+	uint16_t revision_id;
+
+	/*! smi130_acc sensor name */
+	const char *sensor_name;
+};
+
+struct smi130_store_info_t {
+	uint8_t current_frm_cnt;
+	uint64_t current_apts_us[2];
+	uint8_t fifo_ts_total_frmcnt;
+	uint64_t fifo_time;
+};
+
+uint64_t get_current_timestamp_mbl(void)
+{
+	uint64_t ts_ap;
+	struct timespec tmp_time;
+	get_monotonic_boottime(&tmp_time);
+	ts_ap = (uint64_t)tmp_time.tv_sec * 1000000000 + tmp_time.tv_nsec;
+	return ts_ap;
+
+}
+
+/*! sensor support type map */
+static const struct smi130_type_mapping_type sensor_type_map[] = {
+
+	{SENSOR_CHIP_ID_SMI, SENSOR_CHIP_REV_ID_SMI, "SMI130/162AB"},
+	{SENSOR_CHIP_ID_SMI_C2, SENSOR_CHIP_REV_ID_SMI, "SMI130C2"},
+	{SENSOR_CHIP_ID_SMI_C3, SENSOR_CHIP_REV_ID_SMI, "SMI130C3"},
+
+};
+
+/*!smi130 sensor time depends on ODR */
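+/*
+ * One sensor-time LSB is 39.0625 us, so for every entry below
+ * ts_duration_us == ts_duration_lsb * 39.0625 and
+ * ts_delat == ts_duration_lsb - 1 (the mask within one ODR period).
+ */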
+static const struct smi_sensor_time_odr_tbl
+		sensortime_duration_tbl[TS_MAX_HZ] = {
+	{0x010000, 2560000, 0x00ffff},/*2560ms, 0.39hz, odr=resver*/
+	{0x008000, 1280000, 0x007fff},/*1280ms, 0.78hz, odr_acc=1*/
+	{0x004000, 640000, 0x003fff},/*640ms, 1.56hz, odr_acc=2*/
+	{0x002000, 320000, 0x001fff},/*320ms, 3.125hz, odr_acc=3*/
+	{0x001000, 160000, 0x000fff},/*160ms, 6.25hz, odr_acc=4*/
+	{0x000800, 80000,  0x0007ff},/*80ms, 12.5hz*/
+	{0x000400, 40000, 0x0003ff},/*40ms, 25hz, odr_acc = odr_gyro =6*/
+	{0x000200, 20000, 0x0001ff},/*20ms, 50hz, odr = 7*/
+	{0x000100, 10000, 0x0000ff},/*10ms, 100hz, odr=8*/
+	{0x000080, 5000, 0x00007f},/*5ms, 200hz, odr=9*/
+	{0x000040, 2500, 0x00003f},/*2.5ms, 400hz, odr=10*/
+	{0x000020, 1250, 0x00001f},/*1.25ms, 800hz, odr=11*/
+	{0x000010, 625, 0x00000f},/*0.625ms, 1600hz, odr=12*/
+
+};
+
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+#define POLL_INTERVAL_MIN_MS	10
+#define POLL_INTERVAL_MAX_MS	4000
+#define POLL_DEFAULT_INTERVAL_MS 200
+#define SMI130_ACCEL_MIN_VALUE	-32768
+#define SMI130_ACCEL_MAX_VALUE	32767
+#define SMI130_GYRO_MIN_VALUE	-32768
+#define SMI130_GYRO_MAX_VALUE	32767
+#define SMI130_ACCEL_DEFAULT_POLL_INTERVAL_MS	200
+#define SMI130_GYRO_DEFAULT_POLL_INTERVAL_MS	200
+#define SMI130_ACCEL_MIN_POLL_INTERVAL_MS	10
+#define SMI130_ACCEL_MAX_POLL_INTERVAL_MS	5000
+#define SMI130_GYRO_MIN_POLL_INTERVAL_MS	10
+#define SMI130_GYRO_MAX_POLL_INTERVAL_MS	5000
+static struct sensors_classdev smi130_accel_cdev = {
+		.name = "smi130-accel",
+		.vendor = "bosch",
+		.version = 1,
+		.handle = SENSORS_ACCELERATION_HANDLE,
+		.type = SENSOR_TYPE_ACCELEROMETER,
+		.max_range = "156.8",	/* 16g */
+		.resolution = "0.153125",	/* 15.6mg */
+		.sensor_power = "0.13",	/* typical value */
+		.min_delay = POLL_INTERVAL_MIN_MS * 1000, /* in microseconds */
+		.max_delay = POLL_INTERVAL_MAX_MS,
+		.delay_msec = POLL_DEFAULT_INTERVAL_MS, /* in millisecond */
+		.fifo_reserved_event_count = 0,
+		.fifo_max_event_count = 0,
+		.enabled = 0,
+		.max_latency = 0,
+		.flags = 0,
+		.sensors_enable = NULL,
+		.sensors_poll_delay = NULL,
+		.sensors_set_latency = NULL,
+		.sensors_flush = NULL,
+		.sensors_self_test = NULL,
+};
+static struct sensors_classdev smi130_gyro_cdev = {
+	.name = "smi130-gyro",
+	.vendor = "bosch",
+	.version = 1,
+	.handle = SENSORS_GYROSCOPE_HANDLE,
+	.type = SENSOR_TYPE_GYROSCOPE,
+	.max_range = "34.906586",	/* rad/s */
+	.resolution = "0.0010681152",	/* rad/s */
+	.sensor_power = "3.6",	/* 3.6 mA */
+	.min_delay = SMI130_GYRO_MIN_POLL_INTERVAL_MS * 1000,
+	.max_delay = SMI130_GYRO_MAX_POLL_INTERVAL_MS,
+	.delay_msec = SMI130_GYRO_DEFAULT_POLL_INTERVAL_MS,
+	.fifo_reserved_event_count = 0,
+	.fifo_max_event_count = 0,
+	.enabled = 0,
+	.max_latency = 0,
+	.flags = 0, /* SENSOR_FLAG_CONTINUOUS_MODE */
+	.sensors_enable = NULL,
+	.sensors_poll_delay = NULL,
+	.sensors_enable_wakeup = NULL,
+	.sensors_set_latency = NULL,
+	.sensors_flush = NULL,
+};
+#endif
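+/*
+ * Use hrtimer-based usleep_range() for short delays (<= 20 ms) to avoid
+ * jiffy rounding; longer delays can tolerate msleep().
+ */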
+static void smi_delay(u32 msec)
+{
+	if (msec <= 20)
+		usleep_range(msec * 1000, (msec + 1) * 1000);
+	else
+		msleep(msec);
+}
+
+static void smi_dump_reg(struct smi_client_data *client_data)
+{
+	#define REG_MAX0 0x24
+	#define REG_MAX1 0x56
+	int i;
+	u8 dbg_buf0[REG_MAX0];
+	u8 dbg_buf1[REG_MAX1];
+	u8 dbg_buf_str0[REG_MAX0 * 3 + 1] = "";
+	u8 dbg_buf_str1[REG_MAX1 * 3 + 1] = "";
+
+	dev_notice(client_data->dev, "\nFrom 0x00:\n");
+
+	client_data->device.bus_read(client_data->device.dev_addr,
+			SMI_REG_NAME(USER_CHIP_ID), dbg_buf0, REG_MAX0);
+	for (i = 0; i < REG_MAX0; i++) {
+		snprintf(dbg_buf_str0 + i * 3, 4, "%02x%c", dbg_buf0[i],
+				(((i + 1) % BYTES_PER_LINE == 0) ? '\n' : ' '));
+	}
+	dev_notice(client_data->dev, "%s\n", dbg_buf_str0);
+
+	client_data->device.bus_read(client_data->device.dev_addr,
+			SMI130_USER_ACCEL_CONFIG_ADDR, dbg_buf1, REG_MAX1);
+	dev_notice(client_data->dev, "\nFrom 0x40:\n");
+	for (i = 0; i < REG_MAX1; i++) {
+		snprintf(dbg_buf_str1 + i * 3, 4, "%02x%c", dbg_buf1[i],
+				(((i + 1) % BYTES_PER_LINE == 0) ? '\n' : ' '));
+	}
+	dev_notice(client_data->dev, "\n%s\n", dbg_buf_str1);
+}
+
+
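+/*
+ * Worst-case size of one FIFO frame for the current selection:
+ * 1 header byte + 6 bytes per accel/gyro sample + 8 bytes for mag.
+ */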
+void smi_fifo_frame_bytes_extend_calc(
+	struct smi_client_data *client_data,
+	unsigned int *fifo_frmbytes_extend)
+{
+
+	switch (client_data->fifo_data_sel) {
+	case SMI_FIFO_A_SEL:
+	case SMI_FIFO_G_SEL:
+		*fifo_frmbytes_extend = 7;
+		break;
+	case SMI_FIFO_G_A_SEL:
+		*fifo_frmbytes_extend = 13;
+		break;
+	case SMI_FIFO_M_SEL:
+		*fifo_frmbytes_extend = 9;
+		break;
+	case SMI_FIFO_M_A_SEL:
+	case SMI_FIFO_M_G_SEL:
+		/*8(mag) + 6(gyro or acc) +1(head) = 15*/
+		*fifo_frmbytes_extend = 15;
+		break;
+	case SMI_FIFO_M_G_A_SEL:
+		/*8(mag) + 6(gyro or acc) + 6 + 1 = 21*/
+		*fifo_frmbytes_extend = 21;
+		break;
+	default:
+		*fifo_frmbytes_extend = 0;
+		break;
+
+	}
+
+}
+
+static int smi_input_init(struct smi_client_data *client_data)
+{
+	struct input_dev *dev;
+	int err = 0;
+
+	dev = input_allocate_device();
+	if (NULL == dev)
+		return -ENOMEM;
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+	dev->name = "smi130-accel";
+#else
+	dev->name = SENSOR_NAME;
+#endif
+	dev->id.bustype = BUS_I2C;
+
+	input_set_capability(dev, EV_MSC, MSC_GESTURE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_SGM);
+
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_GYRO_CALIB_DONE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_STEP_DETECTOR);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_ACC_CALIB_DONE);
+
+
+	input_set_capability(dev, EV_REL, REL_X);
+	input_set_capability(dev, EV_REL, REL_Y);
+	input_set_capability(dev, EV_REL, REL_Z);
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	input_set_capability(dev, EV_ABS, ABS_MISC);
+	input_set_abs_params(dev, ABS_X,
+	SMI130_ACCEL_MIN_VALUE, SMI130_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_Y,
+	SMI130_ACCEL_MIN_VALUE, SMI130_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_Z,
+	SMI130_ACCEL_MIN_VALUE, SMI130_ACCEL_MAX_VALUE,
+	0, 0);
+	#endif
+	input_set_drvdata(dev, client_data);
+
+	err = input_register_device(dev);
+	if (err < 0) {
+		input_free_device(dev);
+		dev_notice(client_data->dev, "smi130 input free!\n");
+		return err;
+	}
+	client_data->input = dev;
+	dev_notice(client_data->dev,
+		"smi130 input register successfully, %s!\n",
+		client_data->input->name);
+	return err;
+}
+
+//#if defined(CONFIG_USE_QUALCOMM_HAL)
+static int smi_gyro_input_init(struct smi_client_data *client_data)
+{
+	struct input_dev *dev;
+	int err = 0;
+
+	dev = input_allocate_device();
+	if (NULL == dev)
+		return -ENOMEM;
+	dev->name = "smi130-gyro";
+	dev->id.bustype = BUS_I2C;
+	input_set_capability(dev, EV_ABS, ABS_MISC);
+	input_set_capability(dev, EV_MSC, MSC_GESTURE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_SGM);
+
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_GYRO_CALIB_DONE);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_STEP_DETECTOR);
+	input_set_capability(dev, EV_MSC, INPUT_EVENT_FAST_ACC_CALIB_DONE);
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	input_set_abs_params(dev, ABS_RX,
+	SMI130_ACCEL_MIN_VALUE, SMI130_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_RY,
+	SMI130_ACCEL_MIN_VALUE, SMI130_ACCEL_MAX_VALUE,
+	0, 0);
+	input_set_abs_params(dev, ABS_RZ,
+	SMI130_ACCEL_MIN_VALUE, SMI130_ACCEL_MAX_VALUE,
+	0, 0);
+	#endif
+	input_set_drvdata(dev, client_data);
+	err = input_register_device(dev);
+	if (err < 0) {
+		input_free_device(dev);
+		dev_notice(client_data->dev, "smi130 input free!\n");
+		return err;
+	}
+	client_data->gyro_input = dev;
+	dev_notice(client_data->dev,
+		"smi130 input register successfully, %s!\n",
+		client_data->gyro_input->name);
+	return err;
+}
+//#endif
+static void smi_input_destroy(struct smi_client_data *client_data)
+{
+	struct input_dev *dev = client_data->input;
+
+	input_unregister_device(dev);
+	input_free_device(dev);
+}
+
+static int smi_check_chip_id(struct smi_client_data *client_data)
+{
+	int8_t err = 0;
+	int8_t i = 0;
+	uint8_t chip_id = 0;
+	uint8_t read_count = 0;
+	u8 smi_sensor_cnt = sizeof(sensor_type_map)
+				/ sizeof(struct smi130_type_mapping_type);
+	/* read and check chip id */
+	while (read_count++ < CHECK_CHIP_ID_TIME_MAX) {
+		if (client_data->device.bus_read(client_data->device.dev_addr,
+				SMI_REG_NAME(USER_CHIP_ID), &chip_id, 1) < 0) {
+
+			dev_err(client_data->dev,
+				"Bosch Sensortec Device not found, read chip_id:%d\n",
+				chip_id);
+			continue;
+		} else {
+			for (i = 0; i < smi_sensor_cnt; i++) {
+				if (sensor_type_map[i].chip_id == chip_id) {
+					client_data->chip_id = chip_id;
+					dev_notice(client_data->dev,
+						"Bosch Sensortec Device detected, HW IC name: %s\n",
+						sensor_type_map[i].sensor_name);
+					break;
+				}
+			}
+			if (i < smi_sensor_cnt)
+				break;
+			else {
+				if (read_count == CHECK_CHIP_ID_TIME_MAX) {
+					dev_err(client_data->dev,
+						"Failed! Bosch Sensortec Device not found, mismatch chip_id:%d\n",
+						chip_id);
+					err = -ENODEV;
+					return err;
+				}
+			}
+			smi_delay(1);
+		}
+	}
+	return err;
+
+}
+
+static int smi_pmu_set_suspend(struct smi_client_data *client_data)
+{
+	int err = 0;
+	if (client_data == NULL)
+		return -EINVAL;
+	else {
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_acc_arr[SENSOR_PM_SUSPEND]);
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SENSOR_PM_SUSPEND]);
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_mag_arr[SENSOR_PM_SUSPEND]);
+		client_data->pw.acc_pm = SMI_ACC_PM_SUSPEND;
+		client_data->pw.gyro_pm = SMI_GYRO_PM_SUSPEND;
+		client_data->pw.mag_pm = SMI_MAG_PM_SUSPEND;
+	}
+
+	return err;
+}
+
+static int smi_get_err_status(struct smi_client_data *client_data)
+{
+	int err = 0;
+
+	err = SMI_CALL_API(get_error_status)(&client_data->err_st.fatal_err,
+		&client_data->err_st.err_code, &client_data->err_st.i2c_fail,
+	&client_data->err_st.drop_cmd, &client_data->err_st.mag_drdy_err);
+	return err;
+}
+
+static void smi_work_func(struct work_struct *work)
+{
+	struct smi_client_data *client_data =
+		container_of((struct delayed_work *)work,
+			struct smi_client_data, work);
+	unsigned long delay =
+		msecs_to_jiffies(atomic_read(&client_data->delay));
+	struct smi130_accel_t data;
+	int err;
+
+	err = SMI_CALL_API(read_accel_xyz)(&data);
+	if (err < 0)
+		return;
+
+	/*report current frame via input event*/
+	input_event(client_data->input, EV_REL, REL_X, data.x);
+	input_event(client_data->input, EV_REL, REL_Y, data.y);
+	input_event(client_data->input, EV_REL, REL_Z, data.z);
+	input_sync(client_data->input);
+
+	schedule_delayed_work(&client_data->work, delay);
+}
+
+static ssize_t smi130_chip_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "0x%x\n", client_data->chip_id);
+}
+
+static ssize_t smi130_err_st_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	err = smi_get_err_status(client_data);
+	if (err)
+		return err;
+	else {
+		return snprintf(buf, 128, "fatal_err:0x%x, err_code:%d,\n\n"
+			"i2c_fail_err:%d, drop_cmd_err:%d, mag_drdy_err:%d\n",
+			client_data->err_st.fatal_err,
+			client_data->err_st.err_code,
+			client_data->err_st.i2c_fail,
+			client_data->err_st.drop_cmd,
+			client_data->err_st.mag_drdy_err);
+
+	}
+}
+
+static ssize_t smi130_sensor_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	u32 sensor_time;
+	err = SMI_CALL_API(get_sensor_time)(&sensor_time);
+	if (err)
+		return err;
+	else
+		return snprintf(buf, 16, "0x%x\n", (unsigned int)sensor_time);
+}
+
+static ssize_t smi130_fifo_flush_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long enable;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &enable);
+	if (err)
+		return err;
+	if (enable)
+		err = SMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+
+	if (err)
+		dev_err(client_data->dev, "fifo flush failed!\n");
+
+	return count;
+
+}
+
+
+static ssize_t smi130_fifo_bytecount_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned int fifo_bytecount = 0;
+
+	SMI_CALL_API(fifo_length)(&fifo_bytecount);
+	err = snprintf(buf, 16, "%u\n", fifo_bytecount);
+	return err;
+}
+
+static ssize_t smi130_fifo_bytecount_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	client_data->fifo_bytecount = (unsigned int) data;
+
+	return count;
+}
+
+int smi130_fifo_data_sel_get(struct smi_client_data *client_data)
+{
+	int err = 0;
+	unsigned char fifo_acc_en, fifo_gyro_en, fifo_mag_en;
+	unsigned char fifo_datasel;
+
+	err += SMI_CALL_API(get_fifo_accel_enable)(&fifo_acc_en);
+	err += SMI_CALL_API(get_fifo_gyro_enable)(&fifo_gyro_en);
+	err += SMI_CALL_API(get_fifo_mag_enable)(&fifo_mag_en);
+
+	if (err)
+		return err;
+
+	fifo_datasel = (fifo_acc_en << SMI_ACC_SENSOR) |
+			(fifo_gyro_en << SMI_GYRO_SENSOR) |
+				(fifo_mag_en << SMI_MAG_SENSOR);
+
+	client_data->fifo_data_sel = fifo_datasel;
+
+	return err;
+}
+
+static ssize_t smi130_fifo_data_sel_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	err = smi130_fifo_data_sel_get(client_data);
+	if (err) {
+		dev_err(client_data->dev, "get fifo_sel failed!\n");
+		return -EINVAL;
+	}
+	return snprintf(buf, 16, "%d\n", client_data->fifo_data_sel);
+}
+
+/* Select which sensors feed the FIFO: bit0 accel, bit1 gyro, bit2 mag;
+ * writing any valid value also clears the FIFO.
+ */
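+/* Example: writing "3" (bit1 gyro | bit0 accel) enables gyro and accel
+ * frames and clears the FIFO; values above 7 are rejected with -EINVAL.
+ */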
+static ssize_t smi130_fifo_data_sel_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	unsigned char fifo_datasel;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* data format: aimed 0b0000 0x(m)x(g)x(a), x:1 enable, 0:disable*/
+	if (data > 7)
+		return -EINVAL;
+
+
+	fifo_datasel = (unsigned char)data;
+
+
+	err += SMI_CALL_API(set_fifo_accel_enable)
+			((fifo_datasel & (1 << SMI_ACC_SENSOR)) ? 1 :  0);
+	err += SMI_CALL_API(set_fifo_gyro_enable)
+			(fifo_datasel & (1 << SMI_GYRO_SENSOR) ? 1 : 0);
+	err += SMI_CALL_API(set_fifo_mag_enable)
+			((fifo_datasel & (1 << SMI_MAG_SENSOR)) ? 1 : 0);
+
+	err += SMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+	if (err)
+		return -EIO;
+	else {
+		dev_notice(client_data->dev, "FIFO A_en:%d, G_en:%d, M_en:%d\n",
+			(fifo_datasel & (1 << SMI_ACC_SENSOR)) ? 1 :  0,
+			(fifo_datasel & (1 << SMI_GYRO_SENSOR) ? 1 : 0),
+			((fifo_datasel & (1 << SMI_MAG_SENSOR)) ? 1 : 0));
+		client_data->fifo_data_sel = fifo_datasel;
+	}
+	return count;
+}
+
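+/*
+ * Dumps the raw FIFO bytes directly into the sysfs buffer; the frame layout
+ * follows the current fifo_data_sel selection (see
+ * smi_fifo_frame_bytes_extend_calc() for the per-frame sizes).
+ */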
+static ssize_t smi130_fifo_data_out_frame_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	int err = 0;
+	uint32_t fifo_bytecount = 0;
+
+	err = SMI_CALL_API(fifo_length)(&fifo_bytecount);
+	if (err < 0) {
+		dev_err(client_data->dev, "read fifo_length err");
+		return -EINVAL;
+	}
+	if (fifo_bytecount == 0)
+		return 0;
+	err = smi_burst_read_wrapper(client_data->device.dev_addr,
+		SMI130_USER_FIFO_DATA__REG, buf,
+		fifo_bytecount);
+	if (err) {
+		dev_err(client_data->dev, "read fifo err");
+		SMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+		return -EINVAL;
+	}
+	return fifo_bytecount;
+
+}
+
+static ssize_t smi130_fifo_watermark_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data = 0xff;
+
+	err = SMI_CALL_API(get_fifo_wm)(&data);
+
+	if (err)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_fifo_watermark_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long data;
+	unsigned char fifo_watermark;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	fifo_watermark = (unsigned char)data;
+	err = SMI_CALL_API(set_fifo_wm)(fifo_watermark);
+	if (err)
+		return -EIO;
+
+	return count;
+}
+
+
+static ssize_t smi130_fifo_header_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data = 0xff;
+
+	err = SMI_CALL_API(get_fifo_header_enable)(&data);
+
+	if (err)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_fifo_header_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	unsigned char fifo_header_en;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	if (data > 1)
+		return -ENOENT;
+
+	fifo_header_en = (unsigned char)data;
+	err = SMI_CALL_API(set_fifo_header_enable)(fifo_header_en);
+	if (err)
+		return -EIO;
+
+	client_data->fifo_head_en = fifo_header_en;
+
+	return count;
+}
+
+static ssize_t smi130_fifo_time_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data = 0;
+
+	err = SMI_CALL_API(get_fifo_time_enable)(&data);
+
+	if (!err)
+		err = snprintf(buf, 16, "%d\n", data);
+
+	return err;
+}
+
+static ssize_t smi130_fifo_time_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long data;
+	unsigned char fifo_ts_en;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	fifo_ts_en = (unsigned char)data;
+
+	err = SMI_CALL_API(set_fifo_time_enable)(fifo_ts_en);
+	if (err)
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t smi130_fifo_int_tag_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	unsigned char fifo_tag_int1 = 0;
+	unsigned char fifo_tag_int2 = 0;
+	unsigned char fifo_tag_int;
+
+	err += SMI_CALL_API(get_fifo_tag_intr1_enable)(&fifo_tag_int1);
+	err += SMI_CALL_API(get_fifo_tag_intr2_enable)(&fifo_tag_int2);
+
+	fifo_tag_int = (fifo_tag_int1 << SMI130_INT0) |
+			(fifo_tag_int2 << SMI130_INT1);
+
+	if (!err)
+		err = snprintf(buf, 16, "%d\n", fifo_tag_int);
+
+	return err;
+}
+
+static ssize_t smi130_fifo_int_tag_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+	unsigned char fifo_tag_int_en;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	if (data > 3)
+		return -EINVAL;
+
+	fifo_tag_int_en = (unsigned char)data;
+
+	err += SMI_CALL_API(set_fifo_tag_intr1_enable)
+			((fifo_tag_int_en & (1 << SMI130_INT0)) ? 1 :  0);
+	err += SMI_CALL_API(set_fifo_tag_intr2_enable)
+			((fifo_tag_int_en & (1 << SMI130_INT1)) ? 1 :  0);
+
+	if (err) {
+		dev_err(client_data->dev, "fifo int tag en err:%d\n", err);
+		return -EIO;
+	}
+	client_data->fifo_int_tag_en = fifo_tag_int_en;
+
+	return count;
+}
+
+static int smi130_set_acc_op_mode(struct smi_client_data *client_data,
+							unsigned long op_mode)
+{
+	int err = 0;
+	unsigned char stc_enable;
+	unsigned char std_enable;
+	mutex_lock(&client_data->mutex_op_mode);
+
+	if (op_mode < SMI_ACC_PM_MAX) {
+		switch (op_mode) {
+		case SMI_ACC_PM_NORMAL:
+			err = SMI_CALL_API(set_command_register)
+			(smi_pmu_cmd_acc_arr[SMI_ACC_PM_NORMAL]);
+			client_data->pw.acc_pm = SMI_ACC_PM_NORMAL;
+			smi_delay(10);
+			break;
+		case SMI_ACC_PM_LP1:
+			err = SMI_CALL_API(set_command_register)
+			(smi_pmu_cmd_acc_arr[SMI_ACC_PM_LP1]);
+			client_data->pw.acc_pm = SMI_ACC_PM_LP1;
+			smi_delay(3);
+			break;
+		case SMI_ACC_PM_SUSPEND:
+			SMI_CALL_API(get_step_counter_enable)(&stc_enable);
+			SMI_CALL_API(get_step_detector_enable)(&std_enable);
+			if ((stc_enable == 0) && (std_enable == 0) &&
+				(client_data->sig_flag == 0)) {
+				err = SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_acc_arr[SMI_ACC_PM_SUSPEND]);
+				client_data->pw.acc_pm = SMI_ACC_PM_SUSPEND;
+				smi_delay(10);
+			}
+			break;
+		case SMI_ACC_PM_LP2:
+			err = SMI_CALL_API(set_command_register)
+			(smi_pmu_cmd_acc_arr[SMI_ACC_PM_LP2]);
+			client_data->pw.acc_pm = SMI_ACC_PM_LP2;
+			smi_delay(3);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	return err;
+}
+
+static ssize_t smi130_temperature_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	s16 temp = 0xff;
+
+	err = SMI_CALL_API(get_temp)(&temp);
+
+	if (!err)
+		err = snprintf(buf, 16, "0x%x\n", temp);
+
+	return err;
+}
+
+static ssize_t smi130_place_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int place = BOSCH_SENSOR_PLACE_UNKNOWN;
+
+	if (NULL != client_data->bosch_pd)
+		place = client_data->bosch_pd->place;
+
+	return snprintf(buf, 16, "%d\n", place);
+}
+
+static ssize_t smi130_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&client_data->delay));
+
+}
+
+static ssize_t smi130_delay_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long data;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	if (data == 0) {
+		err = -EINVAL;
+		return err;
+	}
+
+	if (data < SMI_DELAY_MIN)
+		data = SMI_DELAY_MIN;
+
+	atomic_set(&client_data->delay, (unsigned int)data);
+
+	return count;
+}
+
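+/*
+ * smi130_enable_store()/smi130_delay_store() drive the polling path:
+ * writing a non-zero enable value switches the accel to normal mode and
+ * schedules the delayed work, whose period in milliseconds is the last
+ * delay value written (clamped to SMI_DELAY_MIN).
+ */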
+static ssize_t smi130_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&client_data->wkqueue_en));
+
+}
+
+static ssize_t smi130_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long enable;
+	int pre_enable = atomic_read(&client_data->wkqueue_en);
+
+	err = kstrtoul(buf, 10, &enable);
+	if (err)
+		return err;
+
+	enable = enable ? 1 : 0;
+	mutex_lock(&client_data->mutex_enable);
+	if (enable) {
+		if (pre_enable == 0) {
+			smi130_set_acc_op_mode(client_data,
+							SMI_ACC_PM_NORMAL);
+			schedule_delayed_work(&client_data->work,
+			msecs_to_jiffies(atomic_read(&client_data->delay)));
+			atomic_set(&client_data->wkqueue_en, 1);
+		}
+
+	} else {
+		if (pre_enable == 1) {
+			smi130_set_acc_op_mode(client_data,
+							SMI_ACC_PM_SUSPEND);
+
+			cancel_delayed_work_sync(&client_data->work);
+			atomic_set(&client_data->wkqueue_en, 0);
+		}
+	}
+
+	mutex_unlock(&client_data->mutex_enable);
+
+	return count;
+}
+
+#if defined(SMI130_ENABLE_INT1) || defined(SMI130_ENABLE_INT2)
+/* accel sensor part */
+static ssize_t smi130_anymot_duration_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char data;
+
+	err = SMI_CALL_API(get_intr_any_motion_durn)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_anymot_duration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_intr_any_motion_durn)((unsigned char)data);
+	if (err < 0)
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t smi130_anymot_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_intr_any_motion_thres)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_anymot_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_intr_any_motion_thres)((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_step_detector_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 data = 0;
+	u8 step_det;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	err = SMI_CALL_API(get_step_detector_enable)(&step_det);
+	/*smi130_get_status0_step_int*/
+	if (err < 0)
+		return err;
+	/* client_data->std will be updated in smi_stepdetector_interrupt_handle */
+	if ((step_det == 1) && (client_data->std == 1)) {
+		data = 1;
+		client_data->std = 0;
+	} else {
+		data = 0;
+	}
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_step_detector_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_step_detector_enable)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_step_detector_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_step_detector_enable)((unsigned char)data);
+	if (err < 0)
+		return -EIO;
+	if (data == 0)
+		client_data->pedo_data.wkar_step_detector_status = 0;
+	return count;
+}
+
+static ssize_t smi130_signification_motion_enable_store(
+	struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/*0x62 (bit 1) INT_MOTION_3 int_sig_mot_sel*/
+	err = SMI_CALL_API(set_intr_significant_motion_select)(
+		(unsigned char)data);
+	if (err < 0)
+		return -EIO;
+	if (data == 1) {
+		err = SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_X_ENABLE, 1);
+		err += SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_Y_ENABLE, 1);
+		err += SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_Z_ENABLE, 1);
+		if (err < 0)
+			return -EIO;
+		enable_irq_wake(client_data->IRQ);
+		client_data->sig_flag = 1;
+	} else {
+		err = SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_X_ENABLE, 0);
+		err += SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_Y_ENABLE, 0);
+		err += SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_Z_ENABLE, 0);
+		if (err < 0)
+			return -EIO;
+		disable_irq_wake(client_data->IRQ);
+		client_data->sig_flag = 0;
+	}
+	return count;
+}
+
+static ssize_t smi130_signification_motion_enable_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+	/*0x62 (bit 1) INT_MOTION_3 int_sig_mot_sel*/
+	err = SMI_CALL_API(get_intr_significant_motion_select)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static int sigmotion_init_interrupts(u8 sig_map_int_pin)
+{
+	int ret = 0;
+/*0x60  */
+	ret += smi130_set_intr_any_motion_thres(0x1e);
+/* 0x62(bit 3~2)	0=1.5s */
+	ret += smi130_set_intr_significant_motion_skip(0);
+/*0x62(bit 5~4)	1=0.5s*/
+	ret += smi130_set_intr_significant_motion_proof(1);
+/*0x50 (bit 0, 1, 2)  INT_EN_0 anymo x y z*/
+	ret += smi130_map_significant_motion_intr(sig_map_int_pin);
+/*0x62 (bit 1) INT_MOTION_3	int_sig_mot_sel
+close the signification_motion*/
+	ret += smi130_set_intr_significant_motion_select(0);
+/*close the anymotion interrupt*/
+	ret += SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_X_ENABLE, 0);
+	ret += SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_Y_ENABLE, 0);
+	ret += SMI_CALL_API(set_intr_enable_0)
+					(SMI130_ANY_MOTION_Z_ENABLE, 0);
+	if (ret)
+		printk(KERN_ERR "smi130 sig motion setup failed, err=%d!\n", ret);
+	return ret;
+
+}
+#endif
+
+static ssize_t smi130_acc_range_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char range;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = SMI_CALL_API(get_accel_range)(&range);
+	if (err)
+		return err;
+
+	client_data->range.acc_range = range;
+	return snprintf(buf, 16, "%d\n", range);
+}
+
+static ssize_t smi130_acc_range_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long range;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+
+	err = kstrtoul(buf, 10, &range);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_accel_range)(range);
+	if (err)
+		return -EIO;
+
+	client_data->range.acc_range = range;
+	return count;
+}
+
+static ssize_t smi130_acc_odr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char acc_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = SMI_CALL_API(get_accel_output_data_rate)(&acc_odr);
+	if (err)
+		return err;
+
+	client_data->odr.acc_odr = acc_odr;
+	return snprintf(buf, 16, "%d\n", acc_odr);
+}
+
+static ssize_t smi130_acc_odr_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long acc_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &acc_odr);
+	if (err)
+		return err;
+
+	if (acc_odr < 1 || acc_odr > 12)
+		return -EIO;
+
+	if (acc_odr < 5)
+		err = SMI_CALL_API(set_accel_under_sampling_parameter)(1);
+	else
+		err = SMI_CALL_API(set_accel_under_sampling_parameter)(0);
+
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_accel_output_data_rate)(acc_odr);
+	if (err)
+		return -EIO;
+	client_data->odr.acc_odr = acc_odr;
+	return count;
+}
+
+static ssize_t smi130_acc_op_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	u8 accel_pmu_status = 0;
+	err = SMI_CALL_API(get_accel_power_mode_stat)(
+		&accel_pmu_status);
+
+	if (err)
+		return err;
+
+	return snprintf(buf, 32, "reg:%d, val:%d\n", accel_pmu_status,
+			client_data->pw.acc_pm);
+}
+
+static ssize_t smi130_acc_op_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err;
+	unsigned long op_mode;
+	err = kstrtoul(buf, 10, &op_mode);
+	if (err)
+		return err;
+
+	err = smi130_set_acc_op_mode(client_data, op_mode);
+	if (err)
+		return err;
+	else
+		return count;
+
+}
+
+static ssize_t smi130_acc_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct smi130_accel_t data;
+
+	int err;
+
+	err = SMI_CALL_API(read_accel_xyz)(&data);
+	if (err < 0)
+		return err;
+
+	return snprintf(buf, 48, "%hd %hd %hd\n",
+			data.x, data.y, data.z);
+}
+
+static ssize_t smi130_acc_fast_calibration_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_foc_accel_x)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_fast_calibration_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	s8 accel_offset_x = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* 0: disable, 1: +1g, 2: -1g, 3: 0g */
+	if (data > 3)
+		return -EINVAL;
+
+	err = SMI_CALL_API(set_accel_foc_trigger)(X_AXIS,
+					data, &accel_offset_x);
+	if (err)
+		return -EIO;
+	else
+		client_data->calib_status |=
+			SMI_FAST_CALI_TRUE << SMI_ACC_X_FAST_CALI_RDY;
+	return count;
+}
+
+static ssize_t smi130_acc_fast_calibration_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_foc_accel_y)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_fast_calibration_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	s8 accel_offset_y = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* 0: disable, 1: +1g, 2: -1g, 3: 0g */
+	if (data > 3)
+		return -EINVAL;
+
+	err = SMI_CALL_API(set_accel_foc_trigger)(Y_AXIS,
+				data, &accel_offset_y);
+	if (err)
+		return -EIO;
+	else
+		client_data->calib_status |=
+			SMI_FAST_CALI_TRUE << SMI_ACC_Y_FAST_CALI_RDY;
+	return count;
+}
+
+static ssize_t smi130_acc_fast_calibration_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_foc_accel_z)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_fast_calibration_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	s8 accel_offset_z = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	unsigned char data1[3] = {0};
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+	/* 0: disable, 1: +1g, 2: -1g, 3: 0g */
+	if (data > 3)
+		return -EINVAL;
+
+	err = SMI_CALL_API(set_accel_foc_trigger)(Z_AXIS,
+			data, &accel_offset_z);
+	if (err)
+		return -EIO;
+	else
+		client_data->calib_status |=
+			SMI_FAST_CALI_TRUE << SMI_ACC_Z_FAST_CALI_RDY;
+
+	if (client_data->calib_status == SMI_FAST_CALI_ALL_RDY) {
+		err = SMI_CALL_API(get_accel_offset_compensation_xaxis)(
+			&data1[0]);
+		err += SMI_CALL_API(get_accel_offset_compensation_yaxis)(
+			&data1[1]);
+		err += SMI_CALL_API(get_accel_offset_compensation_zaxis)(
+			&data1[2]);
+		dev_info(client_data->dev, "accx %d, accy %d, accz %d\n",
+			data1[0], data1[1], data1[2]);
+		if (err)
+			return -EIO;
+		input_event(client_data->input, EV_MSC,
+		INPUT_EVENT_FAST_ACC_CALIB_DONE,
+		(data1[0] | (data1[1] << 8) | (data1[2] << 16)));
+		input_sync(client_data->input);
+		client_data->calib_status = 0;
+	}
+
+	return count;
+}
+
+static ssize_t smi130_acc_offset_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_accel_offset_compensation_xaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+
+static ssize_t smi130_acc_offset_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_accel_offset_compensation_xaxis)
+						((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_acc_offset_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_accel_offset_compensation_yaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_offset_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_accel_offset_compensation_yaxis)
+						((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_acc_offset_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_accel_offset_compensation_zaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_acc_offset_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_accel_offset_compensation_zaxis)
+						((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_test_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	u8 raw_data[15] = {0};
+	unsigned int sensor_time = 0;
+
+	int err;
+	memset(raw_data, 0, sizeof(raw_data));
+
+	err = client_data->device.bus_read(client_data->device.dev_addr,
+			SMI130_USER_DATA_8_GYRO_X_LSB__REG, raw_data, 15);
+	if (err)
+		return err;
+
+	udelay(10);
+	sensor_time = (u32)(raw_data[14] << 16 | raw_data[13] << 8
+						| raw_data[12]);
+
+	return snprintf(buf, 128, "%d %d %d %d %d %d %u",
+					(s16)(raw_data[1] << 8 | raw_data[0]),
+				(s16)(raw_data[3] << 8 | raw_data[2]),
+				(s16)(raw_data[5] << 8 | raw_data[4]),
+				(s16)(raw_data[7] << 8 | raw_data[6]),
+				(s16)(raw_data[9] << 8 | raw_data[8]),
+				(s16)(raw_data[11] << 8 | raw_data[10]),
+				sensor_time);
+
+}
+
+static ssize_t smi130_step_counter_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = SMI_CALL_API(get_step_counter_enable)(&data);
+
+	client_data->stc_enable = data;
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_step_counter_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_step_counter_enable)((unsigned char)data);
+
+	client_data->stc_enable = data;
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+
+static ssize_t smi130_step_counter_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_step_mode)((unsigned char)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_step_counter_clc_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = smi130_clear_step_counter();
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_step_counter_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u16 data;
+	int err;
+	static u16 last_stc_value;
+
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = SMI_CALL_API(read_step_count)(&data);
+
+	if (err < 0)
+		return err;
+	if (data >= last_stc_value) {
+		client_data->pedo_data.last_step_counter_value += (
+			data - last_stc_value);
+		last_stc_value = data;
+	} else
+		last_stc_value = data;
+	return snprintf(buf, 16, "%d\n",
+		client_data->pedo_data.last_step_counter_value);
+}
+
+static ssize_t smi130_smi_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	u8 raw_data[12] = {0};
+
+	int err;
+	memset(raw_data, 0, sizeof(raw_data));
+
+	err = client_data->device.bus_read(client_data->device.dev_addr,
+			SMI130_USER_DATA_8_GYRO_X_LSB__REG, raw_data, 12);
+	if (err)
+		return err;
+	/*output:gyro x y z acc x y z*/
+	return snprintf(buf, 96, "%hd %hd %hd %hd %hd %hd\n",
+					(s16)(raw_data[1] << 8 | raw_data[0]),
+				(s16)(raw_data[3] << 8 | raw_data[2]),
+				(s16)(raw_data[5] << 8 | raw_data[4]),
+				(s16)(raw_data[7] << 8 | raw_data[6]),
+				(s16)(raw_data[9] << 8 | raw_data[8]),
+				(s16)(raw_data[11] << 8 | raw_data[10]));
+
+}
+
+
+static ssize_t smi130_selftest_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "0x%x\n",
+				atomic_read(&client_data->selftest_result));
+}
+
+static int smi_restore_hw_cfg(struct smi_client_data *client);
+
+/*!
+ * @brief store selftest result which make up of acc and gyro
+ * format: 0b 0000 xxxx  x:1 failed, 0 success
+ * bit3:     gyro_self
+ * bit2..0: acc_self z y x
+ */
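+/*
+ * Example (hypothetical sysfs path, shown for illustration only):
+ *   echo 1 > /sys/.../selftest
+ *   cat /sys/.../selftest    -> "0x0" means accel x/y/z and gyro all passed
+ */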
+static ssize_t smi130_selftest_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	int i = 0;
+
+	u8 acc_selftest = 0;
+	u8 gyro_selftest = 0;
+	u8 smi_selftest = 0;
+	s16 axis_p_value, axis_n_value;
+	u16 diff_axis[3] = {0xff, 0xff, 0xff};
+	u8 acc_odr, range, acc_selftest_amp, acc_selftest_sign;
+
+	dev_notice(client_data->dev, "Selftest for SMI16x starting.\n");
+
+	client_data->selftest = 1;
+
+	/*soft reset*/
+	err = SMI_CALL_API(set_command_register)(CMD_RESET_USER_REG);
+	msleep(70);
+	err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_acc_arr[SMI_ACC_PM_NORMAL]);
+	err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_NORMAL]);
+	err += SMI_CALL_API(set_accel_under_sampling_parameter)(0);
+	err += SMI_CALL_API(set_accel_output_data_rate)(
+	SMI130_ACCEL_OUTPUT_DATA_RATE_1600HZ);
+
+	/* set to 8G range*/
+	err += SMI_CALL_API(set_accel_range)(SMI130_ACCEL_RANGE_8G);
+	/* set to self amp high */
+	err += SMI_CALL_API(set_accel_selftest_amp)(SMI_SELFTEST_AMP_HIGH);
+
+
+	err += SMI_CALL_API(get_accel_output_data_rate)(&acc_odr);
+	err += SMI_CALL_API(get_accel_range)(&range);
+	err += SMI_CALL_API(get_accel_selftest_amp)(&acc_selftest_amp);
+	err += SMI_CALL_API(read_accel_x)(&axis_n_value);
+
+	dev_info(client_data->dev,
+			"acc_odr:%d, acc_range:%d, acc_selftest_amp:%d, acc_x:%d\n",
+				acc_odr, range, acc_selftest_amp, axis_n_value);
+
+	for (i = X_AXIS; i < AXIS_MAX; i++) {
+		axis_n_value = 0;
+		axis_p_value = 0;
+		/* set every selftest axis */
+		/* set_acc_selftest_axis(param): param x:1, y:2, z:3,
+		 * but X_AXIS:0, Y_AXIS:1, Z_AXIS:2,
+		 * so we need to add 1
+		 */
+		err += SMI_CALL_API(set_accel_selftest_axis)(i + 1);
+		msleep(50);
+		switch (i) {
+		case X_AXIS:
+			/* set negative sign */
+			err += SMI_CALL_API(set_accel_selftest_sign)(0);
+			err += SMI_CALL_API(get_accel_selftest_sign)(
+				&acc_selftest_sign);
+
+			msleep(60);
+			err += SMI_CALL_API(read_accel_x)(&axis_n_value);
+			dev_info(client_data->dev,
+			"acc_x_selftest_sign:%d, axis_n_value:%d\n",
+			acc_selftest_sign, axis_n_value);
+
+			/* set positive sign */
+			err += SMI_CALL_API(set_accel_selftest_sign)(1);
+			err += SMI_CALL_API(get_accel_selftest_sign)(
+				&acc_selftest_sign);
+
+			msleep(60);
+			err += SMI_CALL_API(read_accel_x)(&axis_p_value);
+			dev_info(client_data->dev,
+			"acc_x_selftest_sign:%d, axis_p_value:%d\n",
+			acc_selftest_sign, axis_p_value);
+			diff_axis[i] = abs(axis_p_value - axis_n_value);
+			break;
+
+		case Y_AXIS:
+			/* set negative sign */
+			err += SMI_CALL_API(set_accel_selftest_sign)(0);
+			msleep(60);
+			err += SMI_CALL_API(read_accel_y)(&axis_n_value);
+			/* set positive sign */
+			err += SMI_CALL_API(set_accel_selftest_sign)(1);
+			msleep(60);
+			err += SMI_CALL_API(read_accel_y)(&axis_p_value);
+			diff_axis[i] = abs(axis_p_value - axis_n_value);
+			break;
+
+		case Z_AXIS:
+			/* set negative sign */
+			err += SMI_CALL_API(set_accel_selftest_sign)(0);
+			msleep(60);
+			err += SMI_CALL_API(read_accel_z)(&axis_n_value);
+			/* set positive sign */
+			err += SMI_CALL_API(set_accel_selftest_sign)(1);
+			msleep(60);
+			err += SMI_CALL_API(read_accel_z)(&axis_p_value);
+			/* also start gyro self test */
+			err += SMI_CALL_API(set_gyro_selftest_start)(1);
+			msleep(60);
+			err += SMI_CALL_API(get_gyro_selftest)(&gyro_selftest);
+
+			diff_axis[i] = abs(axis_p_value - axis_n_value);
+			break;
+		default:
+			err += -EINVAL;
+			break;
+		}
+		if (err) {
+			dev_err(client_data->dev,
+				"Failed selftest axis:%s, p_val=%d, n_val=%d\n",
+				smi_axis_name[i], axis_p_value, axis_n_value);
+			client_data->selftest = 0;
+			return -EINVAL;
+		}
+
+		/*400mg for acc z axis*/
+		if (Z_AXIS == i) {
+			if (diff_axis[i] < 1639) {
+				acc_selftest |= 1 << i;
+				dev_err(client_data->dev,
+					"Over selftest minimum for "
+					"axis:%s,diff=%d,p_val=%d, n_val=%d\n",
+					smi_axis_name[i], diff_axis[i],
+						axis_p_value, axis_n_value);
+			}
+		} else {
+			/*800mg for x or y axis*/
+			if (diff_axis[i] < 3277) {
+				acc_selftest |= 1 << i;
+
+				if (smi_get_err_status(client_data) < 0)
+					return -EIO;
+				dev_err(client_data->dev,
+					"Over selftest minimum for "
+					"axis:%s,diff=%d, p_val=%d, n_val=%d\n",
+					smi_axis_name[i], diff_axis[i],
+						axis_p_value, axis_n_value);
+				dev_err(client_data->dev, "err_st:0x%x\n",
+						client_data->err_st.err_st_all);
+
+			}
+		}
+
+	}
+	/* gyro_selftest == 1 means the gyro selftest passed, but in
+	 * smi_selftest bit4 0 means pass and 1 means fail, hence the negation
+	 */
+	smi_selftest = (acc_selftest & 0x0f) | ((!gyro_selftest) << AXIS_MAX);
+	atomic_set(&client_data->selftest_result, smi_selftest);
+	/*soft reset*/
+	err = SMI_CALL_API(set_command_register)(CMD_RESET_USER_REG);
+	if (err) {
+		client_data->selftest = 0;
+		return err;
+	}
+	msleep(50);
+
+	smi_restore_hw_cfg(client_data);
+
+	client_data->selftest = 0;
+	dev_notice(client_data->dev, "Selftest for SMI16x finished\n");
+
+	return count;
+}
+
+/* gyro sensor part */
+static ssize_t smi130_gyro_op_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int err = 0;
+	u8 gyro_pmu_status = 0;
+
+	err = SMI_CALL_API(get_gyro_power_mode_stat)(
+		&gyro_pmu_status);
+
+	if (err)
+		return err;
+
+	return snprintf(buf, 32, "reg:%d, val:%d\n", gyro_pmu_status,
+			client_data->pw.gyro_pm);
+}
+
+static ssize_t smi130_gyro_op_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	unsigned long op_mode;
+	int err;
+
+	err = kstrtoul(buf, 10, &op_mode);
+	if (err)
+		return err;
+
+	mutex_lock(&client_data->mutex_op_mode);
+
+	if (op_mode < SMI_GYRO_PM_MAX) {
+		switch (op_mode) {
+		case SMI_GYRO_PM_NORMAL:
+			err = SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_NORMAL]);
+			client_data->pw.gyro_pm = SMI_GYRO_PM_NORMAL;
+			smi_delay(60);
+			break;
+		case SMI_GYRO_PM_FAST_START:
+			err = SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_FAST_START]);
+			client_data->pw.gyro_pm = SMI_GYRO_PM_FAST_START;
+			smi_delay(60);
+			break;
+		case SMI_GYRO_PM_SUSPEND:
+			err = SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_SUSPEND]);
+			client_data->pw.gyro_pm = SMI_GYRO_PM_SUSPEND;
+			smi_delay(60);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	if (err)
+		return err;
+	else
+		return count;
+
+}
+
+static ssize_t smi130_gyro_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct smi130_gyro_t data;
+	int err;
+
+	err = SMI_CALL_API(read_gyro_xyz)(&data);
+	if (err < 0)
+		return err;
+
+
+	return snprintf(buf, 48, "%hd %hd %hd\n", data.x,
+				data.y, data.z);
+}
+
+static ssize_t smi130_gyro_range_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char range;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = SMI_CALL_API(get_gyro_range)(&range);
+	if (err)
+		return err;
+
+	client_data->range.gyro_range = range;
+	return snprintf(buf, 16, "%d\n", range);
+}
+
+static ssize_t smi130_gyro_range_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long range;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &range);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_gyro_range)(range);
+	if (err)
+		return -EIO;
+
+	client_data->range.gyro_range = range;
+	return count;
+}
+
+static ssize_t smi130_gyro_odr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char gyro_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = SMI_CALL_API(get_gyro_output_data_rate)(&gyro_odr);
+	if (err)
+		return err;
+
+	client_data->odr.gyro_odr = gyro_odr;
+	return snprintf(buf, 16, "%d\n", gyro_odr);
+}
+
+static ssize_t smi130_gyro_odr_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long gyro_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &gyro_odr);
+	if (err)
+		return err;
+
+	if (gyro_odr < 6 || gyro_odr > 13)
+		return -EIO;
+
+	err = SMI_CALL_API(set_gyro_output_data_rate)(gyro_odr);
+	if (err)
+		return -EIO;
+
+	client_data->odr.gyro_odr = gyro_odr;
+	return count;
+}
+
+static ssize_t smi130_gyro_fast_calibration_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char data;
+	int err;
+
+	err = SMI_CALL_API(get_foc_gyro_enable)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_gyro_fast_calibration_en_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long enable;
+	s8 err;
+	s16 gyr_off_x;
+	s16 gyr_off_y;
+	s16 gyr_off_z;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &enable);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_foc_gyro_enable)((u8)enable,
+				&gyr_off_x, &gyr_off_y, &gyr_off_z);
+
+	if (err < 0)
+		return -EIO;
+	else {
+		input_event(client_data->input, EV_MSC,
+			INPUT_EVENT_FAST_GYRO_CALIB_DONE, 1);
+		input_sync(client_data->input);
+	}
+	return count;
+}
+
+static ssize_t smi130_gyro_offset_x_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s16 data = 0;
+	s8 err = 0;
+
+	err = SMI_CALL_API(get_gyro_offset_compensation_xaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_gyro_offset_x_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	s8 err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_gyro_offset_compensation_xaxis)((s16)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_gyro_offset_y_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s16 data = 0;
+	s8 err = 0;
+
+	err = SMI_CALL_API(get_gyro_offset_compensation_yaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_gyro_offset_y_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	s8 err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_gyro_offset_compensation_yaxis)((s16)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_gyro_offset_z_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s16 data = 0;
+	int err = 0;
+
+	err = SMI_CALL_API(get_gyro_offset_compensation_zaxis)(&data);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "%d\n", data);
+}
+
+static ssize_t smi130_gyro_offset_z_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err = SMI_CALL_API(set_gyro_offset_compensation_zaxis)((s16)data);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+
+/* mag sensor part */
+#ifdef SMI130_MAG_INTERFACE_SUPPORT
+static ssize_t smi130_mag_op_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	u8 mag_op_mode;
+	s8 err;
+	err = smi130_get_mag_power_mode_stat(&mag_op_mode);
+	if (err) {
+		dev_err(client_data->dev,
+			"Failed to get SMI130 mag power mode:%d\n", err);
+		return err;
+	} else
+		return snprintf(buf, 32, "%d, reg:%d\n",
+					client_data->pw.mag_pm, mag_op_mode);
+}
+
+static ssize_t smi130_mag_op_mode_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	unsigned long op_mode;
+	int err;
+
+	err = kstrtoul(buf, 10, &op_mode);
+	if (err)
+		return err;
+
+	if (op_mode == client_data->pw.mag_pm)
+		return count;
+
+	mutex_lock(&client_data->mutex_op_mode);
+
+
+	if (op_mode < SMI_MAG_PM_MAX) {
+		switch (op_mode) {
+		case SMI_MAG_PM_NORMAL:
+			/* when the mag sensor is connected, set the write
+			 * address to 0x4c (op mode control reg) and trigger
+			 * the write operation to enable normal mode in the
+			 * magnetometer */
+#if defined(SMI130_AKM09912_SUPPORT)
+			err = smi130_set_bosch_akm_and_secondary_if_powermode(
+			SMI130_MAG_FORCE_MODE);
+#else
+			err = smi130_set_bmm150_mag_and_secondary_if_power_mode(
+			SMI130_MAG_FORCE_MODE);
+#endif
+			client_data->pw.mag_pm = SMI_MAG_PM_NORMAL;
+			smi_delay(5);
+			break;
+		case SMI_MAG_PM_LP1:
+			/* when the mag sensor is connected, set the write
+			 * address to 0x4b (bmm150 power control reg, bit0)
+			 * and trigger the write operation to enable power
+			 * in the magnetometer */
+#if defined(SMI130_AKM09912_SUPPORT)
+			err = smi130_set_bosch_akm_and_secondary_if_powermode(
+			SMI130_MAG_FORCE_MODE);
+#else
+			err = smi130_set_bmm150_mag_and_secondary_if_power_mode(
+			SMI130_MAG_FORCE_MODE);
+#endif
+			client_data->pw.mag_pm = SMI_MAG_PM_LP1;
+			smi_delay(5);
+			break;
+		case SMI_MAG_PM_SUSPEND:
+		case SMI_MAG_PM_LP2:
+#if defined(SMI130_AKM09912_SUPPORT)
+			err = smi130_set_bosch_akm_and_secondary_if_powermode(
+				SMI130_MAG_SUSPEND_MODE);
+#else
+			err = smi130_set_bmm150_mag_and_secondary_if_power_mode(
+				SMI130_MAG_SUSPEND_MODE);
+#endif
+			client_data->pw.mag_pm = op_mode;
+			smi_delay(5);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	if (err) {
+		dev_err(client_data->dev,
+			"Failed to switch SMI130 mag power mode:%d\n",
+			client_data->pw.mag_pm);
+		return err;
+	} else
+		return count;
+
+}
+
+static ssize_t smi130_mag_odr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	unsigned char mag_odr = 0;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = SMI_CALL_API(get_mag_output_data_rate)(&mag_odr);
+	if (err)
+		return err;
+
+	client_data->odr.mag_odr = mag_odr;
+	return snprintf(buf, 16, "%d\n", mag_odr);
+}
+
+static ssize_t smi130_mag_odr_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long mag_odr;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &mag_odr);
+	if (err)
+		return err;
+	/* ODR codes: 1 = 25/32 Hz, ..., 6 = 25 Hz, 7 = 50 Hz, ... */
+	err = SMI_CALL_API(set_mag_output_data_rate)(mag_odr);
+	if (err)
+		return -EIO;
+
+	client_data->odr.mag_odr = mag_odr;
+	return count;
+}
+
+static ssize_t smi130_mag_i2c_address_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 data;
+	s8 err;
+
+	err = SMI_CALL_API(set_mag_manual_enable)(1);
+	err += SMI_CALL_API(get_i2c_device_addr)(&data);
+	err += SMI_CALL_API(set_mag_manual_enable)(0);
+
+	if (err < 0)
+		return err;
+	return snprintf(buf, 16, "0x%x\n", data);
+}
+
+static ssize_t smi130_mag_i2c_address_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err += SMI_CALL_API(set_mag_manual_enable)(1);
+	if (!err)
+		err += SMI_CALL_API(set_i2c_device_addr)((unsigned char)data);
+	err += SMI_CALL_API(set_mag_manual_enable)(0);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_mag_value_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	struct smi130_mag_xyz_s32_t data;
+	int err;
+	/* raw data with compensation */
+#if defined(SMI130_AKM09912_SUPPORT)
+	err = smi130_bosch_akm09912_compensate_xyz(&data);
+#else
+	err = smi130_bmm150_mag_compensate_xyz(&data);
+#endif
+
+	if (err < 0) {
+		memset(&data, 0, sizeof(data));
+		dev_err(client_data->dev, "mag not ready!\n");
+	}
+	return snprintf(buf, 48, "%hd %hd %hd\n", data.x,
+				data.y, data.z);
+}
+static ssize_t smi130_mag_offset_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err = 0;
+	unsigned char mag_offset;
+	err = SMI_CALL_API(get_mag_offset)(&mag_offset);
+	if (err)
+		return err;
+
+	return snprintf(buf, 16, "%d\n", mag_offset);
+
+}
+
+static ssize_t smi130_mag_offset_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	err += SMI_CALL_API(set_mag_manual_enable)(1);
+	if (err == 0)
+		err += SMI_CALL_API(set_mag_offset)((unsigned char)data);
+	err += SMI_CALL_API(set_mag_manual_enable)(0);
+
+	if (err < 0)
+		return -EIO;
+	return count;
+}
+
+static ssize_t smi130_mag_chip_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	s8 err = 0;
+	u8 mag_chipid;
+
+	err = smi130_set_mag_manual_enable(0x01);
+	/* read mag chip_id value */
+#if defined(SMI130_AKM09912_SUPPORT)
+	err += smi130_set_mag_read_addr(AKM09912_CHIP_ID_REG);
+		/* 0x04 is mag_x lsb register */
+	err += smi130_read_reg(SMI130_USER_DATA_0_MAG_X_LSB__REG,
+							&mag_chipid, 1);
+
+	/* Must re-set the data register address of the mag sensor */
+	err += smi130_set_mag_read_addr(AKM_DATA_REGISTER);
+#else
+	err += smi130_set_mag_read_addr(SMI130_BMM150_CHIP_ID);
+	/* 0x04 is mag_x lsb register */
+	err += smi130_read_reg(SMI130_USER_DATA_0_MAG_X_LSB__REG,
+							&mag_chipid, 1);
+
+	/* Must re-set the data register address of the mag sensor */
+	/* 0x42 is the bmm150 data register address */
+	err += smi130_set_mag_read_addr(SMI130_BMM150_DATA_REG);
+#endif
+
+	err += smi130_set_mag_manual_enable(0x00);
+
+	if (err)
+		return err;
+
+	return snprintf(buf, 16, "%x\n", mag_chipid);
+
+}
+
+static ssize_t smi130_mag_chip_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u8 mag_chipid = 0;
+#if defined(SMI130_AKM09912_SUPPORT)
+	mag_chipid = 15;
+#else
+	mag_chipid = 150;
+#endif
+	return snprintf(buf, 16, "%d\n", mag_chipid);
+}
+
+struct smi130_mag_xyz_s32_t mag_compensate;
+static ssize_t smi130_mag_compensate_xyz_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	memcpy(buf, &mag_compensate, sizeof(mag_compensate));
+	return sizeof(mag_compensate);
+}
+static ssize_t smi130_mag_compensate_xyz_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct smi130_mag_xyzr_t mag_raw;
+
+	if (count < 8)
+		return -EINVAL;
+	memset(&mag_compensate, 0, sizeof(mag_compensate));
+	memset(&mag_raw, 0, sizeof(mag_raw));
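+	/*
+	 * Assumed layout of buf: four little-endian 16-bit raw values
+	 * (X, Y, Z, RHALL); the shifts below reduce them to the 13/13/15/14
+	 * bit raw widths expected by the bmm150 compensation routine.
+	 */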
+	mag_raw.x = (buf[1] << 8 | buf[0]);
+	mag_raw.y = (buf[3] << 8 | buf[2]);
+	mag_raw.z = (buf[5] << 8 | buf[4]);
+	mag_raw.r = (buf[7] << 8 | buf[6]);
+	mag_raw.x = mag_raw.x >> 3;
+	mag_raw.y = mag_raw.y >> 3;
+	mag_raw.z = mag_raw.z >> 1;
+	mag_raw.r = mag_raw.r >> 2;
+	smi130_bmm150_mag_compensate_xyz_raw(
+	&mag_compensate, mag_raw);
+	return count;
+}
+
+#endif
+
+#if defined(SMI130_ENABLE_INT1) || defined(SMI130_ENABLE_INT2)
+static ssize_t smi_enable_int_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int interrupt_type, value;
+
+	if (sscanf(buf, "%3d %3d", &interrupt_type, &value) != 2)
+		return -EINVAL;
+
+	if (interrupt_type < 0 || interrupt_type > 16)
+		return -EINVAL;
+
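+	/*
+	 * interrupt_type indexes smi_interrupt_type[]: types up to
+	 * SMI_FLAT_INT are programmed via INT_EN_0, types up to SMI_FWM_INT
+	 * via INT_EN_1, and the remaining ones via INT_EN_2.
+	 */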
+	if (interrupt_type <= SMI_FLAT_INT) {
+		if (SMI_CALL_API(set_intr_enable_0)
+				(smi_interrupt_type[interrupt_type], value) < 0)
+			return -EINVAL;
+	} else if (interrupt_type <= SMI_FWM_INT) {
+		if (SMI_CALL_API(set_intr_enable_1)
+			(smi_interrupt_type[interrupt_type], value) < 0)
+			return -EINVAL;
+	} else {
+		if (SMI_CALL_API(set_intr_enable_2)
+			(smi_interrupt_type[interrupt_type], value) < 0)
+			return -EINVAL;
+	}
+
+	return count;
+}
+
+#endif
+
+static ssize_t smi130_show_reg_sel(struct device *dev
+		, struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, 64, "reg=0X%02X, len=%d\n",
+		client_data->reg_sel, client_data->reg_len);
+}
+
+static ssize_t smi130_store_reg_sel(struct device *dev
+		, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	ssize_t ret;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+	ret = sscanf(buf, "%11X %11d",
+		&client_data->reg_sel, &client_data->reg_len);
+	if (ret != 2) {
+		dev_err(client_data->dev, "Invalid argument");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t smi130_show_reg_val(struct device *dev
+		, struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+
+	ssize_t ret;
+	u8 reg_data[128], i;
+	int pos;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+
+	ret = smi_burst_read_wrapper(client_data->device.dev_addr,
+		client_data->reg_sel,
+		reg_data, client_data->reg_len);
+	if (ret < 0) {
+		dev_err(client_data->dev, "Reg op failed");
+		return ret;
+	}
+
+	pos = 0;
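+	/* dump reg_len bytes as two-digit hex, 16 values per output line */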
+	for (i = 0; i < client_data->reg_len; ++i) {
+		pos += snprintf(buf + pos, 16, "%02X", reg_data[i]);
+		buf[pos++] = (i + 1) % 16 == 0 ? '\n' : ' ';
+	}
+	if (buf[pos - 1] == ' ')
+		buf[pos - 1] = '\n';
+
+	return pos;
+}
+
+static ssize_t smi130_store_reg_val(struct device *dev
+		, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	ssize_t ret;
+	u8 reg_data[32];
+	int i, j, status, digit;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+	status = 0;
+	for (i = j = 0; i < count && j < client_data->reg_len; ++i) {
+		if (buf[i] == ' ' || buf[i] == '\n' || buf[i] == '\t' ||
+			buf[i] == '\r') {
+			status = 0;
+			++j;
+			continue;
+		}
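+		/*
+		 * ASCII hex to nibble: '0'-'9' have bit 0x10 set, so keep the
+		 * low nibble; 'A'-'F'/'a'-'f' do not, so add 9 to it.
+		 * status: 0 = expecting a new byte, 1 = high nibble read,
+		 * 2 = byte complete.
+		 */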
+		digit = buf[i] & 0x10 ? (buf[i] & 0xF) : ((buf[i] & 0xF) + 9);
+		printk(KERN_INFO "digit is %d", digit);
+		switch (status) {
+		case 2:
+			++j; /* Fall thru */
+		case 0:
+			reg_data[j] = digit;
+			status = 1;
+			break;
+		case 1:
+			reg_data[j] = reg_data[j] * 16 + digit;
+			status = 2;
+			break;
+		}
+	}
+	if (status > 0)
+		++j;
+	if (j > client_data->reg_len)
+		j = client_data->reg_len;
+	else if (j < client_data->reg_len) {
+		dev_err(client_data->dev, "Invalid argument");
+		return -EINVAL;
+	}
+	printk(KERN_INFO "Reg data read as");
+	for (i = 0; i < j; ++i)
+		printk(KERN_INFO "%d", reg_data[i]);
+
+	ret = SMI_CALL_API(write_reg)(
+		client_data->reg_sel,
+		reg_data, client_data->reg_len);
+	if (ret < 0) {
+		dev_err(client_data->dev, "Reg op failed");
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t smi130_driver_version_show(struct device *dev
+		, struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_client_data *client_data = input_get_drvdata(input);
+	int ret;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+
+	ret = snprintf(buf, 128, "Driver version: %s\n",
+			DRIVER_VERSION);
+
+	return ret;
+}
+static DEVICE_ATTR(chip_id, S_IRUGO,
+		smi130_chip_id_show, NULL);
+static DEVICE_ATTR(err_st, S_IRUGO,
+		smi130_err_st_show, NULL);
+static DEVICE_ATTR(sensor_time, S_IRUGO,
+		smi130_sensor_time_show, NULL);
+
+static DEVICE_ATTR(selftest, S_IRUGO | S_IWUSR,
+		smi130_selftest_show, smi130_selftest_store);
+static DEVICE_ATTR(fifo_flush, S_IRUGO | S_IWUSR,
+		NULL, smi130_fifo_flush_store);
+static DEVICE_ATTR(fifo_bytecount, S_IRUGO | S_IWUSR,
+		smi130_fifo_bytecount_show, smi130_fifo_bytecount_store);
+static DEVICE_ATTR(fifo_data_sel, S_IRUGO | S_IWUSR,
+		smi130_fifo_data_sel_show, smi130_fifo_data_sel_store);
+static DEVICE_ATTR(fifo_data_frame, S_IRUGO,
+		smi130_fifo_data_out_frame_show, NULL);
+
+static DEVICE_ATTR(fifo_watermark, S_IRUGO | S_IWUSR,
+		smi130_fifo_watermark_show, smi130_fifo_watermark_store);
+
+static DEVICE_ATTR(fifo_header_en, S_IRUGO | S_IWUSR,
+		smi130_fifo_header_en_show, smi130_fifo_header_en_store);
+static DEVICE_ATTR(fifo_time_en, S_IRUGO | S_IWUSR,
+		smi130_fifo_time_en_show, smi130_fifo_time_en_store);
+static DEVICE_ATTR(fifo_int_tag_en, S_IRUGO | S_IWUSR,
+		smi130_fifo_int_tag_en_show, smi130_fifo_int_tag_en_store);
+
+static DEVICE_ATTR(temperature, S_IRUGO,
+		smi130_temperature_show, NULL);
+static DEVICE_ATTR(place, S_IRUGO,
+		smi130_place_show, NULL);
+static DEVICE_ATTR(delay, S_IRUGO | S_IWUSR,
+		smi130_delay_show, smi130_delay_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+		smi130_enable_show, smi130_enable_store);
+static DEVICE_ATTR(acc_range, S_IRUGO | S_IWUSR,
+		smi130_acc_range_show, smi130_acc_range_store);
+static DEVICE_ATTR(acc_odr, S_IRUGO | S_IWUSR,
+		smi130_acc_odr_show, smi130_acc_odr_store);
+static DEVICE_ATTR(acc_op_mode, S_IRUGO | S_IWUSR,
+		smi130_acc_op_mode_show, smi130_acc_op_mode_store);
+static DEVICE_ATTR(acc_value, S_IRUGO,
+		smi130_acc_value_show, NULL);
+static DEVICE_ATTR(acc_fast_calibration_x, S_IRUGO | S_IWUSR,
+		smi130_acc_fast_calibration_x_show,
+		smi130_acc_fast_calibration_x_store);
+static DEVICE_ATTR(acc_fast_calibration_y, S_IRUGO | S_IWUSR,
+		smi130_acc_fast_calibration_y_show,
+		smi130_acc_fast_calibration_y_store);
+static DEVICE_ATTR(acc_fast_calibration_z, S_IRUGO | S_IWUSR,
+		smi130_acc_fast_calibration_z_show,
+		smi130_acc_fast_calibration_z_store);
+static DEVICE_ATTR(acc_offset_x, S_IRUGO | S_IWUSR,
+		smi130_acc_offset_x_show,
+		smi130_acc_offset_x_store);
+static DEVICE_ATTR(acc_offset_y, S_IRUGO | S_IWUSR,
+		smi130_acc_offset_y_show,
+		smi130_acc_offset_y_store);
+static DEVICE_ATTR(acc_offset_z, S_IRUGO | S_IWUSR,
+		smi130_acc_offset_z_show,
+		smi130_acc_offset_z_store);
+static DEVICE_ATTR(test, S_IRUGO,
+		smi130_test_show, NULL);
+static DEVICE_ATTR(stc_enable, S_IRUGO | S_IWUSR,
+		smi130_step_counter_enable_show,
+		smi130_step_counter_enable_store);
+static DEVICE_ATTR(stc_mode, S_IRUGO | S_IWUSR,
+		NULL, smi130_step_counter_mode_store);
+static DEVICE_ATTR(stc_clc, S_IRUGO | S_IWUSR,
+		NULL, smi130_step_counter_clc_store);
+static DEVICE_ATTR(stc_value, S_IRUGO,
+		smi130_step_counter_value_show, NULL);
+static DEVICE_ATTR(reg_sel, S_IRUGO | S_IWUSR,
+		smi130_show_reg_sel, smi130_store_reg_sel);
+static DEVICE_ATTR(reg_val, S_IRUGO | S_IWUSR,
+		smi130_show_reg_val, smi130_store_reg_val);
+static DEVICE_ATTR(driver_version, S_IRUGO,
+		smi130_driver_version_show, NULL);
+/* gyro part */
+static DEVICE_ATTR(gyro_op_mode, S_IRUGO | S_IWUSR,
+		smi130_gyro_op_mode_show, smi130_gyro_op_mode_store);
+static DEVICE_ATTR(gyro_value, S_IRUGO,
+		smi130_gyro_value_show, NULL);
+static DEVICE_ATTR(gyro_range, S_IRUGO | S_IWUSR,
+		smi130_gyro_range_show, smi130_gyro_range_store);
+static DEVICE_ATTR(gyro_odr, S_IRUGO | S_IWUSR,
+		smi130_gyro_odr_show, smi130_gyro_odr_store);
+static DEVICE_ATTR(gyro_fast_calibration_en, S_IRUGO | S_IWUSR,
+smi130_gyro_fast_calibration_en_show, smi130_gyro_fast_calibration_en_store);
+static DEVICE_ATTR(gyro_offset_x, S_IRUGO | S_IWUSR,
+smi130_gyro_offset_x_show, smi130_gyro_offset_x_store);
+static DEVICE_ATTR(gyro_offset_y, S_IRUGO | S_IWUSR,
+smi130_gyro_offset_y_show, smi130_gyro_offset_y_store);
+static DEVICE_ATTR(gyro_offset_z, S_IRUGO | S_IWUSR,
+smi130_gyro_offset_z_show, smi130_gyro_offset_z_store);
+
+#ifdef SMI130_MAG_INTERFACE_SUPPORT
+static DEVICE_ATTR(mag_op_mode, S_IRUGO | S_IWUSR,
+		smi130_mag_op_mode_show, smi130_mag_op_mode_store);
+static DEVICE_ATTR(mag_odr, S_IRUGO | S_IWUSR,
+		smi130_mag_odr_show, smi130_mag_odr_store);
+static DEVICE_ATTR(mag_i2c_addr, S_IRUGO | S_IWUSR,
+		smi130_mag_i2c_address_show, smi130_mag_i2c_address_store);
+static DEVICE_ATTR(mag_value, S_IRUGO,
+		smi130_mag_value_show, NULL);
+static DEVICE_ATTR(mag_offset, S_IRUGO | S_IWUSR,
+		smi130_mag_offset_show, smi130_mag_offset_store);
+static DEVICE_ATTR(mag_chip_id, S_IRUGO,
+		smi130_mag_chip_id_show, NULL);
+static DEVICE_ATTR(mag_chip_name, S_IRUGO,
+		smi130_mag_chip_name_show, NULL);
+static DEVICE_ATTR(mag_compensate, S_IRUGO | S_IWUSR,
+		smi130_mag_compensate_xyz_show,
+		smi130_mag_compensate_xyz_store);
+#endif
+
+
+#if defined(SMI130_ENABLE_INT1) || defined(SMI130_ENABLE_INT2)
+static DEVICE_ATTR(enable_int, S_IRUGO | S_IWUSR,
+		NULL, smi_enable_int_store);
+static DEVICE_ATTR(anymot_duration, S_IRUGO | S_IWUSR,
+		smi130_anymot_duration_show, smi130_anymot_duration_store);
+static DEVICE_ATTR(anymot_threshold, S_IRUGO | S_IWUSR,
+		smi130_anymot_threshold_show, smi130_anymot_threshold_store);
+static DEVICE_ATTR(std_stu, S_IRUGO,
+		smi130_step_detector_status_show, NULL);
+static DEVICE_ATTR(std_en, S_IRUGO | S_IWUSR,
+		smi130_step_detector_enable_show,
+		smi130_step_detector_enable_store);
+static DEVICE_ATTR(sig_en, S_IRUGO | S_IWUSR,
+		smi130_signification_motion_enable_show,
+		smi130_signification_motion_enable_store);
+
+#endif
+
+
+
+static DEVICE_ATTR(smi_value, S_IRUGO,
+		smi130_smi_value_show, NULL);
+
+
+static struct attribute *smi130_attributes[] = {
+	&dev_attr_chip_id.attr,
+	&dev_attr_err_st.attr,
+	&dev_attr_sensor_time.attr,
+	&dev_attr_selftest.attr,
+	&dev_attr_driver_version.attr,
+	&dev_attr_test.attr,
+	&dev_attr_fifo_flush.attr,
+	&dev_attr_fifo_header_en.attr,
+	&dev_attr_fifo_time_en.attr,
+	&dev_attr_fifo_int_tag_en.attr,
+	&dev_attr_fifo_bytecount.attr,
+	&dev_attr_fifo_data_sel.attr,
+	&dev_attr_fifo_data_frame.attr,
+
+	&dev_attr_fifo_watermark.attr,
+
+	&dev_attr_enable.attr,
+	&dev_attr_delay.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_place.attr,
+
+	&dev_attr_acc_range.attr,
+	&dev_attr_acc_odr.attr,
+	&dev_attr_acc_op_mode.attr,
+	&dev_attr_acc_value.attr,
+
+	&dev_attr_acc_fast_calibration_x.attr,
+	&dev_attr_acc_fast_calibration_y.attr,
+	&dev_attr_acc_fast_calibration_z.attr,
+	&dev_attr_acc_offset_x.attr,
+	&dev_attr_acc_offset_y.attr,
+	&dev_attr_acc_offset_z.attr,
+
+	&dev_attr_stc_enable.attr,
+	&dev_attr_stc_mode.attr,
+	&dev_attr_stc_clc.attr,
+	&dev_attr_stc_value.attr,
+
+	&dev_attr_gyro_op_mode.attr,
+	&dev_attr_gyro_value.attr,
+	&dev_attr_gyro_range.attr,
+	&dev_attr_gyro_odr.attr,
+	&dev_attr_gyro_fast_calibration_en.attr,
+	&dev_attr_gyro_offset_x.attr,
+	&dev_attr_gyro_offset_y.attr,
+	&dev_attr_gyro_offset_z.attr,
+
+#ifdef SMI130_MAG_INTERFACE_SUPPORT
+	&dev_attr_mag_chip_id.attr,
+	&dev_attr_mag_op_mode.attr,
+	&dev_attr_mag_odr.attr,
+	&dev_attr_mag_i2c_addr.attr,
+	&dev_attr_mag_chip_name.attr,
+	&dev_attr_mag_value.attr,
+	&dev_attr_mag_offset.attr,
+	&dev_attr_mag_compensate.attr,
+#endif
+
+#if defined(SMI130_ENABLE_INT1) || defined(SMI130_ENABLE_INT2)
+	&dev_attr_enable_int.attr,
+
+	&dev_attr_anymot_duration.attr,
+	&dev_attr_anymot_threshold.attr,
+	&dev_attr_std_stu.attr,
+	&dev_attr_std_en.attr,
+	&dev_attr_sig_en.attr,
+
+#endif
+	&dev_attr_reg_sel.attr,
+	&dev_attr_reg_val.attr,
+	&dev_attr_smi_value.attr,
+	NULL
+};
+
+static struct attribute_group smi130_attribute_group = {
+	.attrs = smi130_attributes
+};
+
+#if defined(SMI130_ENABLE_INT1) || defined(SMI130_ENABLE_INT2)
+static void smi_slope_interrupt_handle(struct smi_client_data *client_data)
+{
+	/* anym_first[0..2]: x, y, z */
+	u8 anym_first[3] = {0};
+	u8 status2;
+	u8 anym_sign;
+	u8 i = 0;
+
+	client_data->device.bus_read(client_data->device.dev_addr,
+				SMI130_USER_INTR_STAT_2_ADDR, &status2, 1);
+	anym_first[0] = SMI130_GET_BITSLICE(status2,
+				SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_X);
+	anym_first[1] = SMI130_GET_BITSLICE(status2,
+				SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Y);
+	anym_first[2] = SMI130_GET_BITSLICE(status2,
+				SMI130_USER_INTR_STAT_2_ANY_MOTION_FIRST_Z);
+	anym_sign = SMI130_GET_BITSLICE(status2,
+				SMI130_USER_INTR_STAT_2_ANY_MOTION_SIGN);
+
+	for (i = 0; i < 3; i++) {
+		if (anym_first[i]) {
+			/*1: negative*/
+			if (anym_sign)
+				dev_notice(client_data->dev,
+				"Anymotion interrupt happened! "
+				"%s axis, negative sign\n", smi_axis_name[i]);
+			else
+				dev_notice(client_data->dev,
+				"Anymotion interrupt happened! "
+				"%s axis, positive sign\n", smi_axis_name[i]);
+		}
+	}
+
+
+}
+
+static void smi_fifo_watermark_interrupt_handle
+				(struct smi_client_data *client_data)
+{
+	int err = 0;
+	unsigned int fifo_len0 = 0;
+	unsigned int  fifo_frmbytes_ext = 0;
+	unsigned char *fifo_data = NULL;
+	fifo_data = kzalloc(FIFO_DATA_BUFSIZE, GFP_KERNEL);
+	if (NULL == fifo_data) {
+		dev_err(client_data->dev, "no memory available");
+		return;
+	}
+	smi_fifo_frame_bytes_extend_calc(client_data, &fifo_frmbytes_ext);
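+	/*
+	 * fifo_frmbytes_ext holds the byte count of one extra frame; it is
+	 * added to the burst read length below, presumably so that a frame
+	 * straddling the watermark is still read out completely.
+	 */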
+
+	if (client_data->pw.acc_pm == 2 && client_data->pw.gyro_pm == 2
+					&& client_data->pw.mag_pm == 2)
+		printk(KERN_INFO "pw_acc: %d, pw_gyro: %d\n",
+			client_data->pw.acc_pm, client_data->pw.gyro_pm);
+	if (!client_data->fifo_data_sel)
+		printk(KERN_INFO "no sensor selected for fifo, fifo_data_sel:%d\n",
+						client_data->fifo_data_sel);
+
+	err = SMI_CALL_API(fifo_length)(&fifo_len0);
+	client_data->fifo_bytecount = fifo_len0;
+
+	if (client_data->fifo_bytecount == 0 || err)
+		goto exit;
+
+	if (client_data->fifo_bytecount + fifo_frmbytes_ext > FIFO_DATA_BUFSIZE)
+		client_data->fifo_bytecount = FIFO_DATA_BUFSIZE;
+	/* mind the duration of the burst read */
+	if (!err) {
+		err = smi_burst_read_wrapper(client_data->device.dev_addr,
+			SMI130_USER_FIFO_DATA__REG, fifo_data,
+			client_data->fifo_bytecount + fifo_frmbytes_ext);
+	} else
+		dev_err(client_data->dev, "read fifo length err");
+
+	if (err)
+		dev_err(client_data->dev, "burst read fifo err\n");
+	/*err = smi_fifo_analysis_handle(client_data, fifo_data,
+			client_data->fifo_bytecount + 20, fifo_out_data);*/
+exit:
+	kfree(fifo_data);
+
+}
+static void smi_data_ready_interrupt_handle(
+	struct smi_client_data *client_data, uint8_t status)
+{
+	uint8_t data12[12] = {0};
+	struct smi130_accel_t accel;
+	struct smi130_gyro_t gyro;
+	struct timespec ts;
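+	/*
+	 * Burst-read 12 bytes starting at DATA_8: bytes 0-5 carry gyro
+	 * X/Y/Z (LSB first) and bytes 6-11 carry accel X/Y/Z, matching the
+	 * unpacking below.
+	 */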
+	client_data->device.bus_read(client_data->device.dev_addr,
+	SMI130_USER_DATA_8_ADDR, data12, 12);
+	if (status & 0x80)
+	{
+		/*report acc data*/
+		/* Data X */
+		accel.x = (s16)((((s32)((s8)data12[7])) << SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[6]));
+		/* Data Y */
+		accel.y = (s16)((((s32)((s8)data12[9])) << SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[8]));
+		/* Data Z */
+		accel.z = (s16)((((s32)((s8)data12[11]))<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[10]));
+		ts = ns_to_timespec(client_data->timestamp);
+		input_event(client_data->input, EV_MSC, 6, ts.tv_sec);
+		input_event(client_data->input, EV_MSC, 6, ts.tv_nsec);
+		input_event(client_data->input, EV_MSC, MSC_GESTURE, accel.x);
+		input_event(client_data->input, EV_MSC, MSC_RAW, accel.y);
+		input_event(client_data->input, EV_MSC, MSC_SCAN, accel.z);
+		input_sync(client_data->input);
+	}
+	if (status & 0x40)
+	{
+		/*report gyro data*/
+		/* Data X */
+		gyro.x = (s16)((((s32)((s8)data12[1])) << SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[0]));
+		/* Data Y */
+		gyro.y = (s16)((((s32)((s8)data12[3])) << SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[2]));
+		/* Data Z */
+		gyro.z = (s16)((((s32)((s8)data12[5]))<< SMI130_SHIFT_BIT_POSITION_BY_08_BITS) | (data12[4]));
+		ts = ns_to_timespec(client_data->timestamp);
+		input_event(client_data->gyro_input, EV_MSC, 6, ts.tv_sec);
+		input_event(client_data->gyro_input, EV_MSC, 6, ts.tv_nsec);
+		input_event(client_data->gyro_input, EV_MSC, MSC_GESTURE, gyro.x);
+		input_event(client_data->gyro_input, EV_MSC, MSC_RAW, gyro.y);
+		input_event(client_data->gyro_input, EV_MSC, MSC_SCAN, gyro.z);
+		input_sync(client_data->gyro_input);
+	}
+}
+
+static void smi_signification_motion_interrupt_handle(
+		struct smi_client_data *client_data)
+{
+	printk(KERN_INFO "smi_signification_motion_interrupt_handle\n");
+	input_event(client_data->input, EV_MSC, INPUT_EVENT_SGM, 1);
+/*input_report_rel(client_data->input,INPUT_EVENT_SGM,1);*/
+	input_sync(client_data->input);
+	smi130_set_command_register(CMD_RESET_INT_ENGINE);
+
+}
+static void smi_stepdetector_interrupt_handle(
+	struct smi_client_data *client_data)
+{
+	u8 current_step_dector_st = 0;
+	client_data->pedo_data.wkar_step_detector_status++;
+	current_step_dector_st =
+		client_data->pedo_data.wkar_step_detector_status;
+	client_data->std = ((current_step_dector_st == 1) ? 0 : 1);
+
+	input_event(client_data->input, EV_MSC, INPUT_EVENT_STEP_DETECTOR, 1);
+	input_sync(client_data->input);
+}
+
+static void smi_irq_work_func(struct work_struct *work)
+{
+	struct smi_client_data *client_data =
+		container_of((struct work_struct *)work,
+			struct smi_client_data, irq_work);
+
+	unsigned char int_status[4] = {0, 0, 0, 0};
+	uint8_t status = 0;
+
+	//client_data->device.bus_read(client_data->device.dev_addr,
+	//			SMI130_USER_INTR_STAT_0_ADDR, int_status, 4);
+	client_data->device.bus_read(client_data->device.dev_addr,
+	SMI130_USER_STAT_ADDR, &status, 1);
+	printk(KERN_INFO "status = 0x%x\n", status);
+	if (SMI130_GET_BITSLICE(int_status[0],
+					SMI130_USER_INTR_STAT_0_ANY_MOTION))
+		smi_slope_interrupt_handle(client_data);
+
+	if (SMI130_GET_BITSLICE(int_status[0],
+			SMI130_USER_INTR_STAT_0_STEP_INTR))
+		smi_stepdetector_interrupt_handle(client_data);
+	if (SMI130_GET_BITSLICE(int_status[1],
+			SMI130_USER_INTR_STAT_1_FIFO_WM_INTR))
+		smi_fifo_watermark_interrupt_handle(client_data);
+	if ((status & 0x80) || (status & 0x40))
+		smi_data_ready_interrupt_handle(client_data, status);
+	/* Clear ALL interrupt status after handling sig motion */
+	/* Keep this command as the last one */
+	if (SMI130_GET_BITSLICE(int_status[0],
+		SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR))
+		smi_signification_motion_interrupt_handle(client_data);
+
+}
+
+static void smi130_delay_sigmo_work_func(struct work_struct *work)
+{
+	struct smi_client_data *client_data =
+	container_of(work, struct smi_client_data,
+	delay_work_sig.work);
+	unsigned char int_status[4] = {0, 0, 0, 0};
+
+	client_data->device.bus_read(client_data->device.dev_addr,
+				SMI130_USER_INTR_STAT_0_ADDR, int_status, 4);
+	if (SMI130_GET_BITSLICE(int_status[0],
+		SMI130_USER_INTR_STAT_0_SIGNIFICANT_INTR))
+		smi_signification_motion_interrupt_handle(client_data);
+}
+
+static irqreturn_t smi_irq_handler(int irq, void *handle)
+{
+	struct smi_client_data *client_data = handle;
+	int in_suspend_copy;
+
+	if (client_data == NULL)
+		return IRQ_HANDLED;
+	if (client_data->dev == NULL)
+		return IRQ_HANDLED;
+	in_suspend_copy = atomic_read(&client_data->in_suspend);
+	/* this only deals with the SIG_motion CTS test */
+	if ((in_suspend_copy == 1) &&
+		(client_data->sig_flag == 1)) {
+		/*wake_lock_timeout(&client_data->wakelock, HZ);*/
+		schedule_delayed_work(&client_data->delay_work_sig,
+			msecs_to_jiffies(50));
+	}
+	schedule_work(&client_data->irq_work);
+
+	return IRQ_HANDLED;
+}
+#endif /* defined(SMI130_ENABLE_INT1) || defined(SMI130_ENABLE_INT2) */
+
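+/*
+ * Re-apply range/ODR and FIFO enables for every sensor selected in
+ * fifo_data_sel, clear the FIFO, then wake any sensor whose cached power
+ * mode is not SUSPEND.
+ */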
+static int smi_restore_hw_cfg(struct smi_client_data *client)
+{
+	int err = 0;
+
+	if ((client->fifo_data_sel) & (1 << SMI_ACC_SENSOR)) {
+		err += SMI_CALL_API(set_accel_range)(client->range.acc_range);
+		err += SMI_CALL_API(set_accel_output_data_rate)
+				(client->odr.acc_odr);
+		err += SMI_CALL_API(set_fifo_accel_enable)(1);
+	}
+	if ((client->fifo_data_sel) & (1 << SMI_GYRO_SENSOR)) {
+		err += SMI_CALL_API(set_gyro_range)(client->range.gyro_range);
+		err += SMI_CALL_API(set_gyro_output_data_rate)
+				(client->odr.gyro_odr);
+		err += SMI_CALL_API(set_fifo_gyro_enable)(1);
+	}
+	if ((client->fifo_data_sel) & (1 << SMI_MAG_SENSOR)) {
+		err += SMI_CALL_API(set_mag_output_data_rate)
+				(client->odr.mag_odr);
+		err += SMI_CALL_API(set_fifo_mag_enable)(1);
+	}
+	err += SMI_CALL_API(set_command_register)(CMD_CLR_FIFO_DATA);
+
+	mutex_lock(&client->mutex_op_mode);
+	if (client->pw.acc_pm != SMI_ACC_PM_SUSPEND) {
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_acc_arr[SMI_ACC_PM_NORMAL]);
+		smi_delay(3);
+	}
+	mutex_unlock(&client->mutex_op_mode);
+
+	mutex_lock(&client->mutex_op_mode);
+	if (client->pw.gyro_pm != SMI_GYRO_PM_SUSPEND) {
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_NORMAL]);
+		smi_delay(3);
+	}
+	mutex_unlock(&client->mutex_op_mode);
+
+	mutex_lock(&client->mutex_op_mode);
+
+	if (client->pw.mag_pm != SMI_MAG_PM_SUSPEND) {
+#ifdef SMI130_AKM09912_SUPPORT
+		err += smi130_set_bosch_akm_and_secondary_if_powermode
+					(SMI130_MAG_FORCE_MODE);
+#else
+		err += smi130_set_bmm150_mag_and_secondary_if_power_mode
+					(SMI130_MAG_FORCE_MODE);
+#endif
+		smi_delay(3);
+	}
+	mutex_unlock(&client->mutex_op_mode);
+
+	return err;
+}
+
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+static void smi130_accel_work_fn(struct work_struct *work)
+{
+	struct smi_client_data *sensor;
+	ktime_t timestamp;
+	struct smi130_accel_t data;
+	int err;
+	sensor = container_of((struct delayed_work *)work,
+				struct smi_client_data, accel_poll_work);
+	timestamp = ktime_get();
+	err = SMI_CALL_API(read_accel_xyz)(&data);
+	if (err)
+		dev_err(sensor->dev, "read data err");
+	input_report_abs(sensor->input, ABS_X,
+		(data.x));
+	input_report_abs(sensor->input, ABS_Y,
+		(data.y));
+	input_report_abs(sensor->input, ABS_Z,
+		(data.z));
+	input_event(sensor->input,
+			EV_SYN, SYN_TIME_SEC,
+			ktime_to_timespec(timestamp).tv_sec);
+	input_event(sensor->input, EV_SYN,
+		SYN_TIME_NSEC,
+		ktime_to_timespec(timestamp).tv_nsec);
+	input_sync(sensor->input);
+	if (atomic_read(&sensor->accel_en))
+		queue_delayed_work(sensor->data_wq,
+			&sensor->accel_poll_work,
+			msecs_to_jiffies(sensor->accel_poll_ms));
+}
+static void smi130_gyro_work_fn(struct work_struct *work)
+{
+	struct smi_client_data *sensor;
+	ktime_t timestamp;
+	struct smi130_gyro_t data;
+	int err;
+	sensor = container_of((struct delayed_work *)work,
+				struct smi_client_data, gyro_poll_work);
+	timestamp = ktime_get();
+	err = SMI_CALL_API(read_gyro_xyz)(&data);
+	if (err)
+		dev_err(sensor->dev, "read data err");
+	input_report_abs(sensor->gyro_input, ABS_RX,
+		(data.x));
+	input_report_abs(sensor->gyro_input, ABS_RY,
+		(data.y));
+	input_report_abs(sensor->gyro_input, ABS_RZ,
+		(data.z));
+	input_event(sensor->gyro_input,
+			EV_SYN, SYN_TIME_SEC,
+			ktime_to_timespec(timestamp).tv_sec);
+	input_event(sensor->gyro_input, EV_SYN,
+		SYN_TIME_NSEC,
+		ktime_to_timespec(timestamp).tv_nsec);
+	input_sync(sensor->gyro_input);
+	if (atomic_read(&sensor->gyro_en))
+		queue_delayed_work(sensor->data_wq,
+			&sensor->gyro_poll_work,
+			msecs_to_jiffies(sensor->gyro_poll_ms));
+}
+static int smi130_set_gyro_op_mode(struct smi_client_data *client_data,
+							unsigned long op_mode)
+{
+	int err = 0;
+	mutex_lock(&client_data->mutex_op_mode);
+	if (op_mode < SMI_GYRO_PM_MAX) {
+		switch (op_mode) {
+		case SMI_GYRO_PM_NORMAL:
+			err = SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_NORMAL]);
+			client_data->pw.gyro_pm = SMI_GYRO_PM_NORMAL;
+			smi_delay(60);
+			break;
+		case SMI_GYRO_PM_FAST_START:
+			err = SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_FAST_START]);
+			client_data->pw.gyro_pm = SMI_GYRO_PM_FAST_START;
+			smi_delay(60);
+			break;
+		case SMI_GYRO_PM_SUSPEND:
+			err = SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_SUSPEND]);
+			client_data->pw.gyro_pm = SMI_GYRO_PM_SUSPEND;
+			smi_delay(60);
+			break;
+		default:
+			mutex_unlock(&client_data->mutex_op_mode);
+			return -EINVAL;
+		}
+	} else {
+		mutex_unlock(&client_data->mutex_op_mode);
+		return -EINVAL;
+	}
+	mutex_unlock(&client_data->mutex_op_mode);
+	return err;
+}
+static int smi130_accel_set_enable(
+	struct smi_client_data *client_data, bool enable)
+{
+	int ret = 0;
+	dev_notice(client_data->dev,
+		"smi130_accel_set_enable enable=%d\n", enable);
+	if (enable) {
+		ret = smi130_set_acc_op_mode(client_data, 0);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to enable accel engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+		queue_delayed_work(client_data->data_wq,
+				&client_data->accel_poll_work,
+				msecs_to_jiffies(client_data->accel_poll_ms));
+		atomic_set(&client_data->accel_en, 1);
+	} else {
+		atomic_set(&client_data->accel_en, 0);
+		cancel_delayed_work_sync(&client_data->accel_poll_work);
+		ret = smi130_set_acc_op_mode(client_data, 2);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to disable accel engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+static int smi130_accel_set_poll_delay(struct smi_client_data *client_data,
+					unsigned long delay)
+{
+	dev_info(client_data->dev,
+		"smi130_accel_set_poll_delay delay_ms=%ld\n", delay);
+	if (delay < SMI130_ACCEL_MIN_POLL_INTERVAL_MS)
+		delay = SMI130_ACCEL_MIN_POLL_INTERVAL_MS;
+	if (delay > SMI130_ACCEL_MAX_POLL_INTERVAL_MS)
+		delay = SMI130_ACCEL_MAX_POLL_INTERVAL_MS;
+	client_data->accel_poll_ms = delay;
+	if (!atomic_read(&client_data->accel_en))
+		goto exit;
+	cancel_delayed_work_sync(&client_data->accel_poll_work);
+	queue_delayed_work(client_data->data_wq,
+			&client_data->accel_poll_work,
+			msecs_to_jiffies(client_data->accel_poll_ms));
+exit:
+	return 0;
+}
+static int smi130_gyro_set_enable(
+	struct smi_client_data *client_data, bool enable)
+{
+	int ret = 0;
+	dev_notice(client_data->dev,
+		"smi130_gyro_set_enable enable=%d\n", enable);
+	if (enable) {
+		ret = smi130_set_gyro_op_mode(client_data, 0);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to enable gyro engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+		queue_delayed_work(client_data->data_wq,
+				&client_data->gyro_poll_work,
+				msecs_to_jiffies(client_data->gyro_poll_ms));
+		atomic_set(&client_data->gyro_en, 1);
+	} else {
+		atomic_set(&client_data->gyro_en, 0);
+		cancel_delayed_work_sync(&client_data->gyro_poll_work);
+		ret = smi130_set_gyro_op_mode(client_data, 2);
+		if (ret) {
+			dev_err(client_data->dev,
+				"Fail to disable gyro engine ret=%d\n", ret);
+			ret = -EBUSY;
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+static int smi130_gyro_set_poll_delay(struct smi_client_data *client_data,
+					unsigned long delay)
+{
+	dev_info(client_data->dev,
+		"smi130_gyro_set_poll_delay delay_ms=%ld\n", delay);
+	if (delay < SMI130_GYRO_MIN_POLL_INTERVAL_MS)
+		delay = SMI130_GYRO_MIN_POLL_INTERVAL_MS;
+	if (delay > SMI130_GYRO_MAX_POLL_INTERVAL_MS)
+		delay = SMI130_GYRO_MAX_POLL_INTERVAL_MS;
+	client_data->gyro_poll_ms = delay;
+	if (!atomic_read(&client_data->gyro_en))
+		goto exit;
+	cancel_delayed_work_sync(&client_data->gyro_poll_work);
+	queue_delayed_work(client_data->data_wq,
+			&client_data->gyro_poll_work,
+			msecs_to_jiffies(client_data->gyro_poll_ms));
+exit:
+	return 0;
+}
+static int smi130_accel_cdev_enable(struct sensors_classdev *sensors_cdev,
+			unsigned int enable)
+{
+	struct smi_client_data *sensor = container_of(sensors_cdev,
+			struct smi_client_data, accel_cdev);
+	return smi130_accel_set_enable(sensor, enable);
+}
+static int smi130_accel_cdev_poll_delay(struct sensors_classdev *sensors_cdev,
+			unsigned int delay_ms)
+{
+	struct smi_client_data *sensor = container_of(sensors_cdev,
+			struct smi_client_data, accel_cdev);
+
+	return smi130_accel_set_poll_delay(sensor, delay_ms);
+}
+
+static int smi130_gyro_cdev_enable(struct sensors_classdev *sensors_cdev,
+			unsigned int enable)
+{
+	struct smi_client_data *sensor = container_of(sensors_cdev,
+			struct smi_client_data, gyro_cdev);
+
+	return smi130_gyro_set_enable(sensor, enable);
+}
+
+static int smi130_gyro_cdev_poll_delay(struct sensors_classdev *sensors_cdev,
+			unsigned int delay_ms)
+{
+	struct smi_client_data *sensor = container_of(sensors_cdev,
+			struct smi_client_data, gyro_cdev);
+
+	return	smi130_gyro_set_poll_delay(sensor, delay_ms);
+	return smi130_gyro_set_poll_delay(sensor, delay_ms);
+#endif
+
+int smi_probe(struct smi_client_data *client_data, struct device *dev)
+{
+	int err = 0;
+#ifdef SMI130_MAG_INTERFACE_SUPPORT
+	u8 mag_dev_addr;
+	u8 mag_urst_len;
+	u8 mag_op_mode;
+#endif
+	/* check chip id */
+	err = smi_check_chip_id(client_data);
+	if (err)
+		goto exit_err_clean;
+
+	dev_set_drvdata(dev, client_data);
+	client_data->dev = dev;
+
+	mutex_init(&client_data->mutex_enable);
+	mutex_init(&client_data->mutex_op_mode);
+
+	/* input device init */
+	err = smi_input_init(client_data);
+	if (err < 0)
+		goto exit_err_clean;
+
+	/* sysfs node creation */
+	err = sysfs_create_group(&client_data->input->dev.kobj,
+			&smi130_attribute_group);
+
+	if (err < 0)
+		goto exit_err_sysfs;
+
+	if (NULL != dev->platform_data) {
+		client_data->bosch_pd = kzalloc(sizeof(*client_data->bosch_pd),
+				GFP_KERNEL);
+
+		if (NULL != client_data->bosch_pd) {
+			memcpy(client_data->bosch_pd, dev->platform_data,
+					sizeof(*client_data->bosch_pd));
+			dev_notice(dev, "%s sensor driver set place: p%d\n",
+					client_data->bosch_pd->name,
+					client_data->bosch_pd->place);
+		}
+	}
+
+	if (NULL != client_data->bosch_pd) {
+		memcpy(client_data->bosch_pd, dev->platform_data,
+				sizeof(*client_data->bosch_pd));
+		dev_notice(dev, "%s sensor driver set place: p%d\n",
+				client_data->bosch_pd->name,
+				client_data->bosch_pd->place);
+	}
+
+
+	/* workqueue init */
+	INIT_DELAYED_WORK(&client_data->work, smi_work_func);
+	atomic_set(&client_data->delay, SMI_DELAY_DEFAULT);
+	atomic_set(&client_data->wkqueue_en, 0);
+
+	/* h/w init */
+	client_data->device.delay_msec = smi_delay;
+	err = SMI_CALL_API(init)(&client_data->device);
+
+	smi_dump_reg(client_data);
+
+	/*power on detected*/
+	/*or soft reset (cmd 0xB6) */
+	/*fatal err check*/
+	/*soft reset*/
+	err += SMI_CALL_API(set_command_register)(CMD_RESET_USER_REG);
+	smi_delay(3);
+	if (err)
+		dev_err(dev, "Failed soft reset, err=%d", err);
+	/*usr data config page*/
+	err += SMI_CALL_API(set_target_page)(USER_DAT_CFG_PAGE);
+	if (err)
+		dev_err(dev, "Failed to config page, err=%d", err);
+	err += smi_get_err_status(client_data);
+	if (err) {
+		dev_err(dev, "Failed to init smi130! err_st=0x%x\n",
+				client_data->err_st.err_st_all);
+		goto exit_err_sysfs;
+	}
+
+#ifdef SMI130_MAG_INTERFACE_SUPPORT
+	err += smi130_set_command_register(MAG_MODE_NORMAL);
+	smi_delay(2);
+	err += smi130_get_mag_power_mode_stat(&mag_op_mode);
+	smi_delay(2);
+	err += SMI_CALL_API(get_i2c_device_addr)(&mag_dev_addr);
+	smi_delay(2);
+#if defined(SMI130_AKM09912_SUPPORT)
+	err += SMI_CALL_API(set_i2c_device_addr)(SMI130_AKM09912_I2C_ADDRESS);
+	smi130_bosch_akm_mag_interface_init(SMI130_AKM09912_I2C_ADDRESS);
+#else
+	err += SMI_CALL_API(set_i2c_device_addr)(
+		SMI130_AUX_BMM150_I2C_ADDRESS);
+	smi130_bmm150_mag_interface_init();
+#endif
+
+	err += smi130_set_mag_burst(3);
+	err += smi130_get_mag_burst(&mag_urst_len);
+	if (err)
+		dev_err(client_data->dev, "Failed to config mag, err=%d", err);
+	dev_info(client_data->dev,
+		"SMI130 mag_urst_len:%d, mag_add:0x%x, mag_op_mode:%d\n",
+		mag_urst_len, mag_dev_addr, mag_op_mode);
+#endif
+	if (err < 0)
+		goto exit_err_sysfs;
+
+
+#if defined(SMI130_ENABLE_INT1) || defined(SMI130_ENABLE_INT2)
+		/*wake_lock_init(&client_data->wakelock,
+			WAKE_LOCK_SUSPEND, "smi130");*/
+		client_data->gpio_pin = of_get_named_gpio_flags(dev->of_node,
+					"smi,gpio_irq", 0, NULL);
+		dev_info(client_data->dev, "SMI130 gpio number:%d\n",
+					client_data->gpio_pin);
+		err += gpio_request_one(client_data->gpio_pin,
+					GPIOF_IN, "smi130_int");
+		err += gpio_direction_input(client_data->gpio_pin);
+		client_data->IRQ = gpio_to_irq(client_data->gpio_pin);
+		if (err) {
+			dev_err(client_data->dev,
+				"can not request gpio to irq number\n");
+			client_data->gpio_pin = 0;
+		}
+		INIT_DELAYED_WORK(&client_data->delay_work_sig,
+			smi130_delay_sigmo_work_func);
+#ifdef SMI130_ENABLE_INT1
+		/* maps interrupt to INT1/INT2 pin */
+		SMI_CALL_API(set_intr_any_motion)(SMI_INT0, ENABLE);
+		SMI_CALL_API(set_intr_fifo_wm)(SMI_INT0, ENABLE);
+		SMI_CALL_API(set_intr_data_rdy)(SMI_INT0, ENABLE);
+
+		/* Set interrupt trigger level mode */
+		SMI_CALL_API(set_intr_edge_ctrl)(SMI_INT0, SMI_INT_LEVEL);
+		smi130_set_intr_level(SMI_INT0, 1);
+		/*set interrupt latch temporary, 5 ms*/
+		/*smi130_set_latch_int(5);*/
+
+		SMI_CALL_API(set_output_enable)(
+		SMI130_INTR1_OUTPUT_ENABLE, ENABLE);
+		sigmotion_init_interrupts(SMI130_MAP_INTR1);
+		SMI_CALL_API(map_step_detector_intr)(SMI130_MAP_INTR1);
+		/*close step_detector in init function*/
+		SMI_CALL_API(set_step_detector_enable)(0);
+#endif
+
+#ifdef SMI130_ENABLE_INT2
+		/* maps interrupt to INT1/INT2 pin */
+		SMI_CALL_API(set_intr_any_motion)(SMI_INT1, ENABLE);
+		SMI_CALL_API(set_intr_fifo_wm)(SMI_INT1, ENABLE);
+		SMI_CALL_API(set_intr_data_rdy)(SMI_INT1, ENABLE);
+
+		/* Set interrupt trigger level mode */
+		SMI_CALL_API(set_intr_edge_ctrl)(SMI_INT1, SMI_INT_LEVEL);
+		smi130_set_intr_level(SMI_INT1, 1);
+		/*set interrupt latch temporary, 5 ms*/
+		/*smi130_set_latch_int(5);*/
+
+		SMI_CALL_API(set_output_enable)(
+		SMI130_INTR2_OUTPUT_ENABLE, ENABLE);
+		sigmotion_init_interrupts(SMI130_MAP_INTR2);
+		SMI_CALL_API(map_step_detector_intr)(SMI130_MAP_INTR2);
+		/*close step_detector in init function*/
+		SMI_CALL_API(set_step_detector_enable)(0);
+#endif
+		err = request_irq(client_data->IRQ, smi_irq_handler,
+				IRQF_TRIGGER_RISING, "smi130", client_data);
+		if (err)
+			dev_err(client_data->dev, "could not request irq\n");
+
+		INIT_WORK(&client_data->irq_work, smi_irq_work_func);
+#endif
+
+	client_data->selftest = 0;
+
+	client_data->fifo_data_sel = 0;
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	SMI_CALL_API(set_accel_output_data_rate)(9);/*default odr 200HZ*/
+	SMI_CALL_API(set_gyro_output_data_rate)(9);/*default odr 200HZ*/
+	#endif
+	SMI_CALL_API(get_accel_output_data_rate)(&client_data->odr.acc_odr);
+	SMI_CALL_API(get_gyro_output_data_rate)(&client_data->odr.gyro_odr);
+	SMI_CALL_API(get_mag_output_data_rate)(&client_data->odr.mag_odr);
+	SMI_CALL_API(set_fifo_time_enable)(1);
+	SMI_CALL_API(get_accel_range)(&client_data->range.acc_range);
+	SMI_CALL_API(get_gyro_range)(&client_data->range.gyro_range);
+	/* now it's power on which is considered as resuming from suspend */
+
+	/* gyro input device init */
+	err = smi_gyro_input_init(client_data);
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	/* gyro input device init */
+	err = smi_gyro_input_init(client_data);
+	if (err < 0)
+		goto exit_err_clean;
+	client_data->accel_poll_ms = SMI130_ACCEL_DEFAULT_POLL_INTERVAL_MS;
+	client_data->gyro_poll_ms = SMI130_GYRO_DEFAULT_POLL_INTERVAL_MS;
+	client_data->data_wq = create_freezable_workqueue("smi130_data_work");
+	if (!client_data->data_wq) {
+		dev_err(dev, "Cannot create workqueue!\n");
+		goto exit_err_clean;
+	}
+	INIT_DELAYED_WORK(&client_data->accel_poll_work,
+		smi130_accel_work_fn);
+	client_data->accel_cdev = smi130_accel_cdev;
+	client_data->accel_cdev.delay_msec = client_data->accel_poll_ms;
+	client_data->accel_cdev.sensors_enable = smi130_accel_cdev_enable;
+	client_data->accel_cdev.sensors_poll_delay =
+	smi130_accel_cdev_poll_delay;
+	err = sensors_classdev_register(dev, &client_data->accel_cdev);
+	if (err) {
+		dev_err(dev,
+			"create accel class device file failed!\n");
+		goto exit_err_clean;
+	}
+	INIT_DELAYED_WORK(&client_data->gyro_poll_work, smi130_gyro_work_fn);
+	client_data->gyro_cdev = smi130_gyro_cdev;
+	client_data->gyro_cdev.delay_msec = client_data->gyro_poll_ms;
+	client_data->gyro_cdev.sensors_enable = smi130_gyro_cdev_enable;
+	client_data->gyro_cdev.sensors_poll_delay = smi130_gyro_cdev_poll_delay;
+	err = sensors_classdev_register(dev, &client_data->gyro_cdev);
+	if (err) {
+		dev_err(dev,
+			"create gyro class device file failed!\n");
+		goto exit_err_clean;
+	}
+	#endif
+	/* set sensor PMU into suspend power mode for all */
+	if (smi_pmu_set_suspend(client_data) < 0) {
+		dev_err(dev, "Failed to set SMI130 to suspend power mode\n");
+		goto exit_err_sysfs;
+	}
+	/*enable the data ready interrupt*/
+	SMI_CALL_API(set_intr_enable_1)(SMI130_DATA_RDY_ENABLE, 1);
+	dev_notice(dev, "sensor_time:%d, %d, %d",
+		sensortime_duration_tbl[0].ts_delat,
+		sensortime_duration_tbl[0].ts_duration_lsb,
+		sensortime_duration_tbl[0].ts_duration_us);
+	dev_notice(dev, "sensor %s probed successfully", SENSOR_NAME);
+
+	return 0;
+
+exit_err_sysfs:
+	if (err)
+		smi_input_destroy(client_data);
+
+exit_err_clean:
+	if (err) {
+		if (client_data != NULL) {
+			if (NULL != client_data->bosch_pd) {
+				kfree(client_data->bosch_pd);
+				client_data->bosch_pd = NULL;
+			}
+		}
+	}
+	return err;
+}
+EXPORT_SYMBOL(smi_probe);
+
+/*!
+ * @brief remove smi client
+ *
+ * @param dev the pointer of device
+ *
+ * @return zero
+ * @retval zero
+*/
+int smi_remove(struct device *dev)
+{
+	int err = 0;
+	struct smi_client_data *client_data = dev_get_drvdata(dev);
+
+	if (NULL != client_data) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		unregister_early_suspend(&client_data->early_suspend_handler);
+#endif
+		mutex_lock(&client_data->mutex_enable);
+		if (SMI_ACC_PM_NORMAL == client_data->pw.acc_pm ||
+			SMI_GYRO_PM_NORMAL == client_data->pw.gyro_pm ||
+				SMI_MAG_PM_NORMAL == client_data->pw.mag_pm) {
+			cancel_delayed_work_sync(&client_data->work);
+		}
+		mutex_unlock(&client_data->mutex_enable);
+
+		err = smi_pmu_set_suspend(client_data);
+
+		smi_delay(5);
+
+		sysfs_remove_group(&client_data->input->dev.kobj,
+				&smi130_attribute_group);
+		smi_input_destroy(client_data);
+
+		if (NULL != client_data->bosch_pd) {
+			kfree(client_data->bosch_pd);
+			client_data->bosch_pd = NULL;
+		}
+		kfree(client_data);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(smi_remove);
+
+static int smi_post_resume(struct smi_client_data *client_data)
+{
+	int err = 0;
+
+	mutex_lock(&client_data->mutex_enable);
+
+	if (atomic_read(&client_data->wkqueue_en) == 1) {
+		smi130_set_acc_op_mode(client_data, SMI_ACC_PM_NORMAL);
+		schedule_delayed_work(&client_data->work,
+				msecs_to_jiffies(
+					atomic_read(&client_data->delay)));
+	}
+	mutex_unlock(&client_data->mutex_enable);
+
+	return err;
+}
+
+
+int smi_suspend(struct device *dev)
+{
+	int err = 0;
+	struct smi_client_data *client_data = dev_get_drvdata(dev);
+	unsigned char stc_enable;
+	unsigned char std_enable;
+	dev_err(client_data->dev, "smi suspend function entrance");
+
+	atomic_set(&client_data->in_suspend, 1);
+	if (atomic_read(&client_data->wkqueue_en) == 1) {
+		smi130_set_acc_op_mode(client_data, SMI_ACC_PM_SUSPEND);
+		cancel_delayed_work_sync(&client_data->work);
+	}
+	SMI_CALL_API(get_step_counter_enable)(&stc_enable);
+	SMI_CALL_API(get_step_detector_enable)(&std_enable);
+	if (client_data->pw.acc_pm != SMI_ACC_PM_SUSPEND &&
+		(stc_enable != 1) && (std_enable != 1) &&
+		(client_data->sig_flag != 1)) {
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_acc_arr[SMI_ACC_PM_SUSPEND]);
+		smi_delay(3);
+	}
+	if (client_data->pw.gyro_pm != SMI_GYRO_PM_SUSPEND) {
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_SUSPEND]);
+		smi_delay(3);
+	}
+
+	if (client_data->pw.mag_pm != SMI_MAG_PM_SUSPEND) {
+#if defined(SMI130_AKM09912_SUPPORT)
+		err += smi130_set_bosch_akm_and_secondary_if_powermode(
+		SMI130_MAG_SUSPEND_MODE);
+#else
+		err += smi130_set_bmm150_mag_and_secondary_if_power_mode(
+		SMI130_MAG_SUSPEND_MODE);
+#endif
+		smi_delay(3);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(smi_suspend);
+
+int smi_resume(struct device *dev)
+{
+	int err = 0;
+	struct smi_client_data *client_data = dev_get_drvdata(dev);
+	atomic_set(&client_data->in_suspend, 0);
+	if (client_data->pw.acc_pm != SMI_ACC_PM_SUSPEND) {
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_acc_arr[SMI_ACC_PM_NORMAL]);
+		smi_delay(3);
+	}
+	if (client_data->pw.gyro_pm != SMI_GYRO_PM_SUSPEND) {
+		err += SMI_CALL_API(set_command_register)
+				(smi_pmu_cmd_gyro_arr[SMI_GYRO_PM_NORMAL]);
+		smi_delay(3);
+	}
+
+	if (client_data->pw.mag_pm != SMI_MAG_PM_SUSPEND) {
+#if defined(SMI130_AKM09912_SUPPORT)
+		err += smi130_set_bosch_akm_and_secondary_if_powermode
+					(SMI130_MAG_FORCE_MODE);
+#else
+		err += smi130_set_bmm150_mag_and_secondary_if_power_mode
+					(SMI130_MAG_FORCE_MODE);
+#endif
+		smi_delay(3);
+	}
+	/* post resume operation */
+	err += smi_post_resume(client_data);
+
+	return err;
+}
+EXPORT_SYMBOL(smi_resume);
+
diff --git a/drivers/input/sensors/smi130/smi130_driver.h b/drivers/input/sensors/smi130/smi130_driver.h
new file mode 100644
index 0000000..4307ae5
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_driver.h
@@ -0,0 +1,512 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL), Version 2, June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+ *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename smi130_driver.h
+ * @date     2015/08/17 14:40
+ * @Modification Date 2018/08/28 18:20
+ * @id       "e90a329"
+ * @version  1.3
+ *
+ * @brief
+ * The header file of the SMI130 device driver core code
+*/
+#ifndef _SMI130_DRIVER_H
+#define _SMI130_DRIVER_H
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#else
+#include <unistd.h>
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include "smi130.h"
+
+#if defined(CONFIG_USE_QUALCOMM_HAL)
+#include <linux/sensors.h>
+#endif
+/* sensor specific */
+#define SENSOR_NAME "smi130"
+#define SMI130_ENABLE_INT1 1
+#define SMI130_ENABLE_INT2 1
+/*#define SMI130_MAG_INTERFACE_SUPPORT 1*/
+
+/*#define SMI130_AKM09912_SUPPORT 1*/
+#define SMI_USE_BASIC_I2C_FUNC 1
+#define SENSOR_CHIP_ID_SMI (0xD0)
+#define SENSOR_CHIP_ID_SMI_C2 (0xD1)
+#define SENSOR_CHIP_ID_SMI_C3 (0xD3)
+
+#define SENSOR_CHIP_REV_ID_SMI (0x00)
+
+#define CHECK_CHIP_ID_TIME_MAX  5
+
+#define SMI_REG_NAME(name) SMI130_##name##__REG
+#define SMI_VAL_NAME(name) SMI130_##name
+#define SMI_CALL_API(name) smi130_##name
+
+#define SMI_I2C_WRITE_DELAY_TIME (1)
+
+/* generic */
+#define SMI_MAX_RETRY_I2C_XFER (10)
+#define SMI_MAX_RETRY_WAKEUP (5)
+#define SMI_MAX_RETRY_WAIT_DRDY (100)
+
+#define SMI_DELAY_MIN (1)
+#define SMI_DELAY_DEFAULT (200)
+
+#define SMI_VALUE_MAX (32767)
+#define SMI_VALUE_MIN (-32768)
+
+#define BYTES_PER_LINE (16)
+
+#define BUF_SIZE_PRINT (16)
+
+#define SMI_FAST_CALI_TRUE  (1)
+#define SMI_FAST_CALI_ALL_RDY (7)
+
+/*! FIFO is 1024 bytes; FIFO_FRAME_CNT bounds the number of parsed frames */
+#define FIFO_FRAME_CNT 170
+#define FIFO_DATA_BUFSIZE    1024
+
+
+#define FRAME_LEN_ACC    6
+#define FRAME_LEN_GYRO    6
+#define FRAME_LEN_MAG    8
+
+/*! SMI Self test */
+#define SMI_SELFTEST_AMP_HIGH       1
+
+/* CMD  */
+#define CMD_FOC_START                 0x03
+#define CMD_PMU_ACC_SUSPEND           0x10
+#define CMD_PMU_ACC_NORMAL            0x11
+#define CMD_PMU_ACC_LP1               0x12
+#define CMD_PMU_ACC_LP2               0x13
+#define CMD_PMU_GYRO_SUSPEND          0x14
+#define CMD_PMU_GYRO_NORMAL           0x15
+#define CMD_PMU_GYRO_FASTSTART        0x17
+#define CMD_PMU_MAG_SUSPEND           0x18
+#define CMD_PMU_MAG_NORMAL            0x19
+#define CMD_PMU_MAG_LP1               0x1A
+#define CMD_PMU_MAG_LP2               0x1B
+#define CMD_CLR_FIFO_DATA             0xB0
+#define CMD_RESET_INT_ENGINE          0xB1
+#define CMD_RESET_USER_REG            0xB6
+
+#define USER_DAT_CFG_PAGE              0x00
+
+/*! FIFO Head definition*/
+#define FIFO_HEAD_A        0x84
+#define FIFO_HEAD_G        0x88
+#define FIFO_HEAD_M        0x90
+
+#define FIFO_HEAD_G_A        (FIFO_HEAD_G | FIFO_HEAD_A)
+#define FIFO_HEAD_M_A        (FIFO_HEAD_M | FIFO_HEAD_A)
+#define FIFO_HEAD_M_G        (FIFO_HEAD_M | FIFO_HEAD_G)
+
+#define FIFO_HEAD_M_G_A         (FIFO_HEAD_M | FIFO_HEAD_G | FIFO_HEAD_A)
+
+#define FIFO_HEAD_SENSOR_TIME        0x44
+#define FIFO_HEAD_SKIP_FRAME        0x40
+#define FIFO_HEAD_OVER_READ_LSB       0x80
+#define FIFO_HEAD_OVER_READ_MSB       0x00
+
+/*! FIFO head mode Frame bytes number definition */
+#define A_BYTES_FRM      6
+#define G_BYTES_FRM      6
+#define M_BYTES_FRM      8
+#define GA_BYTES_FRM     12
+#define MG_BYTES_FRM     14
+#define MA_BYTES_FRM     14
+#define MGA_BYTES_FRM    20
+
+#define ACC_FIFO_HEAD       "acc"
+#define GYRO_FIFO_HEAD     "gyro"
+#define MAG_FIFO_HEAD         "mag"
+
+/*! Bosch sensor unknown place*/
+#define BOSCH_SENSOR_PLACE_UNKNOWN (-1)
+/*! Bosch sensor remapping table size P0~P7*/
+#define MAX_AXIS_REMAP_TAB_SZ 8
+
+#define ENABLE     1
+#define DISABLE    0
+
+/* smi sensor HW interrupt pin number */
+#define SMI_INT0      0
+#define SMI_INT1       1
+
+#define SMI_INT_LEVEL      0
+#define SMI_INT_EDGE        1
+
+/*! SMI mag interface */
+
+
+/* compensated output value returned if sensor had overflow */
+#define BMM050_OVERFLOW_OUTPUT       -32768
+#define BMM050_OVERFLOW_OUTPUT_S32   ((s32)(-2147483647-1))
+
+/* Trim Extended Registers */
+#define BMM050_DIG_X1                      0x5D
+#define BMM050_DIG_Y1                      0x5E
+#define BMM050_DIG_Z4_LSB                  0x62
+#define BMM050_DIG_Z4_MSB                  0x63
+#define BMM050_DIG_X2                      0x64
+#define BMM050_DIG_Y2                      0x65
+#define BMM050_DIG_Z2_LSB                  0x68
+#define BMM050_DIG_Z2_MSB                  0x69
+#define BMM050_DIG_Z1_LSB                  0x6A
+#define BMM050_DIG_Z1_MSB                  0x6B
+#define BMM050_DIG_XYZ1_LSB                0x6C
+#define BMM050_DIG_XYZ1_MSB                0x6D
+#define BMM050_DIG_Z3_LSB                  0x6E
+#define BMM050_DIG_Z3_MSB                  0x6F
+#define BMM050_DIG_XY2                     0x70
+#define BMM050_DIG_XY1                     0x71
+
+struct smi130mag_compensate_t {
+	signed char dig_x1;
+	signed char dig_y1;
+
+	signed char dig_x2;
+	signed char dig_y2;
+
+	u16 dig_z1;
+	s16 dig_z2;
+	s16 dig_z3;
+	s16 dig_z4;
+
+	unsigned char dig_xy1;
+	signed char dig_xy2;
+
+	u16 dig_xyz1;
+};
+
+/*smi fifo sensor type combination*/
+enum SMI_FIFO_DATA_SELECT_T {
+	SMI_FIFO_A_SEL = 1,
+	SMI_FIFO_G_SEL,
+	SMI_FIFO_G_A_SEL,
+	SMI_FIFO_M_SEL,
+	SMI_FIFO_M_A_SEL,
+	SMI_FIFO_M_G_SEL,
+	SMI_FIFO_M_G_A_SEL,
+	SMI_FIFO_DATA_SEL_MAX
+};
+
+/*smi interrupt about step_detector and sgm*/
+#define INPUT_EVENT_STEP_DETECTOR    5
+#define INPUT_EVENT_SGM              3/*7*/
+#define INPUT_EVENT_FAST_ACC_CALIB_DONE    6
+#define INPUT_EVENT_FAST_GYRO_CALIB_DONE    4
+
+
+/*!
+* Bosch sensor common definition;
+* the parameters are expected to be supplied in the BSP/board file.
+*/
+struct bosch_sensor_specific {
+	char *name;
+	/* 0 to 7 */
+	unsigned int place:3;
+	int irq;
+	int (*irq_gpio_cfg)(void);
+};
+
+/*! smi130 sensor spec of power mode */
+struct pw_mode {
+	u8 acc_pm;
+	u8 gyro_pm;
+	u8 mag_pm;
+};
+
+/*! smi130 sensor spec of odr */
+struct odr_t {
+	u8 acc_odr;
+	u8 gyro_odr;
+	u8 mag_odr;
+};
+
+/*! smi130 sensor spec of range */
+struct range_t {
+	u8 acc_range;
+	u8 gyro_range;
+};
+
+/*! smi130 sensor error status */
+struct err_status {
+	u8 fatal_err;
+	u8 err_code;
+	u8 i2c_fail;
+	u8 drop_cmd;
+	u8 mag_drdy_err;
+	u8 err_st_all;
+};
+
+/*! smi130 fifo frame for all sensors */
+struct fifo_frame_t {
+	struct smi130_accel_t *acc_farr;
+	struct smi130_gyro_t *gyro_farr;
+	struct smi130_mag_xyz_s32_t *mag_farr;
+
+	unsigned char acc_frame_cnt;
+	unsigned char gyro_frame_cnt;
+	unsigned char mag_frame_cnt;
+
+	u32 acc_lastf_ts;
+	u32 gyro_lastf_ts;
+	u32 mag_lastf_ts;
+};
+
+/*! smi130 fifo sensor time */
+struct fifo_sensor_time_t {
+	u32 acc_ts;
+	u32 gyro_ts;
+	u32 mag_ts;
+};
+
+struct pedometer_data_t {
+	/*! Fix step detector misinformation for the first time*/
+	u8 wkar_step_detector_status;
+	u_int32_t last_step_counter_value;
+};
+
+struct smi_client_data {
+	struct smi130_t device;
+	struct device *dev;
+	struct input_dev *input;/*acc_device*/
+	struct input_dev *gyro_input;
+	#if defined(CONFIG_USE_QUALCOMM_HAL)
+	struct sensors_classdev accel_cdev;
+	struct sensors_classdev gyro_cdev;
+	struct delayed_work accel_poll_work;
+	struct delayed_work gyro_poll_work;
+	u32 accel_poll_ms;
+	u32 gyro_poll_ms;
+	u32 accel_latency_ms;
+	u32 gyro_latency_ms;
+	atomic_t accel_en;
+	atomic_t gyro_en;
+	struct workqueue_struct *data_wq;
+	#endif
+	struct delayed_work work;
+	struct work_struct irq_work;
+
+	u8 chip_id;
+
+	struct pw_mode pw;
+	struct odr_t odr;
+	struct range_t range; /*TO DO*/
+	struct err_status err_st;
+	struct pedometer_data_t pedo_data;
+	s8 place;
+	u8 selftest;
+	/*struct wake_lock wakelock;*/
+	struct delayed_work delay_work_sig;
+	atomic_t in_suspend;
+
+	atomic_t wkqueue_en; /*TO DO acc gyro mag*/
+	atomic_t delay;
+	atomic_t selftest_result;
+
+	u8  fifo_data_sel;
+	u16 fifo_bytecount;
+	u8 fifo_head_en;
+	unsigned char fifo_int_tag_en;
+	struct fifo_frame_t fifo_frame;
+
+	unsigned char *fifo_data;
+	u64 fifo_time;
+	u8 stc_enable;
+	uint16_t gpio_pin;
+	u8 std;
+	u8 sig_flag;
+	unsigned char calib_status;
+	struct mutex mutex_op_mode;
+	struct mutex mutex_enable;
+	struct bosch_sensor_specific *bosch_pd;
+	int IRQ;
+	int reg_sel;
+	int reg_len;
+	uint64_t timestamp;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend_handler;
+#endif
+};
+
+
+/*!
+ * the axis remap description is kept in its own structure,
+ * because its layout might be changed
+ */
+struct bosch_sensor_axis_remap {
+	/* src means which source will be mapped to target x, y, z axis */
+	/* if an target OS axis is remapped from (-)x,
+	 * src is 0, sign_* is (-)1 */
+	/* if an target OS axis is remapped from (-)y,
+	 * src is 1, sign_* is (-)1 */
+	/* if an target OS axis is remapped from (-)z,
+	 * src is 2, sign_* is (-)1 */
+	int src_x:3;
+	int src_y:3;
+	int src_z:3;
+
+	int sign_x:2;
+	int sign_y:2;
+	int sign_z:2;
+};
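+
+/*
+ * Illustrative example (not part of the Bosch sources): one remap entry for
+ * a hypothetical mounting in which the OS x axis comes from the chip y axis,
+ * the OS y axis comes from the inverted chip x axis and the OS z axis is
+ * unchanged:
+ *
+ *	{ .src_x = 1, .src_y = 0, .src_z = 2,
+ *	  .sign_x = 1, .sign_y = -1, .sign_z = 1 }
+ */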
+
+
+struct bosch_sensor_data {
+	union {
+		int16_t v[3];
+		struct {
+			int16_t x;
+			int16_t y;
+			int16_t z;
+		};
+	};
+};
+
+s8 smi_burst_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u16 len);
+int smi_probe(struct smi_client_data *client_data, struct device *dev);
+int smi_remove(struct device *dev);
+int smi_suspend(struct device *dev);
+int smi_resume(struct device *dev);
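+
+/*
+ * Illustrative sketch (not part of the Bosch sources): a bus-specific front
+ * end is expected to allocate struct smi_client_data, fill client_data->device
+ * (the smi130_t bus read/write/delay hooks declared in smi130.h; their member
+ * names are not repeated here) and then hand control to the core, e.g. from
+ * an I2C probe routine:
+ *
+ *	static int smi_i2c_probe(struct i2c_client *client,
+ *			const struct i2c_device_id *id)
+ *	{
+ *		struct smi_client_data *client_data =
+ *			devm_kzalloc(&client->dev, sizeof(*client_data),
+ *					GFP_KERNEL);
+ *
+ *		if (!client_data)
+ *			return -ENOMEM;
+ *		// fill client_data->device bus hooks here
+ *		return smi_probe(client_data, &client->dev);
+ *	}
+ */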
+
+
+
+
+#endif/*_SMI130_DRIVER_H*/
+/*@}*/
+
diff --git a/drivers/input/sensors/smi130/smi130_gyro.c b/drivers/input/sensors/smi130/smi130_gyro.c
new file mode 100644
index 0000000..ef3fc38f
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_gyro.c
@@ -0,0 +1,7422 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL), Version 2, June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+ *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ * @filename smi130_gyro.c
+ * @date    2013/11/25
+ * @Modification Date 2018/08/28 18:20
+ * @id       "8fcde22"
+ * @version  1.5
+ *
+ * @brief    SMI130_GYRO API
+*/
+
+#include "smi130_gyro.h"
+static struct smi130_gyro_t *p_smi130_gyro;
+
+
+/*****************************************************************************
+ * Description: *//**brief API Initialization routine
+ *
+ *
+ *
+ *
+* \param smi130_gyro_t *smi130_gyro
+ *      Pointer to a structure.
+ *
+ *       structure members are
+ *
+ *       unsigned char chip_id;
+ *       unsigned char dev_addr;
+ *       SMI130_GYRO_BRD_FUNC_PTR;
+ *       SMI130_GYRO_WR_FUNC_PTR;
+ *       SMI130_GYRO_RD_FUNC_PTR;
+ *       void(*delay_msec)( SMI130_GYRO_MDELAY_DATA_TYPE );
+ *
+ *
+ *
+ *
+ *
+ *  \return result of communication routines
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_init(struct smi130_gyro_t *smi130_gyro)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char a_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	p_smi130_gyro = smi130_gyro;
+
+	p_smi130_gyro->dev_addr = SMI130_GYRO_I2C_ADDR;
+
+	/*Read CHIP_ID */
+	comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+	 SMI130_GYRO_CHIP_ID_ADDR, &a_data_u8r, 1);
+	p_smi130_gyro->chip_id = a_data_u8r;
+	return comres;
+}
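+/*
+ * Illustrative usage sketch (not part of the Bosch sources).  The caller owns
+ * the smi130_gyro_t structure and must install its bus access callbacks
+ * before calling smi130_gyro_init(); the read/write member names behind
+ * SMI130_GYRO_BUS_READ_FUNC/SMI130_GYRO_BUS_WRITE_FUNC live in smi130_gyro.h,
+ * so "bus_read"/"bus_write" below are assumptions (the callbacks are invoked
+ * as (dev_addr, reg_addr, data, len)):
+ *
+ *	static struct smi130_gyro_t gyro_dev;
+ *
+ *	gyro_dev.bus_read   = my_i2c_read;
+ *	gyro_dev.bus_write  = my_i2c_write;
+ *	gyro_dev.delay_msec = my_delay_msec;
+ *	if (smi130_gyro_init(&gyro_dev) != C_SMI130_GYRO_Zero_U8X)
+ *		// bus error while reading the chip id
+ *	// on success gyro_dev.chip_id holds the CHIP_ID register value
+ */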
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads Rate dataX from location 02h and 03h
+ * registers
+ *
+ *
+ *
+ *
+ *  \param
+ *      SMI130_GYRO_S16  *data_x   :  Address of data_x
+ *
+ *
+ *  \return
+ *      result of communication routines
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataX(SMI130_GYRO_S16 *data_x)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char a_data_u8r[2] = {0, 0};
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_RATE_X_LSB_VALUEX__REG, a_data_u8r, 2);
+		a_data_u8r[0] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[0],
+		SMI130_GYRO_RATE_X_LSB_VALUEX);
+		*data_x = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[1])) <<
+		SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[0]));
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads rate dataY from location 04h and 05h
+ * registers
+ *
+ *
+ *
+ *
+ *  \param
+ *      SMI130_GYRO_S16  *data_y   :  Address of data_y
+ *
+ *
+ *  \return
+ *      result of communication routines
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataY(SMI130_GYRO_S16 *data_y)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char a_data_u8r[2] = {0, 0};
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_RATE_Y_LSB_VALUEY__REG, a_data_u8r, 2);
+		a_data_u8r[0] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[0],
+		SMI130_GYRO_RATE_Y_LSB_VALUEY);
+		*data_y = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[1]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[0]));
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads rate dataZ from location 06h and 07h
+ * registers
+ *
+ *
+ *
+ *
+ *  \param
+ *      SMI130_GYRO_S16  *data_z   :  Address of data_z
+ *
+ *
+ *  \return
+ *      result of communication routines
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataZ(SMI130_GYRO_S16 *data_z)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char a_data_u8r[2] = {0, 0};
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_RATE_Z_LSB_VALUEZ__REG, a_data_u8r, 2);
+		a_data_u8r[0] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[0],
+		SMI130_GYRO_RATE_Z_LSB_VALUEZ);
+		*data_z = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[1]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[0]));
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads data X,Y and Z from location 02h to 07h
+ *
+ *
+ *
+ *
+ *  \param
+ *      smi130_gyro_data_t *data   :  Address of smi130_gyro_data_t
+ *
+ *
+ *  \return
+ *      result of communication routines
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataXYZ(struct smi130_gyro_data_t *data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char a_data_u8r[6] = {0, 0, 0, 0, 0, 0};
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_RATE_X_LSB_VALUEX__REG, a_data_u8r, 6);
+		/* Data X */
+		a_data_u8r[0] =
+		SMI130_GYRO_GET_BITSLICE(a_data_u8r[0], SMI130_GYRO_RATE_X_LSB_VALUEX);
+		data->datax = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[1]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[0]));
+		/* Data Y */
+		a_data_u8r[2] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[2],
+		SMI130_GYRO_RATE_Y_LSB_VALUEY);
+		data->datay = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[3]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[2]));
+		/* Data Z */
+		a_data_u8r[4] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[4],
+		SMI130_GYRO_RATE_Z_LSB_VALUEZ);
+		data->dataz = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[5]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[4]));
+	}
+	return comres;
+}
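+/*
+ * Illustrative usage sketch and worked example (not part of the Bosch
+ * sources):
+ *
+ *	struct smi130_gyro_data_t d;
+ *
+ *	if (smi130_gyro_get_dataXYZ(&d) == C_SMI130_GYRO_Zero_U8X)
+ *		// d.datax, d.datay and d.dataz hold signed 16-bit rate samples
+ *
+ * The MSB is sign-extended before the shift, so e.g. MSB = 0xFF combined
+ * with LSB bits 0x38 yields (SMI130_GYRO_S16)0xFF38 = -200 LSB.
+ */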
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads data X,Y,Z and Interrupts
+ *							from location 02h to 0Dh
+ *
+ *
+ *
+ *
+ *  \param
+ *      smi130_gyro_data_t *data   :  Address of smi130_gyro_data_t
+ *
+ *
+ *  \return
+ *      result of communication routines
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataXYZI(struct smi130_gyro_data_t *data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char a_data_u8r[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_RATE_X_LSB_VALUEX__REG, a_data_u8r, 12);
+		/* Data X */
+		a_data_u8r[0] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[0],
+		SMI130_GYRO_RATE_X_LSB_VALUEX);
+		data->datax = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[1]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[0]));
+		/* Data Y */
+		a_data_u8r[2] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[2],
+		SMI130_GYRO_RATE_Y_LSB_VALUEY);
+		data->datay = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[3]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[2]));
+		/* Data Z */
+		a_data_u8r[4] = SMI130_GYRO_GET_BITSLICE(a_data_u8r[4],
+		SMI130_GYRO_RATE_Z_LSB_VALUEZ);
+		data->dataz = (SMI130_GYRO_S16)
+		((((SMI130_GYRO_S16)((signed char)a_data_u8r[5]))
+		<< SMI130_GYRO_SHIFT_8_POSITION) | (a_data_u8r[4]));
+		data->intstatus[0] = a_data_u8r[7];
+		data->intstatus[1] = a_data_u8r[8];
+		data->intstatus[2] = a_data_u8r[9];
+		data->intstatus[3] = a_data_u8r[10];
+		data->intstatus[4] = a_data_u8r[11];
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads Temperature from location 08h
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *temp   :  Address of temperature
+ *
+ *
+ *  \return
+ *      result of communication routines
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_Temperature(unsigned char *temperature)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_TEMP_ADDR, &v_data_u8r, 1);
+		*temperature = v_data_u8r;
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API reads the data from the given register
+ *
+ *
+ *
+ *
+ *\param unsigned char addr, unsigned char *data, unsigned char len
+ *                       addr -> Address of the register
+ *                       data -> Address of the variable in which the read
+ *                               value will be stored
+ *                       len  -> Number of bytes to be read.
+ *  \return  results of bus communication function
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_read_register(unsigned char addr,
+unsigned char *data, unsigned char len)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr, addr, data, len);
+	}
+	return comres;
+}
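+/*
+ * Illustrative usage sketch (not part of the Bosch sources): re-reading the
+ * chip id through the generic register accessor:
+ *
+ *	unsigned char chip_id = 0;
+ *
+ *	if (smi130_gyro_read_register(SMI130_GYRO_CHIP_ID_ADDR, &chip_id, 1)
+ *			== C_SMI130_GYRO_Zero_U8X)
+ *		// chip_id now holds the CHIP_ID register contents
+ */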
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API reads the data from the given register
+ *
+ *
+ *
+ *
+ *\param unsigned char addr, unsigned char *data, SMI130_GYRO_S32 len
+ *                       addr -> Address of the register
+ *                       data -> Address of the variable in which the read
+ *                               value will be stored
+ *                       len  -> Number of bytes to be read.
+ *  \return  results of bus communication function
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_burst_read(unsigned char addr,
+unsigned char *data, SMI130_GYRO_S32 len)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BURST_READ_FUNC(p_smi130_gyro->dev_addr,
+		addr, data, len);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API writes data to the given register
+ *
+ *
+ *
+ *
+ *\param unsigned char addr, unsigned char *data, unsigned char len
+ *                   addr -> Address of the register
+ *                   data -> Data to be written to the register
+ *                   len  -> Number of bytes to be written.
+ *
+ *  \return Results of bus communication function
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_write_register(unsigned char addr,
+unsigned char *data, unsigned char len)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+		(p_smi130_gyro->dev_addr, addr, data, len);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads interrupt status 0 register byte from 09h
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *status0_data : Address of status 0 register
+ *
+ *
+ *  \return
+ *      Result of bus communication function
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_0(
+unsigned char *status0_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_STATUSZERO__REG, &v_data_u8r, 1);
+		*status0_data =
+		SMI130_GYRO_GET_BITSLICE(v_data_u8r, SMI130_GYRO_INT_STATUSZERO);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads interrupt status 1 register byte from 0Ah
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *status1_data : Address of status register
+ *
+ *
+ *  \return
+ *      Result of bus communication function
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_1(
+unsigned char *status1_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr, SMI130_GYRO_INT_STATUSONE__REG,
+		&v_data_u8r, 1);
+		*status1_data =
+		SMI130_GYRO_GET_BITSLICE(v_data_u8r, SMI130_GYRO_INT_STATUSONE);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads interrupt status 2 register byte from 0Bh
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *status2_data : Address of status 2 register
+ *
+ *
+ *  \return
+ *      Result of bus communication function
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_2(
+unsigned char *status2_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_STATUSTWO__REG, &v_data_u8r, 1);
+		*status2_data =
+		SMI130_GYRO_GET_BITSLICE(v_data_u8r, SMI130_GYRO_INT_STATUSTWO);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads interrupt status 3 register byte from 0Ch
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *status3_data : Address of status 3 register
+ *
+ *
+ *  \return
+ *      Result of bus communication function
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_3(
+unsigned char *status3_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_STATUSTHREE__REG, &v_data_u8r, 1);
+		*status3_data =
+		SMI130_GYRO_GET_BITSLICE(v_data_u8r, SMI130_GYRO_INT_STATUSTHREE);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API reads the range from register 0x0Fh of
+ * (0 to 2) bits
+ *
+ *
+ *
+ *
+ *\param unsigned char *range
+ *      Range[0....4]
+ *      0 2000 deg/s
+ *      1 1000 deg/s
+ *      2 500 deg/s
+ *      3 250 deg/s
+ *      4 125 deg/s
+ *
+ *
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_range_reg(unsigned char *range)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RANGE_ADDR_RANGE__REG, &v_data_u8r, 1);
+		*range =
+		SMI130_GYRO_GET_BITSLICE(v_data_u8r, SMI130_GYRO_RANGE_ADDR_RANGE);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API sets the range register 0x0Fh
+ * (0 to 2 bits)
+ *
+ *
+ *
+ *
+ *\param unsigned char range
+ *
+ *      Range[0....4]
+ *      0 2000 deg/s
+ *      1 1000 deg/s
+ *      2 500 deg/s
+ *      3 250 deg/s
+ *      4 125 deg/s
+ *
+ *
+ *
+ *
+ *  \return Communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_range_reg(unsigned char range)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (range < C_SMI130_GYRO_Five_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+			(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_RANGE_ADDR_RANGE__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_RANGE_ADDR_RANGE,
+			range);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+			(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_RANGE_ADDR_RANGE__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
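+/*
+ * Illustrative usage sketch (not part of the Bosch sources): selecting the
+ * 500 deg/s full-scale range (code 2 in the table above) and reading it back:
+ *
+ *	unsigned char range = 0;
+ *
+ *	if (smi130_gyro_set_range_reg(2) == C_SMI130_GYRO_Zero_U8X &&
+ *	    smi130_gyro_get_range_reg(&range) == C_SMI130_GYRO_Zero_U8X)
+ *		// range should read back as 2; codes >= 5 are rejected with
+ *		// E_SMI130_GYRO_OUT_OF_RANGE
+ */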
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API reads the high resolution bit of 0x10h
+ * Register 7th bit
+ *
+ *
+ *
+ *
+ *\param unsigned char *high_res
+ *                      Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_res(unsigned char *high_res)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_BW_ADDR_HIGH_RES__REG, &v_data_u8r, 1);
+		*high_res =
+		SMI130_GYRO_GET_BITSLICE(v_data_u8r, SMI130_GYRO_BW_ADDR_HIGH_RES);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API reads the bandwidth register of 0x10h 0 to
+ *  3 bits
+ *
+ *
+ *
+ *
+* \param unsigned char *bandwidth
+ *              pointer to a variable passed as a parameter
+ *
+ *              0 no filter(523 Hz)
+ *              1 230Hz
+ *              2 116Hz
+ *              3 47Hz
+ *              4 23Hz
+ *              5 12Hz
+ *              6 64Hz
+ *              7 32Hz
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_bw(unsigned char *bandwidth)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+		(p_smi130_gyro->dev_addr, SMI130_GYRO_BW_ADDR__REG, &v_data_u8r, 1);
+		*bandwidth = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_BW_ADDR);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API writes the Bandwidth register (0x10h of 0
+ * to 3 bits)
+ *
+ *
+ *
+ *
+ *\param unsigned char bandwidth,
+ *              The bandwidth to be set passed as a parameter
+ *
+ *              0 no filter(523 Hz)
+ *              1 230Hz
+ *              2 116Hz
+ *              3 47Hz
+ *              4 23Hz
+ *              5 12Hz
+ *              6 64Hz
+ *              7 32Hz
+ *
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_bw(unsigned char bandwidth)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_mode_u8r  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_autosleepduration  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (bandwidth < C_SMI130_GYRO_Eight_U8X) {
+			smi130_gyro_get_mode(&v_mode_u8r);
+			if (v_mode_u8r == SMI130_GYRO_MODE_ADVANCEDPOWERSAVING) {
+				smi130_gyro_get_autosleepdur(&v_autosleepduration);
+				smi130_gyro_set_autosleepdur(v_autosleepduration,
+				bandwidth);
+			}
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+			(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BW_ADDR__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_BW_ADDR, bandwidth);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BW_ADDR__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
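+/*
+ * Illustrative usage sketch (not part of the Bosch sources): selecting the
+ * 47 Hz filter bandwidth (code 3 in the table above):
+ *
+ *	if (smi130_gyro_set_bw(3) == C_SMI130_GYRO_Zero_U8X)
+ *		// bandwidth is now 47 Hz; codes >= 8 are rejected with
+ *		// E_SMI130_GYRO_OUT_OF_RANGE
+ *
+ * Note that in advanced power-saving mode this call also refreshes the
+ * autosleep duration for the newly selected bandwidth (see the code above).
+ */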
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API reads the status of External Trigger
+ * selection bits (4 and 5) of the 0x12h register
+ *
+ *
+ *
+ *
+ *\param unsigned char *pwu_ext_tri_sel
+ *                      Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return Communication Results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_pmu_ext_tri_sel(
+unsigned char *pwu_ext_tri_sel)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL__REG, &v_data_u8r, 1);
+		*pwu_ext_tri_sel = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API writes the External Trigger selection
+ * bits (4 and 5) of the 0x12h register
+ *
+ *
+ *
+ *
+ *\param unsigned char pwu_ext_tri_sel
+ *               Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return Communication Results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_pmu_ext_tri_sel(
+unsigned char pwu_ext_tri_sel)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL, pwu_ext_tri_sel);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief  This API is used to get the data high bandwidth bit
+ *
+ *
+ *
+ *
+ *\param unsigned char *high_bw : Address of high_bw
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_bw(unsigned char *high_bw)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW__REG, &v_data_u8r, 1);
+		*high_bw = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the data high bandwidth bit
+ *
+ *
+ *
+ *
+ *\param unsigned char high_bw:
+ *          Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_bw(unsigned char high_bw)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (high_bw < C_SMI130_GYRO_Two_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW__REG,
+			&v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW, high_bw);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW__REG,
+			&v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get shadow dis
+ *
+ *
+ *
+ *
+ *\param unsigned char *shadow_dis : Address of shadow_dis
+ *                       Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_shadow_dis(unsigned char *shadow_dis)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS__REG, &v_data_u8r, 1);
+		*shadow_dis = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set shadow dis
+ *
+ *
+ *
+ *
+ *\param unsigned char shadow_dis
+ *         Value to be written passed as a parameter
+ *
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_shadow_dis(unsigned char shadow_dis)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (shadow_dis < C_SMI130_GYRO_Two_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+			(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS, shadow_dis);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief
+ *               This function is used for the soft reset
+ *     The soft reset register will be written with 0xB6.
+ *
+ *
+ *
+* \param None
+ *
+ *
+ *
+ *  \return Communication results.
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_soft_reset(void)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_SoftReset_u8r  = C_SMI130_GYRO_Zero_U8X;
+	v_SoftReset_u8r = 0xB6;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_BGW_SOFTRESET_ADDR, &v_SoftReset_u8r, 1);
+	}
+	return comres;
+}
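+/*
+ * Illustrative usage sketch (not part of the Bosch sources): issuing a soft
+ * reset and letting the part reboot before the next register access.  The
+ * 30 ms settling time is an assumption; check the SMI130 datasheet for the
+ * required start-up time:
+ *
+ *	smi130_gyro_set_soft_reset();
+ *	gyro_dev.delay_msec(30);	// gyro_dev as in the init sketch above
+ */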
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the data enable bit
+ *
+ *
+ *
+ *
+ *\param unsigned char *data_en : Address of data_en
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_data_enable(unsigned char *data_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_INT_ENABLE0_DATAEN__REG, &v_data_u8r, 1);
+		*data_en = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE0_DATAEN);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the data enable bit
+ *
+ *
+ *
+ *
+ *  \param unsigned char data_en:
+ *          Value to be written passed as a parameter
+ *           0 --> Disable
+ *           1 --> Enable
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_data_en(unsigned char data_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+			(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_ENABLE0_DATAEN__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_ENABLE0_DATAEN, data_en);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+			(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_ENABLE0_DATAEN__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
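+/*
+ * Illustrative usage sketch (not part of the Bosch sources): enabling the
+ * new-data (data ready) interrupt source and reading the bit back:
+ *
+ *	unsigned char data_en = 0;
+ *
+ *	if (smi130_gyro_set_data_en(1) == C_SMI130_GYRO_Zero_U8X &&
+ *	    smi130_gyro_get_data_enable(&data_en) == C_SMI130_GYRO_Zero_U8X)
+ *		// data_en should now read back as 1
+ */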
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get fifo enable bit
+ *
+ *
+ *
+ *
+ *  \param unsigned char *fifo_en : Address of fifo_en
+ *                         Pointer to a variable passed as a parameter
+
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_enable(unsigned char *fifo_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_INT_ENABLE0_FIFOEN__REG, &v_data_u8r, 1);
+		*fifo_en = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE0_FIFOEN);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set fifo enable bit
+ *
+ *
+ *
+ *
+ *  \param unsigned char fifo_en:
+ *          Value to be written passed as a parameter
+ *           0 --> Disable
+ *           1 --> Enable
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_enable(unsigned char fifo_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (fifo_en < C_SMI130_GYRO_Two_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE0_FIFOEN__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE0_FIFOEN, fifo_en);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE0_FIFOEN__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
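+/*
+ * Usage sketch (illustrative only): setting and reading back the FIFO
+ * enable bit with the pair of accessors above. The zero-on-success check
+ * is an assumption about the bus callbacks.
+ *
+ *	unsigned char fifo_en = 0;
+ *	if (smi130_gyro_set_fifo_enable(1) == C_SMI130_GYRO_Zero_U8X)
+ *		smi130_gyro_get_fifo_enable(&fifo_en);
+ */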
+/*****************************************************************************
+ * Description: *//**brief This API reads the status of the Auto offset
+ * Enable bit
+ *                      (0x15 Reg 3rd Bit)
+ *
+ *
+ *
+ *
+ *  \param unsigned char *offset_en
+ *              address of a variable,
+ *
+ *
+ *
+ *  \return   Communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_auto_offset_en(
+unsigned char *offset_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN__REG, &v_data_u8r, 1);
+		*offset_en = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API sets the Auto offset enable bit
+ *                      (Reg 0x15 3rd Bit)
+ *
+ *
+ *
+ *
+ *  \param unsigned char offset_en
+ *                      0 --> Disable
+ *                      1 --> Enable
+ *
+ *  \return  Communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_auto_offset_en(unsigned char offset_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN, offset_en);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
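+/*
+ * Usage sketch (illustrative only): turning on auto offset compensation
+ * and verifying the bit with the getter above.
+ *
+ *	unsigned char offset_en = 0;
+ *	smi130_gyro_set_auto_offset_en(1);
+ *	smi130_gyro_get_auto_offset_en(&offset_en);
+ */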
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the output type status
+ *
+ *
+ *
+ *
+ *  \param unsigned char param,unsigned char *int_od
+ *                  SMI130_GYRO_INT1    ->   0
+ *                  SMI130_GYRO_INT2    ->   1
+ *                  int_od : open drain   ->   1
+ *                           push pull    ->   0
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_od(unsigned char param,
+unsigned char *int_od)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_INT1:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_INT_ENABLE1_IT1_OD__REG, &v_data_u8r, 1);
+			*int_od = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT1_OD);
+			break;
+		case SMI130_GYRO_INT2:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_INT_ENABLE1_IT2_OD__REG, &v_data_u8r, 1);
+			*int_od = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT2_OD);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the output type status
+ *
+ *
+ *
+ *
+ *  \param unsigned char param,unsigned char int_od
+ *                  SMI130_GYRO_INT1    ->   0
+ *                  SMI130_GYRO_INT2    ->   1
+ *                  int_od : open drain   ->   1
+ *                           push pull    ->   0
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_od(unsigned char param,
+unsigned char int_od)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_INT1:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT1_OD__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT1_OD, int_od);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT1_OD__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_INT2:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT2_OD__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT2_OD, int_od);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT2_OD__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
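+/*
+ * Usage sketch (illustrative only): configuring INT1 as push-pull and INT2
+ * as open drain with the output-type setter above.
+ *
+ *	smi130_gyro_set_int_od(SMI130_GYRO_INT1, 0);	push pull
+ *	smi130_gyro_set_int_od(SMI130_GYRO_INT2, 1);	open drain
+ */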
+/*****************************************************************************
+ * Description: *//**brief This API is used to get Active Level status
+ *
+ *
+ *
+ *
+ *  \param unsigned char param,unsigned char *int_lvl
+ *                  SMI130_GYRO_INT1    ->    0
+ *                  SMI130_GYRO_INT2    ->    1
+ *                  int_lvl : Active HI   ->   1
+ *                            Active LO   ->   0
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_lvl(unsigned char param,
+unsigned char *int_lvl)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_INT1:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_INT_ENABLE1_IT1_LVL__REG, &v_data_u8r, 1);
+			*int_lvl = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT1_LVL);
+			break;
+		case SMI130_GYRO_INT2:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_INT_ENABLE1_IT2_LVL__REG, &v_data_u8r, 1);
+			*int_lvl = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT2_LVL);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set Active Level status
+ *
+ *
+ *
+ *
+ *  \param unsigned char param,unsigned char int_lvl
+ *                  SMI130_GYRO_INT1    ->    0
+ *                  SMI130_GYRO_INT2    ->    1
+ *                  int_lvl : Active HI   ->   1
+ *                            Active LO   ->   0
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_lvl(unsigned char param,
+unsigned char int_lvl)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_INT1:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT1_LVL__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT1_LVL, int_lvl);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT1_LVL__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_INT2:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT2_LVL__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_ENABLE1_IT2_LVL, int_lvl);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_ENABLE1_IT2_LVL__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
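+/*
+ * Usage sketch (illustrative only): making both interrupt pins active high.
+ *
+ *	smi130_gyro_set_int_lvl(SMI130_GYRO_INT1, 1);	active high
+ *	smi130_gyro_set_int_lvl(SMI130_GYRO_INT2, 1);	active high
+ */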
+/*****************************************************************************
+ * Description: *//**brief This API is used to get High Interrupt1
+ *
+ *
+ *
+ *
+ *  \param unsigned char *int1_high : Address of int1_high
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_high(unsigned char *int1_high)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_INT_MAP_0_INT1_HIGH__REG, &v_data_u8r, 1);
+		*int1_high = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_MAP_0_INT1_HIGH);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set High Interrupt1
+ *
+ *
+ *
+ *
+ *  \param unsigned char int1_high
+ *                  0 -> Disable
+ *                  1 -> Enable
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_high(unsigned char int1_high)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_0_INT1_HIGH__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_MAP_0_INT1_HIGH, int1_high);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_0_INT1_HIGH__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
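+/*
+ * Usage sketch (illustrative only): routing the high-rate interrupt to the
+ * INT1 pin and reading the mapping back.
+ *
+ *	unsigned char int1_high = 0;
+ *	smi130_gyro_set_int1_high(1);
+ *	smi130_gyro_get_int1_high(&int1_high);
+ */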
+/*****************************************************************************
+ * Description: *//**brief This API is used to get Any Interrupt1
+ *
+ *
+ *
+ *
+ *  \param unsigned char *int1_any : Address of int1_any
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_any(unsigned char *int1_any)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_INT_MAP_0_INT1_ANY__REG, &v_data_u8r, 1);
+		*int1_any = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_MAP_0_INT1_ANY);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set Any Interrupt1
+ *
+ *
+ *
+ *
+ *\param unsigned char int1_any
+ *                   0 -> Disable
+ *                   1 -> Enable
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_any(unsigned char int1_any)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_0_INT1_ANY__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_MAP_0_INT1_ANY, int1_any);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_0_INT1_ANY__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
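+/*
+ * Usage sketch (illustrative only): mapping the any-motion interrupt to the
+ * INT1 pin.
+ *
+ *	smi130_gyro_set_int1_any(1);
+ */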
+/*****************************************************************************
+ * Description: *//**brief This API is used to get data Interrupt1 and data
+ * Interrupt2
+ *
+ *
+ *
+ *
+ *  \param unsigned char axis,unsigned char *int_data
+ *                       axis :
+ *                       SMI130_GYRO_INT1_DATA -> 0
+ *                       SMI130_GYRO_INT2_DATA -> 1
+ *                       int_data :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_data(unsigned char axis,
+unsigned char *int_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_INT1_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_MAP_1_INT1_DATA__REG, &v_data_u8r, 1);
+			*int_data = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_MAP_1_INT1_DATA);
+			break;
+		case SMI130_GYRO_INT2_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_MAP_1_INT2_DATA__REG, &v_data_u8r, 1);
+			*int_data = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_MAP_1_INT2_DATA);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set data Interrupt1 and data
+ * Interrupt2
+ *
+ *
+ *
+ *
+ * \param unsigned char axis,unsigned char int_data
+ *                       axis :
+ *                       SMI130_GYRO_INT1_DATA -> 0
+ *                       SMI130_GYRO_INT2_DATA -> 1
+ *                       int_data :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_data(unsigned char axis,
+unsigned char int_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_INT1_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_DATA__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_DATA, int_data);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_DATA__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_INT2_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_DATA__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_DATA, int_data);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_DATA__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
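+/*
+ * Usage sketch (illustrative only): mapping the data-ready interrupt to INT1
+ * and leaving INT2 unmapped, using the selectors documented above.
+ *
+ *	smi130_gyro_set_int_data(SMI130_GYRO_INT1_DATA, 1);
+ *	smi130_gyro_set_int_data(SMI130_GYRO_INT2_DATA, 0);
+ */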
+/*****************************************************************************
+ * Description: *//**brief This API is used to get fast offset and auto
+ * offset Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char axis,unsigned char *int2_offset
+ *                       axis :
+ *                       SMI130_GYRO_AUTO_OFFSET -> 1
+ *                       SMI130_GYRO_FAST_OFFSET -> 2
+ *                       int2_offset :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_offset(unsigned char axis,
+unsigned char *int2_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_FAST_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_MAP_1_INT2_FAST_OFFSET__REG, &v_data_u8r, 1);
+			*int2_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_FAST_OFFSET);
+			break;
+		case SMI130_GYRO_AUTO_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET__REG, &v_data_u8r, 1);
+			*int2_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set fast offset and auto
+ * offset Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char axis,unsigned char int2_offset
+ *                       axis :
+ *                       SMI130_GYRO_AUTO_OFFSET -> 1
+ *                       SMI130_GYRO_FAST_OFFSET -> 2
+ *                       int2_offset :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_offset(unsigned char axis,
+unsigned char int2_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_FAST_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_FAST_OFFSET__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_FAST_OFFSET, int2_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_FAST_OFFSET__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_AUTO_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET, int2_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get fast offset and auto
+ * offset Interrupt1
+ *
+ *
+ *
+ *
+ *\param unsigned char axis,unsigned char *int1_offset
+ *                       axis :
+ *                       SMI130_GYRO_AUTO_OFFSET -> 1
+ *                       SMI130_GYRO_FAST_OFFSET -> 2
+ *                       int1_offset :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_offset(unsigned char axis,
+unsigned char *int1_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_FAST_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_MAP_1_INT1_FAST_OFFSET__REG, &v_data_u8r, 1);
+			*int1_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_FAST_OFFSET);
+			break;
+		case SMI130_GYRO_AUTO_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET__REG, &v_data_u8r, 1);
+			*int1_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set fast offset and auto
+ * offset Interrupt1
+ *
+ *
+ *
+ *
+ *\param unsigned char axis,unsigned char int1_offset
+ *                       axis :
+ *                       SMI130_GYRO_AUTO_OFFSET -> 1
+ *                       SMI130_GYRO_FAST_OFFSET -> 2
+ *                       int1_offset :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_offset(unsigned char axis,
+unsigned char int1_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_FAST_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_FAST_OFFSET__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_FAST_OFFSET, int1_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_FAST_OFFSET__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_AUTO_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET, int1_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
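+/*
+ * Usage sketch (illustrative only): routing the fast offset interrupt to
+ * INT1 and the auto offset interrupt to INT2.
+ *
+ *	smi130_gyro_set_int1_offset(SMI130_GYRO_FAST_OFFSET, 1);
+ *	smi130_gyro_set_int2_offset(SMI130_GYRO_AUTO_OFFSET, 1);
+ */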
+/*****************************************************************************
+ * Description: *//**brief This API is used to get status of FIFO Interrupt
+ *
+ *
+ *
+ *
+ *\param unsigned char *int_fifo : Address of int_fifo
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_fifo(unsigned char *int_fifo)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_INT_STATUS1_FIFO_INT__REG, &v_data_u8r, 1);
+		*int_fifo = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_STATUS1_FIFO_INT);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get FIFO Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char *int_fifo
+ *                  int_fifo :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_fifo(unsigned char *int_fifo)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_MAP_1_INT2_FIFO__REG, &v_data_u8r, 1);
+		*int_fifo = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_FIFO);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get FIFO Interrupt1
+ *
+ *
+ *
+ *
+ *\param unsigned char *int_fifo
+ *                  int_fifo :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_fifo(unsigned char *int_fifo)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_MAP_1_INT1_FIFO__REG, &v_data_u8r, 1);
+		*int_fifo = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_FIFO);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to map the FIFO Interrupt to
+ * the INT1 or INT2 pin
+ *
+ *
+ *  \param unsigned char axis,unsigned char int_fifo
+ *                       axis :
+ *                       SMI130_GYRO_INT1 -> 0
+ *                       SMI130_GYRO_INT2 -> 1
+ *                       int_fifo :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_fifo(unsigned char axis,
+unsigned char int_fifo)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_INT1:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			 SMI130_GYRO_MAP_1_INT1_FIFO__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_FIFO, int_fifo);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_FIFO__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_INT2:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_FIFO__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_FIFO, int_fifo);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_FIFO__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set FIFO Interrupt1
+ *
+ *
+ *
+ *
+ *\param unsigned char fifo_int1
+ *                  fifo_int1 :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_fifo(unsigned char fifo_int1)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (fifo_int1 < C_SMI130_GYRO_Two_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_FIFO__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT1_FIFO, fifo_int1);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT1_FIFO__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set FIFO Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char fifo_int2
+ *                  fifo_int2 :
+ *                       Disable     -> 0
+ *                       Enable      -> 1
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_fifo(unsigned char fifo_int2)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (fifo_int2 < C_SMI130_GYRO_Two_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_FIFO__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MAP_1_INT2_FIFO, fifo_int2);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MAP_1_INT2_FIFO__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
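+/*
+ * Usage sketch (illustrative only): the FIFO interrupt can be mapped either
+ * per pin with smi130_gyro_set_int1_fifo() and smi130_gyro_set_int2_fifo()
+ * or through the generic helper, e.g.
+ *
+ *	smi130_gyro_set_int_fifo(SMI130_GYRO_INT1, 1);
+ *	smi130_gyro_set_int2_fifo(0);
+ */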
+/*****************************************************************************
+ * Description: *//**brief This API is used to get High Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char *int2_high : Address of int2_high
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_high(unsigned char *int2_high)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_2_INT2_HIGH__REG, &v_data_u8r, 1);
+		*int2_high = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_MAP_2_INT2_HIGH);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set High Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char int2_high
+ *                  0 -> Disable
+ *                  1 -> Enable
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_high(unsigned char int2_high)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_2_INT2_HIGH__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_MAP_2_INT2_HIGH, int2_high);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_2_INT2_HIGH__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get Any Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char *int2_any : Address of int2_any
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_any(unsigned char *int2_any)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_2_INT2_ANY__REG, &v_data_u8r, 1);
+		*int2_any = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_MAP_2_INT2_ANY);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set Any Interrupt2
+ *
+ *
+ *
+ *
+ *\param unsigned char int2_any
+ *                  0 -> Disable
+ *                  1 -> Enable
+ *
+ *
+ *
+ *
+ * \return  communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_any(unsigned char int2_any)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_2_INT2_ANY__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_MAP_2_INT2_ANY, int2_any);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_MAP_2_INT2_ANY__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get slow offset and fast
+ * offset unfilt data
+ *
+ *
+ *
+ *\param unsigned char param,unsigned char *offset_unfilt
+ *                  param :
+ *                  SMI130_GYRO_SLOW_OFFSET -> 0
+ *                  SMI130_GYRO_FAST_OFFSET -> 2
+ *                  offset_unfilt: Enable  -> 1
+ *                                Disable -> 0
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_offset_unfilt(unsigned char param,
+unsigned char *offset_unfilt)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_SLOW_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT__REG,
+			&v_data_u8r, 1);
+			*offset_unfilt = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT);
+			break;
+		case SMI130_GYRO_FAST_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT__REG,
+			&v_data_u8r, 1);
+			*offset_unfilt = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set slow offset and fast
+ * offset unfilt data
+ *
+ *
+ *
+ *
+ *\param unsigned char param,unsigned char offset_unfilt
+ *                  param :
+ *                  SMI130_GYRO_SLOW_OFFSET -> 0
+ *                  SMI130_GYRO_FAST_OFFSET -> 2
+ *                  offset_unfilt: Enable  -> 1
+ *                                Disable -> 0
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset_unfilt(unsigned char param,
+unsigned char offset_unfilt)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_SLOW_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT__REG,
+			&v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT, offset_unfilt);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT__REG,
+			&v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_FAST_OFFSET:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT__REG,
+			&v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT, offset_unfilt);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT__REG,
+			&v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
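+/*
+ * Usage sketch (illustrative only): selecting unfiltered data for the fast
+ * offset path while keeping the slow offset path filtered.
+ *
+ *	smi130_gyro_set_offset_unfilt(SMI130_GYRO_FAST_OFFSET, 1);
+ *	smi130_gyro_set_offset_unfilt(SMI130_GYRO_SLOW_OFFSET, 0);
+ */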
+/*****************************************************************************
+ * Description: *//**brief This API is used to get High and Any
+ * unfilt data
+ *
+ *
+ *
+ *
+ *\param unsigned char param,unsigned char *unfilt_data
+ *                  param :
+ *
+ *                  SMI130_GYRO_HIGH_UNFILT_DATA      -> 1
+ *                  SMI130_GYRO_ANY_UNFILT_DATA       -> 3
+ *
+ *                  unfilt_data:   Enable  -> 1
+ *                                Disable -> 0
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_unfilt_data(unsigned char param,
+unsigned char *unfilt_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_HIGH_UNFILT_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA__REG,
+			&v_data_u8r, 1);
+			*unfilt_data = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA);
+			break;
+		case SMI130_GYRO_ANY_UNFILT_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA__REG, &v_data_u8r, 1);
+			*unfilt_data = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set High and Any
+ * unfilt data
+ *
+ *
+ *
+ *
+ *\param unsigned char param,unsigned char unfilt_data
+ *                  param :
+ *
+ *                  SMI130_GYRO_HIGH_UNFILT_DATA      -> 1
+ *                  SMI130_GYRO_ANY_UNFILT_DATA       -> 3
+ *
+ *                  unfilt_data:   Enable  -> 1
+ *                                Disable -> 0
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_unfilt_data(unsigned char param,
+unsigned char unfilt_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_HIGH_UNFILT_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA__REG,
+			&v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA, unfilt_data);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA__REG,
+			&v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_ANY_UNFILT_DATA:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA, unfilt_data);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get Any Threshold
+ *
+ *
+ *
+ *
+ *\param unsigned char *any_th : Address of any_th
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_any_th(unsigned char *any_th)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_1_ADDR_ANY_TH__REG, &v_data_u8r, 1);
+		*any_th = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_1_ADDR_ANY_TH);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set Any Threshold
+ *
+ *
+ *
+ *
+ *\param unsigned char any_th:
+ *          Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_any_th(unsigned char any_th)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_1_ADDR_ANY_TH__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_1_ADDR_ANY_TH, any_th);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_1_ADDR_ANY_TH__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
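+/*
+ * Usage sketch (illustrative only): programming the any-motion threshold
+ * and reading it back. The value 4 is an arbitrary example, not a
+ * recommended setting.
+ *
+ *	unsigned char any_th = 0;
+ *	smi130_gyro_set_any_th(4);
+ *	smi130_gyro_get_any_th(&any_th);
+ */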
+/*****************************************************************************
+ * Description: *//**brief This API is used to get Awake Duration
+ *
+ *
+ *
+ *
+ *\param unsigned char *awake_dur : Address of awake_dur
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_awake_dur(unsigned char *awake_dur)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_2_ADDR_AWAKE_DUR__REG, &v_data_u8r, 1);
+		*awake_dur = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_2_ADDR_AWAKE_DUR);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set Awake Duration
+ *
+ *
+ *
+ *
+ *\param unsigned char awake_dur:
+ *          Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_awake_dur(unsigned char awake_dur)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_2_ADDR_AWAKE_DUR__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_2_ADDR_AWAKE_DUR, awake_dur);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_2_ADDR_AWAKE_DUR__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get Any Duration Sample
+ *
+ *
+ *
+ *
+ *\param unsigned char *dursample : Address of dursample
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_any_dursample(unsigned char *dursample)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE__REG, &v_data_u8r, 1);
+		*dursample = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set Any Duration Sample
+ *
+ *
+ *
+ *
+ *\param unsigned char dursample:
+ *          Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_any_dursample(unsigned char dursample)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE, dursample);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
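+/*
+ * Usage sketch (illustrative only): configuring the any-motion timing pair.
+ * The values are arbitrary examples; the datasheet defines their scaling.
+ *
+ *	smi130_gyro_set_awake_dur(1);
+ *	smi130_gyro_set_any_dursample(2);
+ */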
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of Any Enable
+ * Channel X,Y,Z
+ *
+ *
+ *
+ *
+ *\param unsigned char channel,unsigned char *data
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       data :
+ *                       Enable  -> 1
+ *                       disable -> 0
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_any_en_ch(unsigned char channel,
+unsigned char *data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_X__REG, &v_data_u8r, 1);
+			*data = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_X);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Y__REG, &v_data_u8r, 1);
+			*data = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_INT_2_ADDR_ANY_EN_Y);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Z__REG, &v_data_u8r, 1);
+			*data = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_INT_2_ADDR_ANY_EN_Z);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of Any Enable
+ * for channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char data
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       data :
+ *                       Enable  -> 1
+ *                       Disable -> 0
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_any_en_ch(unsigned char channel,
+unsigned char data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_X__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_X, data);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_X__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Y__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Y, data);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Y__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Z__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Z, data);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_2_ADDR_ANY_EN_Z__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
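+/*
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * driver): enabling the Any Enable bit on all three axes by iterating over
+ * the channel constants accepted by smi130_gyro_set_any_en_ch(). The loop
+ * assumes the axis constants are consecutive (X = 0, Y = 1, Z = 2) as
+ * documented above, and that the bus callbacks return zero on success.
+ *
+ *	static SMI130_GYRO_RETURN_FUNCTION_TYPE example_any_en_xyz(void)
+ *	{
+ *		SMI130_GYRO_RETURN_FUNCTION_TYPE res = C_SMI130_GYRO_Zero_U8X;
+ *		unsigned char axis;
+ *
+ *		for (axis = SMI130_GYRO_X_AXIS; axis <= SMI130_GYRO_Z_AXIS; axis++)
+ *			res += smi130_gyro_set_any_en_ch(axis, 1);
+ *		return res;
+ *	}
+ */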
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of the FIFO
+ * Watermark Enable
+ *
+ *\param unsigned char *fifo_wn_en : Pointer to the returned status
+ *                       Enabled  -> 1
+ *                       Disabled -> 0
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_watermark_enable(
+unsigned char *fifo_wn_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_INT_4_FIFO_WM_EN__REG, &v_data_u8r, 1);
+		*fifo_wn_en = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_4_FIFO_WM_EN);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the FIFO Watermark Enable
+ *
+ *\param unsigned char fifo_wn_en
+ *                       Enable  -> 1
+ *                       Disable -> 0
+ *          Any other value returns E_SMI130_GYRO_OUT_OF_RANGE
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_watermark_enable(
+unsigned char fifo_wn_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (fifo_wn_en < C_SMI130_GYRO_Two_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_4_FIFO_WM_EN__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_INT_4_FIFO_WM_EN, fifo_wn_en);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_INT_4_FIFO_WM_EN__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
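+/*
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * driver): toggling the FIFO watermark interrupt enable. The setter above
+ * rejects any value other than 0 or 1 with E_SMI130_GYRO_OUT_OF_RANGE, so
+ * the caller only needs to normalise the flag.
+ *
+ *	static SMI130_GYRO_RETURN_FUNCTION_TYPE example_fifo_wm_irq(unsigned char enable)
+ *	{
+ *		return smi130_gyro_set_fifo_watermark_enable(enable ? 1 : 0);
+ *	}
+ */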
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Interrupt Reset
+ *
+ *\param unsigned char reset_int
+ *                    1 -> Reset All Interrupts
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_reset_int(unsigned char reset_int)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_RESET_INT__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RST_LATCH_ADDR_RESET_INT, reset_int);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_RESET_INT__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
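+/*
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * driver): clearing all latched interrupt conditions after servicing an
+ * interrupt, by writing 1 to the reset_int bit through the helper above.
+ *
+ *	static void example_clear_latched_interrupts(void)
+ *	{
+ *		(void)smi130_gyro_set_reset_int(1);
+ *	}
+ */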
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Offset Reset
+ *
+ *\param unsigned char offset_reset
+ *                  1 -> Resets All the Offsets
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset_reset(
+unsigned char offset_reset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_OFFSET_RESET__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RST_LATCH_ADDR_OFFSET_RESET, offset_reset);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_OFFSET_RESET__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the Latch Status
+ *
+ *\param unsigned char *latch_status : Pointer to the variable in which the
+ *                                     latch status is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_latch_status(
+unsigned char *latch_status)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS__REG, &v_data_u8r, 1);
+		*latch_status = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Latch Status
+ *
+ *\param unsigned char latch_status : Value to be written
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_latch_status(
+unsigned char latch_status)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS, latch_status);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the Latch Interrupt
+ * setting
+ *
+ *\param unsigned char *latch_int : Pointer to the variable in which the
+ *                                  latch interrupt setting is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_latch_int(unsigned char *latch_int)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT__REG, &v_data_u8r, 1);
+		*latch_int = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Latch Interrupt
+ * setting
+ *
+ *\param unsigned char latch_int : Value to be written
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_latch_int(unsigned char latch_int)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT, latch_int);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the High Hysteresis value
+ * of channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char *high_hy
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       *high_hy : Pointer to the variable in which the
+ *                                  hysteresis value of the selected axis
+ *                                  is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_hy(unsigned char channel,
+unsigned char *high_hy)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_X__REG, &v_data_u8r, 1);
+			*high_hy = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_HY_X);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_Y__REG, &v_data_u8r, 1);
+			*high_hy = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_HY_Y);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_Z__REG, &v_data_u8r, 1);
+			*high_hy = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_HY_Z);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the High Hysteresis value
+ * of channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char high_hy
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       high_hy : Hysteresis value to be written for the
+ *                                 selected axis
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_hy(unsigned char channel,
+unsigned char high_hy)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_X__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_HIGH_HY_X, high_hy);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_X__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_Y__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_HIGH_HY_Y, high_hy);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_Y__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_Z__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_HIGH_HY_Z, high_hy);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_HY_Z__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the High Threshold value
+ * of channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char *high_th
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       *high_th : Pointer to the variable in which the
+ *                                  threshold value of the selected axis
+ *                                  is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_th(unsigned char channel,
+unsigned char *high_th)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_X__REG, &v_data_u8r, 1);
+			*high_th = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_TH_X);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_Y__REG, &v_data_u8r, 1);
+			*high_th = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_TH_Y);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_Z__REG, &v_data_u8r, 1);
+			*high_th = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_TH_Z);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the High Threshold value
+ * of channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char high_th
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       high_th : Threshold value to be written for the
+ *                                 selected axis
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_th(unsigned char channel,
+unsigned char high_th)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_X__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_TH_X, high_th);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_X__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_Y__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_TH_Y, high_th);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_Y__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_Z__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_TH_Z, high_th);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_TH_Z__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of High Enable
+ * for channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char *high_en
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       *high_en :
+ *                       Enabled  -> 1
+ *                       Disabled -> 0
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_en_ch(unsigned char channel,
+unsigned char *high_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_X__REG, &v_data_u8r, 1);
+			*high_en = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_EN_X);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_Y__REG, &v_data_u8r, 1);
+			*high_en = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_EN_Y);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_Z__REG, &v_data_u8r, 1);
+			*high_en = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_EN_Z);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of High Enable
+ * for channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char high_en
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       high_en :
+ *                       Enable  -> 1
+ *                       Disable -> 0
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_en_ch(unsigned char channel,
+unsigned char high_en)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_X__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_EN_X, high_en);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_X__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_Y__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_EN_Y, high_en);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_Y__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_Z__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_HIGH_EN_Z, high_en);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_EN_Z__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the High Duration of
+ * channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char *high_dur
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       *high_dur : Pointer to the variable in which the
+ *                                   duration value of the selected axis
+ *                                   is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_dur_ch(unsigned char channel,
+unsigned char *high_dur)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_DUR_X_ADDR, &v_data_u8r, 1);
+			*high_dur = v_data_u8r;
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_DUR_Y_ADDR, &v_data_u8r, 1);
+			*high_dur = v_data_u8r;
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_DUR_Z_ADDR, &v_data_u8r, 1);
+			*high_dur = v_data_u8r;
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the High Duration of
+ * channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char high_dur
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       high_dur : Duration value to be written for the
+ *                                  selected axis
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_dur_ch(unsigned char channel,
+unsigned char high_dur)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			v_data_u8r = high_dur;
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_DUR_X_ADDR, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			v_data_u8r = high_dur;
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_DUR_Y_ADDR, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			v_data_u8r = high_dur;
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_HIGH_DUR_Z_ADDR, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
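+/*
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * driver): configuring the high-rate interrupt on the X axis with the
+ * per-axis helpers above. The threshold, hysteresis and duration codes are
+ * placeholders only; consult the SMI130 datasheet for the encoding of each
+ * field. Assumes the bus callbacks return zero on success.
+ *
+ *	static SMI130_GYRO_RETURN_FUNCTION_TYPE example_high_rate_x(void)
+ *	{
+ *		SMI130_GYRO_RETURN_FUNCTION_TYPE res = C_SMI130_GYRO_Zero_U8X;
+ *
+ *		res += smi130_gyro_set_high_th(SMI130_GYRO_X_AXIS, 0x0A);
+ *		res += smi130_gyro_set_high_hy(SMI130_GYRO_X_AXIS, 0x01);
+ *		res += smi130_gyro_set_high_dur_ch(SMI130_GYRO_X_AXIS, 0x13);
+ *		res += smi130_gyro_set_high_en_ch(SMI130_GYRO_X_AXIS, 1);
+ *		return res;
+ *	}
+ */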
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the Slow Offset Threshold
+ *
+ *\param unsigned char *offset_th : Pointer to the variable in which the
+ *                                  threshold value is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_slow_offset_th(
+unsigned char *offset_th)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_SLOW_OFFSET_TH__REG, &v_data_u8r, 1);
+		*offset_th = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_SLOW_OFFSET_TH);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Slow Offset Threshold
+ *
+ *\param unsigned char offset_th : Value to be written
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_slow_offset_th(unsigned char offset_th)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_SLOW_OFFSET_TH__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_SLOW_OFFSET_TH, offset_th);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_SLOW_OFFSET_TH__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the Slow Offset Duration
+ *
+ *\param unsigned char *offset_dur : Pointer to the variable in which the
+ *                                   duration value is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_slow_offset_dur(
+unsigned char *offset_dur)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_SLOW_OFFSET_DUR__REG, &v_data_u8r, 1);
+		*offset_dur = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_SLOW_OFFSET_DUR);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Slow Offset Duration
+ *
+ *\param unsigned char offset_dur : Value to be written
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_slow_offset_dur(
+unsigned char offset_dur)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_SLOW_OFFSET_DUR__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_SLOW_OFFSET_DUR, offset_dur);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_SLOW_OFFSET_DUR__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the Slow Offset Enable
+ * status of channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char *slow_offset
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       *slow_offset :
+ *                       Enabled  -> 1
+ *                       Disabled -> 0
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_slow_offset_en_ch(
+unsigned char channel, unsigned char *slow_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_X__REG, &v_data_u8r, 1);
+			*slow_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_SLOW_OFFSET_EN_X);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_Y__REG, &v_data_u8r, 1);
+			*slow_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_SLOW_OFFSET_EN_Y);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_Z__REG, &v_data_u8r, 1);
+			*slow_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_SLOW_OFFSET_EN_Z);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Slow Offset Enable
+ * status of channel X, Y or Z
+ *
+ *\param unsigned char channel, unsigned char slow_offset
+ *                       channel :
+ *                       SMI130_GYRO_X_AXIS -> 0
+ *                       SMI130_GYRO_Y_AXIS -> 1
+ *                       SMI130_GYRO_Z_AXIS -> 2
+ *                       slow_offset :
+ *                       Enable  -> 1
+ *                       Disable -> 0
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_slow_offset_en_ch(
+unsigned char channel, unsigned char slow_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_X__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_SLOW_OFFSET_EN_X, slow_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_X__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_Y__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_SLOW_OFFSET_EN_Y, slow_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_Y__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_Z__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_SLOW_OFFSET_EN_Z,
+			slow_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_SLOW_OFFSET_EN_Z__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
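+/*
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * driver): turning on slow offset compensation for all axes. The threshold
+ * and duration codes are placeholders; see the datasheet for their meaning.
+ * Assumes consecutive axis constants and zero-on-success bus callbacks.
+ *
+ *	static SMI130_GYRO_RETURN_FUNCTION_TYPE example_slow_offset_on(void)
+ *	{
+ *		SMI130_GYRO_RETURN_FUNCTION_TYPE res = C_SMI130_GYRO_Zero_U8X;
+ *		unsigned char axis;
+ *
+ *		res += smi130_gyro_set_slow_offset_th(0x02);
+ *		res += smi130_gyro_set_slow_offset_dur(0x01);
+ *		for (axis = SMI130_GYRO_X_AXIS; axis <= SMI130_GYRO_Z_AXIS; axis++)
+ *			res += smi130_gyro_set_slow_offset_en_ch(axis, 1);
+ *		return res;
+ *	}
+ */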
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the Fast Offset WordLength
+ * or the Auto Offset WordLength
+ *
+ *\param unsigned char channel, unsigned char *offset_wl
+ *                       channel :
+ *                       SMI130_GYRO_AUTO_OFFSET_WL -> 0
+ *                       SMI130_GYRO_FAST_OFFSET_WL -> 1
+ *                       *offset_wl : Pointer to the variable in which the
+ *                                    word length value is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_offset_wl(unsigned char channel,
+unsigned char *offset_wl)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_AUTO_OFFSET_WL:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_AUTO_OFFSET_WL__REG, &v_data_u8r, 1);
+			*offset_wl = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_AUTO_OFFSET_WL);
+			break;
+		case SMI130_GYRO_FAST_OFFSET_WL:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_WL__REG, &v_data_u8r, 1);
+			*offset_wl = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_FAST_OFFSET_WL);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the Fast Offset WordLength
+ * or the Auto Offset WordLength
+ *
+ *\param unsigned char channel, unsigned char offset_wl
+ *                       channel :
+ *                       SMI130_GYRO_AUTO_OFFSET_WL -> 0
+ *                       SMI130_GYRO_FAST_OFFSET_WL -> 1
+ *                       offset_wl : Value to be written
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset_wl(
+unsigned char channel, unsigned char offset_wl)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_AUTO_OFFSET_WL:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_AUTO_OFFSET_WL__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_AUTO_OFFSET_WL, offset_wl);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_AUTO_OFFSET_WL__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_FAST_OFFSET_WL:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_WL__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FAST_OFFSET_WL, offset_wl);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_WL__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to enable the fast offset
+ * (writes 1 to the FAST_OFFSET_EN bit)
+ *
+ *\param None
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_enable_fast_offset(void)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FAST_OFFSET_EN__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_FAST_OFFSET_EN, 1);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FAST_OFFSET_EN__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API reads the fast offset enable status of
+ * the X, Y and Z channels from register 0x32, bits 0 to 2
+ *
+ *\param unsigned char *fast_offset : Pointer to the variable in which the
+ *                                    per-channel enable bits are returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fast_offset_en_ch(
+unsigned char *fast_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+			(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FAST_OFFSET_EN_XYZ__REG, &v_data_u8r, 1);
+		*fast_offset = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FAST_OFFSET_EN_XYZ);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API writes the fast offset enable bit of the
+ * selected channel (register 0x32, bits 0 to 2)
+ *
+ *\param unsigned char channel, unsigned char fast_offset
+ *                      channel --> SMI130_GYRO_X_AXIS, SMI130_GYRO_Y_AXIS,
+ *                                  SMI130_GYRO_Z_AXIS
+ *                      fast_offset --> 0 - Disable
+ *                                      1 - Enable
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fast_offset_en_ch(
+unsigned char channel, unsigned char fast_offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres  = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (channel) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_EN_X__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FAST_OFFSET_EN_X, fast_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_EN_X__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_EN_Y__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FAST_OFFSET_EN_Y, fast_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_EN_Y__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_EN_Z__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FAST_OFFSET_EN_Z, fast_offset);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FAST_OFFSET_EN_Z__REG, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
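+/*
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * driver): running a fast offset compensation cycle on all axes. The
+ * per-axis enable bits are set first; smi130_gyro_enable_fast_offset() then
+ * sets the FAST_OFFSET_EN bit. Whether the device must be kept still during
+ * the cycle is a datasheet-level requirement not covered here. Assumes
+ * consecutive axis constants and zero-on-success bus callbacks.
+ *
+ *	static SMI130_GYRO_RETURN_FUNCTION_TYPE example_fast_offset_xyz(void)
+ *	{
+ *		SMI130_GYRO_RETURN_FUNCTION_TYPE res = C_SMI130_GYRO_Zero_U8X;
+ *		unsigned char axis;
+ *
+ *		for (axis = SMI130_GYRO_X_AXIS; axis <= SMI130_GYRO_Z_AXIS; axis++)
+ *			res += smi130_gyro_set_fast_offset_en_ch(axis, 1);
+ *		res += smi130_gyro_enable_fast_offset();
+ *		return res;
+ *	}
+ */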
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of NVM program
+ * remain
+ *
+ *\param unsigned char *nvm_remain : Pointer to the variable in which the
+ *                                   nvm_remain status is returned
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_nvm_remain(unsigned char *nvm_remain)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_REMAIN__REG, &v_data_u8r, 1);
+		*nvm_remain = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_REMAIN);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of NVM load
+ *
+ *\param unsigned char nvm_load
+ *              1 -> load offset value from NVM
+ *              0 -> no action
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_nvm_load(unsigned char nvm_load)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_LOAD__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_LOAD, nvm_load);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_LOAD__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
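+/*
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * driver): loading the stored offset values from NVM, for example during
+ * initialisation, by writing 1 to the nvm_load bit through the helper above.
+ *
+ *	static void example_load_nvm_offsets(void)
+ *	{
+ *		(void)smi130_gyro_set_nvm_load(1);
+ *	}
+ */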
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of NVM program
+ * ready
+ *
+ *\param unsigned char *nvm_rdy
+ *             1 -> program seq finished
+ *             0 -> program seq in progress
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_nvm_rdy(unsigned char *nvm_rdy)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_RDY__REG, &v_data_u8r, 1);
+		*nvm_rdy = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_RDY);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of NVM program
+ * trigger
+ *
+ *\param unsigned char prog_trig
+ *            1 -> trigger the program seq (write only)
+ *            0 -> no action
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_nvm_prog_trig(unsigned char prog_trig)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_TRIG__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_TRIG, prog_trig);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_TRIG__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of NVM program
+ * mode
+ *
+ *\param unsigned char *prog_mode : Pointer to the variable in which the
+ *                                  status is returned
+ *                  1 -> program mode enabled
+ *                  0 -> program mode disabled
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_nvm_prog_mode(unsigned char *prog_mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE__REG, &v_data_u8r, 1);
+		*prog_mode = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE);
+	}
+	return comres;
+}
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of NVM program
+ * mode
+ *
+ *\param unsigned char prog_mode
+ *                   1 -> Enable program mode
+ *                   0 -> Disable program mode
+ *
+ *  \return communication results
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_nvm_prog_mode(unsigned char prog_mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE, prog_mode);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
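+/*
+ * Illustrative sketch (not part of the original driver): one way a caller
+ * might combine the NVM helpers above -- enable program mode, wait for the
+ * nvm_rdy flag, then issue the program trigger.  It assumes p_smi130_gyro
+ * has already been set up by the driver's init routine; the retry count and
+ * delay are arbitrary example values.
+ */
+#if 0
+static SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_example_nvm_program(void)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char nvm_rdy = C_SMI130_GYRO_Zero_U8X;
+	unsigned char retries = 10;
+
+	/* Enter NVM program mode */
+	comres = smi130_gyro_set_nvm_prog_mode(C_SMI130_GYRO_One_U8X);
+	/* Poll until the NVM reports ready (or retries are exhausted) */
+	do {
+		comres += smi130_gyro_get_nvm_rdy(&nvm_rdy);
+		p_smi130_gyro->delay_msec(1);
+	} while ((nvm_rdy == C_SMI130_GYRO_Zero_U8X) && (--retries));
+	/* Fire the (write-only) program trigger */
+	if (nvm_rdy)
+		comres += smi130_gyro_set_nvm_prog_trig(C_SMI130_GYRO_One_U8X);
+	return comres;
+}
+#endif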
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of i2c wdt
+ *
+ *
+ *
+ *
+ *\param unsigned char i2c_wdt, unsigned char *prog_mode
+ *            SMI130_GYRO_I2C_WDT_SEL               1
+ *            SMI130_GYRO_I2C_WDT_EN                0
+ *            *prog_mode : Address of prog_mode
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_i2c_wdt(unsigned char i2c_wdt,
+unsigned char *prog_mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (i2c_wdt) {
+		case SMI130_GYRO_I2C_WDT_EN:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN__REG,
+			&v_data_u8r, 1);
+			*prog_mode = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN);
+			break;
+		case SMI130_GYRO_I2C_WDT_SEL:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL__REG,
+			&v_data_u8r, 1);
+			*prog_mode = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of i2c wdt
+ *
+ *
+ *
+ *
+ *\param unsigned char i2c_wdt, unsigned char prog_mode
+ *            SMI130_GYRO_I2C_WDT_SEL               1
+ *            SMI130_GYRO_I2C_WDT_EN                0
+ *            prog_mode : Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_i2c_wdt(unsigned char i2c_wdt,
+unsigned char prog_mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (i2c_wdt) {
+		case SMI130_GYRO_I2C_WDT_EN:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN__REG,
+			&v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN, prog_mode);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN__REG,
+			&v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_I2C_WDT_SEL:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL__REG,
+			&v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL, prog_mode);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL__REG,
+			&v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
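+/*
+ * Illustrative sketch (not part of the original driver): enabling the I2C
+ * watchdog by writing the select bit first and then the enable bit through
+ * the accessor above.  Assumes p_smi130_gyro is already initialised; the
+ * chosen select value is only an example.
+ */
+#if 0
+static SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_example_enable_i2c_wdt(void)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+
+	/* Pick the watchdog timeout select bit ... */
+	comres = smi130_gyro_set_i2c_wdt(SMI130_GYRO_I2C_WDT_SEL,
+		C_SMI130_GYRO_One_U8X);
+	/* ... then switch the watchdog on */
+	comres += smi130_gyro_set_i2c_wdt(SMI130_GYRO_I2C_WDT_EN,
+		C_SMI130_GYRO_One_U8X);
+	return comres;
+}
+#endif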
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief  This API is used to get the status of spi3
+ *
+ *
+ *
+ *
+* \param unsigned char *spi3 : Address of spi3
+ *                                Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_spi3(unsigned char *spi3)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3__REG, &v_data_u8r, 1);
+		*spi3 = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of spi3
+ *
+ *
+ *
+ *
+ *\param unsigned char spi3
+ *
+ *
+ *
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_spi3(unsigned char spi3)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3__REG, &v_data_u8r, 1);
+		v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3, spi3);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3__REG, &v_data_u8r, 1);
+	}
+	return comres;
+}
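+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of the FIFO tag
+ * (Enable -> 1, Disable -> 0)
+ *
+ *
+ *\param unsigned char *tag : Address of tag
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/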
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_tag(unsigned char *tag)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_CGF1_ADDR_TAG__REG, &v_data_u8r, 1);
+		*tag = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FIFO_CGF1_ADDR_TAG);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of Tag
+ *
+ *
+ *
+ *
+ *\param unsigned char tag
+ *                  Enable  -> 1
+ *                  Disable -> 0
+ *
+ *
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_tag(unsigned char tag)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (tag < C_SMI130_GYRO_Two_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF1_ADDR_TAG__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FIFO_CGF1_ADDR_TAG, tag);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF1_ADDR_TAG__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get Water Mark Level
+ *
+ *
+ *
+ *
+ *\param unsigned char *water_mark_level : Address of water_mark_level
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_watermarklevel(
+unsigned char *water_mark_level)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_CGF1_ADDR_WML__REG, &v_data_u8r, 1);
+		*water_mark_level = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_FIFO_CGF1_ADDR_WML);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set Water Mark Level
+ *
+ *
+ *
+ *
+ *\param unsigned char water_mark_level:
+ *          Value to be written passed as a parameter
+
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_watermarklevel(
+unsigned char water_mark_level)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (water_mark_level < C_SMI130_GYRO_OneTwentyEight_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF1_ADDR_WML__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FIFO_CGF1_ADDR_WML, water_mark_level);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF1_ADDR_WML__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of offset
+ *
+ *
+ *
+ *
+ *\param unsigned char axis, SMI130_GYRO_S16 *offset
+ *                         axis ->
+ *                   SMI130_GYRO_X_AXIS     ->      0
+ *                   SMI130_GYRO_Y_AXIS     ->      1
+ *                   SMI130_GYRO_Z_AXIS     ->      2
+ *                   offset -> Any valid value
+ *
+ *
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_offset(unsigned char axis,
+SMI130_GYRO_S16 *offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data1_u8r = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data2_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_X_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X__REG, &v_data1_u8r, 1);
+			v_data1_u8r = SMI130_GYRO_GET_BITSLICE(v_data1_u8r,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X);
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_X__REG, &v_data2_u8r, 1);
+			v_data2_u8r = SMI130_GYRO_GET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_X);
+			v_data2_u8r = ((v_data2_u8r <<
+			SMI130_GYRO_SHIFT_2_POSITION) | v_data1_u8r);
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+			(p_smi130_gyro->dev_addr, SMI130_GYRO_OFC2_ADDR, &v_data1_u8r, 1);
+			*offset = (SMI130_GYRO_S16)((((SMI130_GYRO_S16)
+				((signed char)v_data1_u8r))
+			<< SMI130_GYRO_SHIFT_4_POSITION) | (v_data2_u8r));
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y__REG, &v_data1_u8r, 1);
+			v_data1_u8r = SMI130_GYRO_GET_BITSLICE(v_data1_u8r,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y);
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Y__REG, &v_data2_u8r, 1);
+			v_data2_u8r = SMI130_GYRO_GET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Y);
+			v_data2_u8r = ((v_data2_u8r <<
+			SMI130_GYRO_SHIFT_1_POSITION) | v_data1_u8r);
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC3_ADDR, &v_data1_u8r, 1);
+			*offset = (SMI130_GYRO_S16)((((SMI130_GYRO_S16)
+				((signed char)v_data1_u8r))
+			<< SMI130_GYRO_SHIFT_4_POSITION) | (v_data2_u8r));
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z__REG, &v_data1_u8r, 1);
+			v_data1_u8r = SMI130_GYRO_GET_BITSLICE(v_data1_u8r,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z);
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Z__REG, &v_data2_u8r, 1);
+			v_data2_u8r = SMI130_GYRO_GET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Z);
+			v_data2_u8r = ((v_data2_u8r << SMI130_GYRO_SHIFT_1_POSITION)
+				| v_data1_u8r);
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC4_ADDR, &v_data1_u8r, 1);
+			*offset = (SMI130_GYRO_S16)((((SMI130_GYRO_S16)
+				((signed char)v_data1_u8r))
+			<< SMI130_GYRO_SHIFT_4_POSITION) | (v_data2_u8r));
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of offset
+ *
+ *
+ *
+ *
+ *\param unsigned char axis, SMI130_GYRO_S16 offset
+ *                         axis ->
+ *                   SMI130_GYRO_X_AXIS     ->      0
+ *                   SMI130_GYRO_Y_AXIS     ->      1
+ *                   SMI130_GYRO_Z_AXIS     ->      2
+ *                   offset -> Any valid value
+ *
+ *
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset(
+unsigned char axis, SMI130_GYRO_S16 offset)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data1_u8r = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data2_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (axis) {
+		case SMI130_GYRO_X_AXIS:
+			v_data1_u8r = ((signed char) (offset & 0x0FF0))
+			>> SMI130_GYRO_SHIFT_4_POSITION;
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC2_ADDR, &v_data1_u8r, 1);
+
+			v_data1_u8r = (unsigned char) (offset & 0x000C);
+			v_data2_u8r = SMI130_GYRO_SET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_X, v_data1_u8r);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_X__REG, &v_data2_u8r, 1);
+
+			v_data1_u8r = (unsigned char) (offset & 0x0003);
+			v_data2_u8r = SMI130_GYRO_SET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X, v_data1_u8r);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X__REG, &v_data2_u8r, 1);
+			break;
+		case SMI130_GYRO_Y_AXIS:
+			v_data1_u8r = ((signed char) (offset & 0x0FF0)) >>
+			SMI130_GYRO_SHIFT_4_POSITION;
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC3_ADDR, &v_data1_u8r, 1);
+
+			v_data1_u8r = (unsigned char) (offset & 0x000E);
+			v_data2_u8r = SMI130_GYRO_SET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Y, v_data1_u8r);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Y__REG, &v_data2_u8r, 1);
+
+			v_data1_u8r = (unsigned char) (offset & 0x0001);
+			v_data2_u8r = SMI130_GYRO_SET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y, v_data1_u8r);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y__REG, &v_data2_u8r, 1);
+			break;
+		case SMI130_GYRO_Z_AXIS:
+			v_data1_u8r = ((signed char) (offset & 0x0FF0)) >>
+			SMI130_GYRO_SHIFT_4_POSITION;
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC4_ADDR, &v_data1_u8r, 1);
+
+			v_data1_u8r = (unsigned char) (offset & 0x000E);
+			v_data2_u8r = SMI130_GYRO_SET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Z, v_data1_u8r);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_OFC1_ADDR_OFFSET_Z__REG, &v_data2_u8r, 1);
+
+			v_data1_u8r = (unsigned char) (offset & 0x0001);
+			v_data2_u8r = SMI130_GYRO_SET_BITSLICE(v_data2_u8r,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z, v_data1_u8r);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z__REG, &v_data2_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
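+/*
+ * Note (added for clarity): each axis offset is a 12-bit value split across
+ * three registers.  Bits 11:4 live in OFC2/OFC3/OFC4 for X/Y/Z respectively,
+ * the next lower bits in the per-axis slice of OFC1, and the lowest bit(s) in
+ * the general purpose register TRIM_GP0 (two bits for X, one bit for Y and Z).
+ * smi130_gyro_get_offset() above reassembles the value in that order.
+ */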
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of general
+ * purpose register
+ *
+ *
+ *
+ *
+ *\param unsigned char param,unsigned char *value
+ *             param ->
+ *              SMI130_GYRO_GP0                      0
+ *              SMI130_GYRO_GP1                      1
+ *               *value -> Address of value
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_gp(unsigned char param,
+unsigned char *value)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_GP0:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_GP0__REG, &v_data_u8r, 1);
+			*value = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+				SMI130_GYRO_TRIM_GP0_ADDR_GP0);
+			break;
+		case SMI130_GYRO_GP1:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP1_ADDR, &v_data_u8r, 1);
+			*value = v_data_u8r;
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of general
+ * purpose register
+ *
+ *
+ *
+ *
+ *\param unsigned char param,unsigned char value
+ *             param ->
+ *              SMI130_GYRO_GP0                      0
+ *              SMI130_GYRO_GP1                      1
+ *             value -> Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_gp(unsigned char param,
+unsigned char value)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		switch (param) {
+		case SMI130_GYRO_GP0:
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_GP0__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_TRIM_GP0_ADDR_GP0, value);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP0_ADDR_GP0__REG, &v_data_u8r, 1);
+			break;
+		case SMI130_GYRO_GP1:
+			v_data_u8r = value;
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_TRIM_GP1_ADDR, &v_data_u8r, 1);
+			break;
+		default:
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+			break;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads FIFO data from location 3Fh
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *fifo_data : Address of FIFO data bits
+ *
+ *
+ *
+ *
+ *  \return result of communication routines
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_FIFO_data_reg(unsigned char *fifo_data)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_DATA_ADDR, &v_data_u8r, 1);
+		*fifo_data = v_data_u8r;
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads the FIFO status register byte from 0Eh
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *fifo_status : Address of Fifo status register
+ *
+ *
+ *  \return
+ *      Result of bus communication function
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifostatus_reg(
+unsigned char *fifo_status)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_STATUS_ADDR, fifo_status, 1);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads the frame counter from the FIFO status register (0Eh)
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *fifo_framecount: Address of the FIFO frame counter
+ *
+ *
+ *  \return
+ *      Result of bus communication function
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_framecount(
+unsigned char *fifo_framecount)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r  = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_STATUS_FRAME_COUNTER__REG, &v_data_u8r, 1);
+		*fifo_framecount = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_FIFO_STATUS_FRAME_COUNTER);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief Reads the overrun flag from the FIFO status register (0Eh)
+ *
+ *
+ *
+ *
+ *  \param
+ *      unsigned char *fifo_overrun: Address of the FIFO overrun flag
+ *
+ *
+ *  \return
+ *      Result of bus communication function
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_overrun(
+unsigned char *fifo_overrun)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_STATUS_OVERRUN__REG, &v_data_u8r, 1);
+		*fifo_overrun = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_FIFO_STATUS_OVERRUN);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of fifo mode
+ *
+ *
+ *
+ *
+ *\param unsigned char *mode : Address of mode
+ *                         fifo_mode  0 --> Bypass
+ *                         1 --> FIFO
+ *                         2 --> Stream
+ *                         3 --> Reserved
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_mode(unsigned char *mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_CGF0_ADDR_MODE__REG, &v_data_u8r, 1);
+		*mode = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FIFO_CGF0_ADDR_MODE);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the FIFO mode
+ *
+ *
+ *
+ *
+* \param unsigned char mode
+ *                      0 --> BYPASS
+ *                      1 --> FIFO
+ *                      2 --> STREAM
+ *
+ *
+ *  \return Communication Results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_mode(unsigned char mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (mode < C_SMI130_GYRO_Four_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF0_ADDR_MODE__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FIFO_CGF0_ADDR_MODE, mode);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF0_ADDR_MODE__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
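+/*
+ * Illustrative sketch (not part of the original driver): a minimal FIFO
+ * session built from the helpers in this file -- program a watermark, switch
+ * to FIFO mode (1), and later drain bytes through the FIFO data register.
+ * It assumes p_smi130_gyro has been initialised by the driver; the watermark
+ * of 32 frames is an example value, and a real caller would read a complete
+ * frame (several data-register bytes) per counted frame.
+ */
+#if 0
+static SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_example_fifo_session(
+unsigned char *buf, unsigned char buf_len)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char frames = C_SMI130_GYRO_Zero_U8X;
+	unsigned char i = C_SMI130_GYRO_Zero_U8X;
+
+	comres = smi130_gyro_set_fifo_watermarklevel(32);
+	comres += smi130_gyro_set_fifo_mode(C_SMI130_GYRO_One_U8X);
+	/* ...let the sensor collect data, then see how many frames arrived */
+	comres += smi130_gyro_get_fifo_framecount(&frames);
+	if (frames == C_SMI130_GYRO_Zero_U8X)
+		return comres;
+	/* Pull raw bytes out of the FIFO data register */
+	for (i = 0; i < buf_len; i++)
+		comres += smi130_gyro_get_FIFO_data_reg(&buf[i]);
+	return comres;
+}
+#endif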
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the status of fifo data
+ * sel
+ *
+ *
+ *
+ *
+ *\param unsigned char *data_sel : Address of data_sel
+ *         data_sel --> [0:3]
+ *         0 --> X,Y and Z (DEFAULT)
+ *         1 --> X only
+ *         2 --> Y only
+ *         3 --> Z only
+ *
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_data_sel(unsigned char *data_sel)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL__REG, &v_data_u8r, 1);
+		*data_sel = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the status of fifo data
+ * sel
+ *
+ *
+ *
+ *
+ *\param unsigned char data_sel
+ *         data_sel --> [0:3]
+ *         0 --> X,Y and Z (DEFAULT)
+ *         1 --> X only
+ *         2 --> Y only
+ *         3 --> Z only
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_data_sel(unsigned char data_sel)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (data_sel < C_SMI130_GYRO_Four_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL__REG, &v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL, data_sel);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL__REG, &v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to get the operating modes of the
+ * sensor
+ *
+ *
+ *
+ *
+ *\param unsigned char * mode : Address of mode
+ *                       0 -> NORMAL
+ *                       1 -> DEEP SUSPEND
+ *                       2 -> SUSPEND
+ *                       3 -> FAST POWERUP
+ *                       4 -> ADVANCED POWERSAVING
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_mode(unsigned char *mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data1 = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data2 = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data3 = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_MODE_LPM1_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+		comres += p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		SMI130_GYRO_MODE_LPM2_ADDR, &data2, C_SMI130_GYRO_One_U8X);
+		data1  = (data1 & 0xA0) >> 5;
+		data3  = (data2 & 0x40) >> 6;
+		data2  = (data2 & 0x80) >> 7;
+		if (data3 == 0x01) {
+			*mode = SMI130_GYRO_MODE_ADVANCEDPOWERSAVING;
+		} else if ((data1 == 0x00) && (data2 == 0x00)) {
+			*mode = SMI130_GYRO_MODE_NORMAL;
+		} else if ((data1 == 0x01) || (data1 == 0x05)) {
+			*mode = SMI130_GYRO_MODE_DEEPSUSPEND;
+		} else if ((data1 == 0x04) && (data2 == 0x00)) {
+			*mode = SMI130_GYRO_MODE_SUSPEND;
+		} else if ((data1 == 0x04) && (data2 == 0x01)) {
+			*mode = SMI130_GYRO_MODE_FASTPOWERUP;
+		}
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the operating Modes of the
+ * sensor
+ *
+ *
+ *
+ *
+ *\param unsigned char mode
+ *                       0 -> NORMAL
+ *                       1 -> DEEP SUSPEND
+ *                       2 -> SUSPEND
+ *                       3 -> FAST POWERUP
+ *                       4 -> ADVANCED POWERSAVING
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_mode(unsigned char mode)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data1 = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data2 = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data3 = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_autosleepduration = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_bw_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (mode < C_SMI130_GYRO_Five_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM1_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR, &data2, C_SMI130_GYRO_One_U8X);
+			switch (mode) {
+			case SMI130_GYRO_MODE_NORMAL:
+				data1  = SMI130_GYRO_SET_BITSLICE(data1,
+				SMI130_GYRO_MODE_LPM1, C_SMI130_GYRO_Zero_U8X);
+				data2  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP,
+				C_SMI130_GYRO_Zero_U8X);
+				data3  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING,
+				C_SMI130_GYRO_Zero_U8X);
+				comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM1_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+			p_smi130_gyro->delay_msec(1);/* A minimum delay of at least
+			450 us is required between multiple writes. */
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR, &data3, C_SMI130_GYRO_One_U8X);
+				break;
+			case SMI130_GYRO_MODE_DEEPSUSPEND:
+				data1  = SMI130_GYRO_SET_BITSLICE(data1,
+				SMI130_GYRO_MODE_LPM1, C_SMI130_GYRO_One_U8X);
+				data2  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP,
+				C_SMI130_GYRO_Zero_U8X);
+				data3  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING,
+				C_SMI130_GYRO_Zero_U8X);
+				comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM1_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+			p_smi130_gyro->delay_msec(1);/* A minimum delay of at least
+			450 us is required between multiple writes. */
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR, &data3, C_SMI130_GYRO_One_U8X);
+				break;
+			case SMI130_GYRO_MODE_SUSPEND:
+				data1  = SMI130_GYRO_SET_BITSLICE(data1,
+				SMI130_GYRO_MODE_LPM1, C_SMI130_GYRO_Four_U8X);
+				data2  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP,
+				C_SMI130_GYRO_Zero_U8X);
+				data3  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING,
+				C_SMI130_GYRO_Zero_U8X);
+				comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM1_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+			p_smi130_gyro->delay_msec(1);/* A minimum delay of at least
+			450 us is required between multiple writes. */
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR, &data3, C_SMI130_GYRO_One_U8X);
+				break;
+			case SMI130_GYRO_MODE_FASTPOWERUP:
+				data1  = SMI130_GYRO_SET_BITSLICE(data1,
+				SMI130_GYRO_MODE_LPM1, C_SMI130_GYRO_Four_U8X);
+				data2  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP,
+				C_SMI130_GYRO_One_U8X);
+				data3  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING,
+				C_SMI130_GYRO_Zero_U8X);
+				comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM1_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+			p_smi130_gyro->delay_msec(1);/* A minimum delay of at least
+			450 us is required between multiple writes. */
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR, &data3, C_SMI130_GYRO_One_U8X);
+				break;
+			case SMI130_GYRO_MODE_ADVANCEDPOWERSAVING:
+				/* Configuring the proper settings for auto
+				sleep duration */
+				smi130_gyro_get_bw(&v_bw_u8r);
+				smi130_gyro_get_autosleepdur(&v_autosleepduration);
+				smi130_gyro_set_autosleepdur(v_autosleepduration,
+				v_bw_u8r);
+				comres += p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+					(p_smi130_gyro->dev_addr,
+				SMI130_GYRO_MODE_LPM2_ADDR, &data2,
+				C_SMI130_GYRO_One_U8X);
+				/* Configuring the advanced power saving mode*/
+				data1  = SMI130_GYRO_SET_BITSLICE(data1,
+				SMI130_GYRO_MODE_LPM1, C_SMI130_GYRO_Zero_U8X);
+				data2  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP,
+				C_SMI130_GYRO_Zero_U8X);
+				data3  = SMI130_GYRO_SET_BITSLICE(data2,
+				SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING,
+				C_SMI130_GYRO_One_U8X);
+				comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM1_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+			p_smi130_gyro->delay_msec(1);/* A minimum delay of at least
+			450 us is required between multiple writes. */
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR, &data3, C_SMI130_GYRO_One_U8X);
+				break;
+			}
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
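+/*
+ * Illustrative sketch (not part of the original driver): putting the device
+ * to sleep and waking it up again with the mode helpers above.  Assumes
+ * p_smi130_gyro is initialised; the 30 ms wake-up delay is an example value,
+ * not a datasheet figure.
+ */
+#if 0
+static SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_example_suspend_resume(void)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char mode = C_SMI130_GYRO_Zero_U8X;
+
+	comres = smi130_gyro_set_mode(SMI130_GYRO_MODE_SUSPEND);
+	/* ...later, bring the part back up... */
+	comres += smi130_gyro_set_mode(SMI130_GYRO_MODE_NORMAL);
+	p_smi130_gyro->delay_msec(30);
+	/* Read the mode back to confirm the transition */
+	comres += smi130_gyro_get_mode(&mode);
+	return comres;
+}
+#endif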
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to perform a self test on the
+ * sensor
+ *
+ *
+ *
+ *
+ *\param unsigned char *result
+ *
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_selftest(unsigned char *result)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data1 = C_SMI130_GYRO_Zero_U8X;
+	unsigned char data2 = C_SMI130_GYRO_Zero_U8X;
+
+	if (p_smi130_gyro == SMI130_GYRO_NULL)
+		return  E_SMI130_GYRO_NULL_PTR;
+
+	comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+	SMI130_GYRO_SELF_TEST_ADDR, &data1, C_SMI130_GYRO_One_U8X);
+	data2  = SMI130_GYRO_GET_BITSLICE(data1, SMI130_GYRO_SELF_TEST_ADDR_RATEOK);
+	data1  = SMI130_GYRO_SET_BITSLICE(data1, SMI130_GYRO_SELF_TEST_ADDR_TRIGBIST,
+	C_SMI130_GYRO_One_U8X);
+	comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC(p_smi130_gyro->dev_addr,
+	SMI130_GYRO_SELF_TEST_ADDR_TRIGBIST__REG, &data1, C_SMI130_GYRO_One_U8X);
+
+	/* Waiting time to complete the selftest process */
+	p_smi130_gyro->delay_msec(10);
+
+	/* Read the self test result bit bist_fail */
+	comres += p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+	SMI130_GYRO_SELF_TEST_ADDR_BISTFAIL__REG, &data1, C_SMI130_GYRO_One_U8X);
+	data1  = SMI130_GYRO_GET_BITSLICE(data1, SMI130_GYRO_SELF_TEST_ADDR_BISTFAIL);
+	if ((data1 == 0x00) && (data2 == 0x01))
+		*result = C_SMI130_GYRO_SUCCESS;
+	else
+		*result = C_SMI130_GYRO_FAILURE;
+	return comres;
+}
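+/*
+ * Illustrative sketch (not part of the original driver): running the built-in
+ * self test and reporting pass/fail.  Assumes p_smi130_gyro is initialised
+ * and the device is in normal mode; the pass/fail handling is only an
+ * example, not a prescribed policy.
+ */
+#if 0
+static SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_example_selftest(
+unsigned char *passed)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char result = C_SMI130_GYRO_FAILURE;
+
+	comres = smi130_gyro_selftest(&result);
+	*passed = (result == C_SMI130_GYRO_SUCCESS) ?
+		C_SMI130_GYRO_One_U8X : C_SMI130_GYRO_Zero_U8X;
+	return comres;
+}
+#endif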
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief  This API is used to get the auto sleep duration
+ *
+ *
+ *
+ *
+ *\param unsigned char *duration : Address of auto sleep duration
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_autosleepdur(unsigned char *duration)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR__REG, &v_data_u8r, 1);
+		*duration = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the auto sleep duration
+ *
+ *
+ *
+ *
+ *\param unsigned char duration, unsigned char bandwith
+ *          duration: auto sleep duration value to be written
+ *          bandwith: current bandwidth, used to clamp the duration
+ *          to the minimum allowed for that bandwidth
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_autosleepdur(unsigned char duration,
+unsigned char bandwith)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_autosleepduration_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+			(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR__REG,
+			&v_data_u8r, 1);
+			if (duration < C_SMI130_GYRO_Eight_U8X) {
+				switch (bandwith) {
+				case C_SMI130_GYRO_No_Filter_U8X:
+					if (duration >
+					C_SMI130_GYRO_4ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_4ms_AutoSleepDur_U8X;
+					break;
+				case C_SMI130_GYRO_BW_230Hz_U8X:
+					if (duration >
+					C_SMI130_GYRO_4ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_4ms_AutoSleepDur_U8X;
+					break;
+				case C_SMI130_GYRO_BW_116Hz_U8X:
+					if (duration >
+					C_SMI130_GYRO_4ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_4ms_AutoSleepDur_U8X;
+					break;
+				case C_SMI130_GYRO_BW_47Hz_U8X:
+					if (duration >
+					C_SMI130_GYRO_5ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_5ms_AutoSleepDur_U8X;
+					break;
+				case C_SMI130_GYRO_BW_23Hz_U8X:
+					if (duration >
+					C_SMI130_GYRO_10ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_10ms_AutoSleepDur_U8X;
+					break;
+				case C_SMI130_GYRO_BW_12Hz_U8X:
+					if (duration >
+					C_SMI130_GYRO_20ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+					v_autosleepduration_u8r =
+					C_SMI130_GYRO_20ms_AutoSleepDur_U8X;
+					break;
+				case C_SMI130_GYRO_BW_64Hz_U8X:
+					if (duration >
+					C_SMI130_GYRO_10ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_10ms_AutoSleepDur_U8X;
+					break;
+				case C_SMI130_GYRO_BW_32Hz_U8X:
+					if (duration >
+					C_SMI130_GYRO_20ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_20ms_AutoSleepDur_U8X;
+					break;
+				default:
+					if (duration >
+					C_SMI130_GYRO_4ms_AutoSleepDur_U8X)
+						v_autosleepduration_u8r =
+						duration;
+					else
+						v_autosleepduration_u8r =
+						C_SMI130_GYRO_4ms_AutoSleepDur_U8X;
+					break;
+				}
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR,
+			v_autosleepduration_u8r);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR__REG,
+			&v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
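+/*
+ * Illustrative sketch (not part of the original driver): choosing an auto
+ * sleep duration that is consistent with the current bandwidth, mirroring
+ * what smi130_gyro_set_mode() does before enabling advanced power saving.
+ * Assumes p_smi130_gyro is initialised.
+ */
+#if 0
+static SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_example_autosleep(void)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char bw = C_SMI130_GYRO_Zero_U8X;
+	unsigned char dur = C_SMI130_GYRO_Zero_U8X;
+
+	comres = smi130_gyro_get_bw(&bw);
+	comres += smi130_gyro_get_autosleepdur(&dur);
+	/* Re-apply the duration; the setter clamps it to the bandwidth's
+	 * minimum before writing it to the LPM2 register. */
+	comres += smi130_gyro_set_autosleepdur(dur, bw);
+	return comres;
+}
+#endif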
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief  This API is used to get the sleep duration
+ *
+ *
+ *
+ *
+ *\param unsigned char *duration : Address of sleep duration
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_sleepdur(unsigned char *duration)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(p_smi130_gyro->dev_addr,
+		 SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR__REG, &v_data_u8r, 1);
+		*duration = SMI130_GYRO_GET_BITSLICE(v_data_u8r,
+		SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR);
+	}
+	return comres;
+}
+/* Compiler Switch if applicable
+#ifdef
+
+#endif
+*/
+/*****************************************************************************
+ * Description: *//**brief This API is used to set the sleep duration
+ *
+ *
+ *
+ *
+ *\param unsigned char duration:
+ *          Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_sleepdur(unsigned char duration)
+{
+	SMI130_GYRO_RETURN_FUNCTION_TYPE comres = C_SMI130_GYRO_Zero_U8X;
+	unsigned char v_data_u8r = C_SMI130_GYRO_Zero_U8X;
+	if (p_smi130_gyro == SMI130_GYRO_NULL) {
+		return  E_SMI130_GYRO_NULL_PTR;
+	} else {
+		if (duration < C_SMI130_GYRO_Eight_U8X) {
+			comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR__REG,
+			&v_data_u8r, 1);
+			v_data_u8r = SMI130_GYRO_SET_BITSLICE(v_data_u8r,
+			SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR, duration);
+			comres += p_smi130_gyro->SMI130_GYRO_BUS_WRITE_FUNC
+				(p_smi130_gyro->dev_addr,
+			SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR__REG,
+			&v_data_u8r, 1);
+		} else {
+			comres = E_SMI130_GYRO_OUT_OF_RANGE;
+		}
+	}
+	return comres;
+}
+
diff --git a/drivers/input/sensors/smi130/smi130_gyro.h b/drivers/input/sensors/smi130/smi130_gyro.h
new file mode 100644
index 0000000..38e52ac
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_gyro.h
@@ -0,0 +1,4705 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL), Version 2, June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename smi130_gyro.h
+ * @date    2013/11/25
+ * @Modification Date 2018/08/28 18:20
+ * @id       "8fcde22"
+ * @version  1.5
+ *
+ * @brief    Header of SMI130_GYRO API
+*/
+
+/* user defined code to be added here ... */
+#ifndef __SMI130_GYRO_H__
+#define __SMI130_GYRO_H__
+
+#ifdef __KERNEL__
+#define SMI130_GYRO_U16 unsigned short       /* 16 bit achieved with short */
+#define SMI130_GYRO_S16 signed short
+#define SMI130_GYRO_S32 signed int           /* 32 bit achieved with int   */
+#else
+#include <limits.h> /*needed to test integer limits */
+
+
+/* find correct data type for signed/unsigned 16 bit variables \
+by checking max of unsigned variant */
+#if USHRT_MAX == 0xFFFF
+		/* 16 bit achieved with short */
+		#define SMI130_GYRO_U16 unsigned short
+		#define SMI130_GYRO_S16 signed short
+#elif UINT_MAX == 0xFFFF
+		/* 16 bit achieved with int */
+		#define SMI130_GYRO_U16 unsigned int
+		#define SMI130_GYRO_S16 signed int
+#else
+		#error SMI130_GYRO_U16 and SMI130_GYRO_S16 could not be
+		#error defined automatically, please do so manually
+#endif
+
+/* find correct data type for signed 32 bit variables */
+#if INT_MAX == 0x7FFFFFFF
+		/* 32 bit achieved with int */
+		#define SMI130_GYRO_S32 signed int
+#elif LONG_MAX == 0x7FFFFFFF
+		/* 32 bit achieved with long int */
+		#define SMI130_GYRO_S32 signed long int
+#else
+		#error SMI130_GYRO_S32 could not be
+		#error defined automatically, please do so manually
+#endif
+#endif
+
+/**\brief defines the return parameter type of the SMI130_GYRO_WR_FUNCTION */
+#define SMI130_GYRO_BUS_WR_RETURN_TYPE char
+
+/**\brief defines the calling parameter types of the SMI130_GYRO_WR_FUNCTION
+*/
+#define SMI130_GYRO_BUS_WR_PARAM_TYPES unsigned char, unsigned char,\
+unsigned char *, unsigned char
+
+/**\brief links the order of parameters defined in
+SMI130_GYRO_BUS_WR_PARAM_TYPE to function calls used inside the API*/
+#define SMI130_GYRO_BUS_WR_PARAM_ORDER(device_addr, register_addr,\
+register_data, wr_len)
+
+/* never change this line */
+#define SMI130_GYRO_BUS_WRITE_FUNC(device_addr, register_addr,\
+register_data, wr_len) bus_write(device_addr, register_addr,\
+register_data, wr_len)
+/**\brief defines the return parameter type of the SMI130_GYRO_RD_FUNCTION
+*/
+#define SMI130_GYRO_BUS_RD_RETURN_TYPE char
+/**\brief defines the calling parameter types of the SMI130_GYRO_RD_FUNCTION
+*/
+#define SMI130_GYRO_BUS_RD_PARAM_TYPES unsigned char, unsigned char,\
+unsigned char *, unsigned char
+/**\brief links the order of parameters defined in \
+SMI130_GYRO_BUS_RD_PARAM_TYPE to function calls used inside the API
+*/
+#define SMI130_GYRO_BUS_RD_PARAM_ORDER (device_addr, register_addr,\
+register_data)
+/* never change this line */
+#define SMI130_GYRO_BUS_READ_FUNC(device_addr, register_addr,\
+register_data, rd_len)bus_read(device_addr, register_addr,\
+register_data, rd_len)
+/**\brief defines the return parameter type of the SMI130_GYRO_BURST_RD_FUNCTION
+*/
+#define SMI130_GYRO_BURST_RD_RETURN_TYPE char
+/**\brief defines the calling parameter types of the SMI130_GYRO_BURST_RD_FUNCTION
+*/
+#define SMI130_GYRO_BURST_RD_PARAM_TYPES unsigned char,\
+unsigned char, unsigned char *, signed int
+/**\brief links the order of parameters defined in \
+SMI130_GYRO_BURST_RD_PARAM_TYPE to function calls used inside the API
+*/
+#define SMI130_GYRO_BURST_RD_PARAM_ORDER (device_addr, register_addr,\
+register_data)
+/* never change this line */
+#define SMI130_GYRO_BURST_READ_FUNC(device_addr, register_addr,\
+register_data, rd_len)burst_read(device_addr, \
+register_addr, register_data, rd_len)
+/**\brief defines the return parameter type of the SMI130_GYRO_DELAY_FUNCTION
+*/
+#define SMI130_GYRO_DELAY_RETURN_TYPE void
+/* never change this line */
+#define SMI130_GYRO_DELAY_FUNC(delay_in_msec)\
+		delay_func(delay_in_msec)
+#define SMI130_GYRO_RETURN_FUNCTION_TYPE			int
+/**< This refers SMI130_GYRO return type as int */
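+
+/*
+ * Usage sketch (illustrative only): the *_FUNC wrapper macros above are
+ * meant to be invoked through the function pointers stored in
+ * struct smi130_gyro_t (declared further below).  A register read inside
+ * the API, for example, takes the form
+ *
+ *   comres = p_smi130_gyro->SMI130_GYRO_BUS_READ_FUNC(
+ *                   p_smi130_gyro->dev_addr,
+ *                   SMI130_GYRO_CHIP_ID_ADDR, &v, 1);
+ *
+ * which, after macro expansion, resolves to the caller-supplied
+ * p_smi130_gyro->bus_read(...) callback (comres and v are hypothetical
+ * locals used only for illustration).
+ */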
+
+#define	SMI130_GYRO_I2C_ADDR1				0x68
+#define	SMI130_GYRO_I2C_ADDR					SMI130_GYRO_I2C_ADDR1
+#define	SMI130_GYRO_I2C_ADDR2				0x69
+
+
+
+/*Define of registers*/
+
+/* Hard Wired */
+#define SMI130_GYRO_CHIP_ID_ADDR						0x00
+/**<Address of Chip ID Register*/
+
+
+/* Data Register */
+#define SMI130_GYRO_RATE_X_LSB_ADDR                   0x02
+/**<        Address of X axis Rate LSB Register       */
+#define SMI130_GYRO_RATE_X_MSB_ADDR                   0x03
+/**<        Address of X axis Rate MSB Register       */
+#define SMI130_GYRO_RATE_Y_LSB_ADDR                   0x04
+/**<        Address of Y axis Rate LSB Register       */
+#define SMI130_GYRO_RATE_Y_MSB_ADDR                   0x05
+/**<        Address of Y axis Rate MSB Register       */
+#define SMI130_GYRO_RATE_Z_LSB_ADDR                   0x06
+/**<        Address of Z axis Rate LSB Register       */
+#define SMI130_GYRO_RATE_Z_MSB_ADDR                   0x07
+/**<        Address of Z axis Rate MSB Register       */
+#define SMI130_GYRO_TEMP_ADDR                        0x08
+/**<        Address of Temperature Data LSB Register  */
+
+/* Status Register */
+#define SMI130_GYRO_INT_STATUS0_ADDR                 0x09
+/**<        Address of Interrupt status Register 0    */
+#define SMI130_GYRO_INT_STATUS1_ADDR                 0x0A
+/**<        Address of Interrupt status Register 1    */
+#define SMI130_GYRO_INT_STATUS2_ADDR                 0x0B
+/**<        Address of Interrupt status Register 2    */
+#define SMI130_GYRO_INT_STATUS3_ADDR                 0x0C
+/**<        Address of Interrupt status Register 3    */
+#define SMI130_GYRO_FIFO_STATUS_ADDR                 0x0E
+/**<        Address of FIFO status Register           */
+
+/* Control Register */
+#define SMI130_GYRO_RANGE_ADDR                  0x0F
+/**<        Address of Range address Register     */
+#define SMI130_GYRO_BW_ADDR                     0x10
+/**<        Address of Bandwidth Register         */
+#define SMI130_GYRO_MODE_LPM1_ADDR              0x11
+/**<        Address of Mode LPM1 Register         */
+#define SMI130_GYRO_MODE_LPM2_ADDR              0x12
+/**<        Address of Mode LPM2 Register         */
+#define SMI130_GYRO_RATED_HBW_ADDR              0x13
+/**<        Address of Rate HBW Register          */
+#define SMI130_GYRO_BGW_SOFTRESET_ADDR          0x14
+/**<        Address of BGW Softreset Register      */
+#define SMI130_GYRO_INT_ENABLE0_ADDR            0x15
+/**<        Address of Interrupt Enable 0             */
+#define SMI130_GYRO_INT_ENABLE1_ADDR            0x16
+/**<        Address of Interrupt Enable 1             */
+#define SMI130_GYRO_INT_MAP_0_ADDR              0x17
+/**<        Address of Interrupt MAP 0                */
+#define SMI130_GYRO_INT_MAP_1_ADDR              0x18
+/**<        Address of Interrupt MAP 1                */
+#define SMI130_GYRO_INT_MAP_2_ADDR              0x19
+/**<        Address of Interrupt MAP 2                */
+#define SMI130_GYRO_INT_0_ADDR                  0x1A
+/**<        Address of Interrupt 0 register   */
+#define SMI130_GYRO_INT_1_ADDR                  0x1B
+/**<        Address of Interrupt 1 register   */
+#define SMI130_GYRO_INT_2_ADDR                  0x1C
+/**<        Address of Interrupt 2 register   */
+#define SMI130_GYRO_INT_4_ADDR                  0x1E
+/**<        Address of Interrupt 4 register   */
+#define SMI130_GYRO_RST_LATCH_ADDR              0x21
+/**<        Address of Reset Latch Register           */
+#define SMI130_GYRO_HIGH_TH_X_ADDR              0x22
+/**<        Address of High Th x Address register     */
+#define SMI130_GYRO_HIGH_DUR_X_ADDR             0x23
+/**<        Address of High Dur x Address register    */
+#define SMI130_GYRO_HIGH_TH_Y_ADDR              0x24
+/**<        Address of High Th y  Address register    */
+#define SMI130_GYRO_HIGH_DUR_Y_ADDR             0x25
+/**<        Address of High Dur y Address register    */
+#define SMI130_GYRO_HIGH_TH_Z_ADDR              0x26
+/**<        Address of High Th z Address register  */
+#define SMI130_GYRO_HIGH_DUR_Z_ADDR             0x27
+/**<        Address of High Dur z Address register  */
+#define SMI130_GYRO_SOC_ADDR                        0x31
+/**<        Address of SOC register        */
+#define SMI130_GYRO_A_FOC_ADDR                      0x32
+/**<        Address of A_FOC Register        */
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR          0x33
+/**<        Address of Trim NVM control register      */
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR           0x34
+/**<        Address of BGW SPI3,WDT Register           */
+
+
+/* Trim Register */
+#define SMI130_GYRO_OFC1_ADDR                   0x36
+/**<        Address of OFC1 Register          */
+#define SMI130_GYRO_OFC2_ADDR                       0x37
+/**<        Address of OFC2 Register          */
+#define SMI130_GYRO_OFC3_ADDR                   0x38
+/**<        Address of OFC3 Register          */
+#define SMI130_GYRO_OFC4_ADDR                   0x39
+/**<        Address of OFC4 Register          */
+#define SMI130_GYRO_TRIM_GP0_ADDR               0x3A
+/**<        Address of Trim GP0 Register              */
+#define SMI130_GYRO_TRIM_GP1_ADDR               0x3B
+/**<        Address of Trim GP1 Register              */
+#define SMI130_GYRO_SELF_TEST_ADDR              0x3C
+/**<        Address of BGW Self test Register           */
+
+/* Control Register */
+#define SMI130_GYRO_FIFO_CGF1_ADDR              0x3D
+/**<        Address of FIFO CGF0 Register             */
+#define SMI130_GYRO_FIFO_CGF0_ADDR              0x3E
+/**<        Address of FIFO CGF1 Register             */
+
+/* Data Register */
+#define SMI130_GYRO_FIFO_DATA_ADDR              0x3F
+/**<        Address of FIFO Data Register             */
+
+/* Rate X LSB Register */
+#define SMI130_GYRO_RATE_X_LSB_VALUEX__POS        0
+
+/**< Last 8 bits of RateX LSB Registers */
+#define SMI130_GYRO_RATE_X_LSB_VALUEX__LEN        8
+#define SMI130_GYRO_RATE_X_LSB_VALUEX__MSK        0xFF
+#define SMI130_GYRO_RATE_X_LSB_VALUEX__REG        SMI130_GYRO_RATE_X_LSB_ADDR
+
+/* Rate Y LSB Register */
+/**<  Last 8 bits of RateY LSB Registers */
+#define SMI130_GYRO_RATE_Y_LSB_VALUEY__POS        0
+#define SMI130_GYRO_RATE_Y_LSB_VALUEY__LEN        8
+#define SMI130_GYRO_RATE_Y_LSB_VALUEY__MSK        0xFF
+#define SMI130_GYRO_RATE_Y_LSB_VALUEY__REG        SMI130_GYRO_RATE_Y_LSB_ADDR
+
+/* Rate Z LSB Register */
+/**< Last 8 bits of RateZ LSB Registers */
+#define SMI130_GYRO_RATE_Z_LSB_VALUEZ__POS        0
+#define SMI130_GYRO_RATE_Z_LSB_VALUEZ__LEN        8
+#define SMI130_GYRO_RATE_Z_LSB_VALUEZ__MSK        0xFF
+#define SMI130_GYRO_RATE_Z_LSB_VALUEZ__REG        SMI130_GYRO_RATE_Z_LSB_ADDR
+
+/* Interrupt status 0 Register */
+/**< 2nd bit of Interrupt status 0 register */
+#define SMI130_GYRO_INT_STATUS0_ANY_INT__POS     2
+#define SMI130_GYRO_INT_STATUS0_ANY_INT__LEN     1
+#define SMI130_GYRO_INT_STATUS0_ANY_INT__MSK     0x04
+#define SMI130_GYRO_INT_STATUS0_ANY_INT__REG     SMI130_GYRO_INT_STATUS0_ADDR
+
+/**< 1st bit of Interrupt status 0 register */
+#define SMI130_GYRO_INT_STATUS0_HIGH_INT__POS    1
+#define SMI130_GYRO_INT_STATUS0_HIGH_INT__LEN    1
+#define SMI130_GYRO_INT_STATUS0_HIGH_INT__MSK    0x02
+#define SMI130_GYRO_INT_STATUS0_HIGH_INT__REG    SMI130_GYRO_INT_STATUS0_ADDR
+
+ /**< 1st and 2nd bit of Interrupt status 0 register */
+#define SMI130_GYRO_INT_STATUSZERO__POS    1
+#define SMI130_GYRO_INT_STATUSZERO__LEN    2
+#define SMI130_GYRO_INT_STATUSZERO__MSK    0x06
+#define SMI130_GYRO_INT_STATUSZERO__REG    SMI130_GYRO_INT_STATUS0_ADDR
+
+/* Interrupt status 1 Register */
+/**< 7th bit of Interrupt status 1 register */
+#define SMI130_GYRO_INT_STATUS1_DATA_INT__POS           7
+#define SMI130_GYRO_INT_STATUS1_DATA_INT__LEN           1
+#define SMI130_GYRO_INT_STATUS1_DATA_INT__MSK           0x80
+#define SMI130_GYRO_INT_STATUS1_DATA_INT__REG           SMI130_GYRO_INT_STATUS1_ADDR
+
+ /**< 6th bit of Interrupt status 1 register */
+#define SMI130_GYRO_INT_STATUS1_AUTO_OFFSET_INT__POS    6
+#define SMI130_GYRO_INT_STATUS1_AUTO_OFFSET_INT__LEN    1
+#define SMI130_GYRO_INT_STATUS1_AUTO_OFFSET_INT__MSK    0x40
+#define SMI130_GYRO_INT_STATUS1_AUTO_OFFSET_INT__REG    SMI130_GYRO_INT_STATUS1_ADDR
+
+/**< 5th bit of Interrupt status 1 register */
+#define SMI130_GYRO_INT_STATUS1_FAST_OFFSET_INT__POS    5
+#define SMI130_GYRO_INT_STATUS1_FAST_OFFSET_INT__LEN    1
+#define SMI130_GYRO_INT_STATUS1_FAST_OFFSET_INT__MSK    0x20
+#define SMI130_GYRO_INT_STATUS1_FAST_OFFSET_INT__REG    SMI130_GYRO_INT_STATUS1_ADDR
+
+/**< 4th bit of Interrupt status 1 register */
+#define SMI130_GYRO_INT_STATUS1_FIFO_INT__POS           4
+#define SMI130_GYRO_INT_STATUS1_FIFO_INT__LEN           1
+#define SMI130_GYRO_INT_STATUS1_FIFO_INT__MSK           0x10
+#define SMI130_GYRO_INT_STATUS1_FIFO_INT__REG           SMI130_GYRO_INT_STATUS1_ADDR
+
+/**< MSB 4 bits of Interrupt status1 register */
+#define SMI130_GYRO_INT_STATUSONE__POS           4
+#define SMI130_GYRO_INT_STATUSONE__LEN           4
+#define SMI130_GYRO_INT_STATUSONE__MSK           0xF0
+#define SMI130_GYRO_INT_STATUSONE__REG           SMI130_GYRO_INT_STATUS1_ADDR
+
+/* Interrupt status 2 Register */
+/**< 3rd bit of Interrupt status 2 register */
+#define SMI130_GYRO_INT_STATUS2_ANY_SIGN_INT__POS     3
+#define SMI130_GYRO_INT_STATUS2_ANY_SIGN_INT__LEN     1
+#define SMI130_GYRO_INT_STATUS2_ANY_SIGN_INT__MSK     0x08
+#define SMI130_GYRO_INT_STATUS2_ANY_SIGN_INT__REG     SMI130_GYRO_INT_STATUS2_ADDR
+
+/**< 2nd bit of Interrupt status 2 register */
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTZ_INT__POS   2
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTZ_INT__LEN   1
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTZ_INT__MSK   0x04
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTZ_INT__REG   SMI130_GYRO_INT_STATUS2_ADDR
+
+/**< 1st bit of Interrupt status 2 register */
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTY_INT__POS   1
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTY_INT__LEN   1
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTY_INT__MSK   0x02
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTY_INT__REG   SMI130_GYRO_INT_STATUS2_ADDR
+
+/**< 0th bit of Interrupt status 2 register */
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTX_INT__POS   0
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTX_INT__LEN   1
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTX_INT__MSK   0x01
+#define SMI130_GYRO_INT_STATUS2_ANY_FIRSTX_INT__REG   SMI130_GYRO_INT_STATUS2_ADDR
+
+/**< 4 bits of Interrupt status 2 register */
+#define SMI130_GYRO_INT_STATUSTWO__POS   0
+#define SMI130_GYRO_INT_STATUSTWO__LEN   4
+#define SMI130_GYRO_INT_STATUSTWO__MSK   0x0F
+#define SMI130_GYRO_INT_STATUSTWO__REG   SMI130_GYRO_INT_STATUS2_ADDR
+
+/* Interrupt status 3 Register */
+/**< 3rd bit of Interrupt status 3 register */
+#define SMI130_GYRO_INT_STATUS3_HIGH_SIGN_INT__POS     3
+#define SMI130_GYRO_INT_STATUS3_HIGH_SIGN_INT__LEN     1
+#define SMI130_GYRO_INT_STATUS3_HIGH_SIGN_INT__MSK     0x08
+#define SMI130_GYRO_INT_STATUS3_HIGH_SIGN_INT__REG     SMI130_GYRO_INT_STATUS3_ADDR
+
+/**< 2nd bit of Interrupt status 3 register */
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTZ_INT__POS   2
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTZ_INT__LEN   1
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTZ_INT__MSK   0x04
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTZ_INT__REG  SMI130_GYRO_INT_STATUS3_ADDR
+
+/**< 1st bit of Interrupt status 3 register */
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTY_INT__POS   1
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTY_INT__LEN   1
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTY_INT__MSK   0x02
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTY_INT__REG   SMI130_GYRO_INT_STATUS3_ADDR
+
+/**< 0th bit of Interrupt status 3 register */
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTX_INT__POS   0
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTX_INT__LEN   1
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTX_INT__MSK   0x01
+#define SMI130_GYRO_INT_STATUS3_HIGH_FIRSTX_INT__REG   SMI130_GYRO_INT_STATUS3_ADDR
+
+/**< LSB 4 bits of Interrupt status 3 register */
+#define SMI130_GYRO_INT_STATUSTHREE__POS   0
+#define SMI130_GYRO_INT_STATUSTHREE__LEN   4
+#define SMI130_GYRO_INT_STATUSTHREE__MSK   0x0F
+#define SMI130_GYRO_INT_STATUSTHREE__REG   SMI130_GYRO_INT_STATUS3_ADDR
+
+/* SMI130_GYRO FIFO Status Register */
+/**< 7th bit of FIFO status Register */
+#define SMI130_GYRO_FIFO_STATUS_OVERRUN__POS         7
+#define SMI130_GYRO_FIFO_STATUS_OVERRUN__LEN         1
+#define SMI130_GYRO_FIFO_STATUS_OVERRUN__MSK         0x80
+#define SMI130_GYRO_FIFO_STATUS_OVERRUN__REG         SMI130_GYRO_FIFO_STATUS_ADDR
+
+/**< First 7 bits of FIFO status Register */
+#define SMI130_GYRO_FIFO_STATUS_FRAME_COUNTER__POS   0
+#define SMI130_GYRO_FIFO_STATUS_FRAME_COUNTER__LEN   7
+#define SMI130_GYRO_FIFO_STATUS_FRAME_COUNTER__MSK   0x7F
+#define SMI130_GYRO_FIFO_STATUS_FRAME_COUNTER__REG   SMI130_GYRO_FIFO_STATUS_ADDR
+
+/**< First 3 bits of range Registers */
+#define SMI130_GYRO_RANGE_ADDR_RANGE__POS           0
+#define SMI130_GYRO_RANGE_ADDR_RANGE__LEN           3
+#define SMI130_GYRO_RANGE_ADDR_RANGE__MSK           0x07
+#define SMI130_GYRO_RANGE_ADDR_RANGE__REG           SMI130_GYRO_RANGE_ADDR
+
+/**< Last bit of Bandwidth Registers */
+#define SMI130_GYRO_BW_ADDR_HIGH_RES__POS       7
+#define SMI130_GYRO_BW_ADDR_HIGH_RES__LEN       1
+#define SMI130_GYRO_BW_ADDR_HIGH_RES__MSK       0x80
+#define SMI130_GYRO_BW_ADDR_HIGH_RES__REG       SMI130_GYRO_BW_ADDR
+
+/**< First 3 bits of Bandwidth Registers */
+#define SMI130_GYRO_BW_ADDR__POS             0
+#define SMI130_GYRO_BW_ADDR__LEN             3
+#define SMI130_GYRO_BW_ADDR__MSK             0x07
+#define SMI130_GYRO_BW_ADDR__REG             SMI130_GYRO_BW_ADDR
+
+/**< 6th bit of Bandwidth Registers */
+#define SMI130_GYRO_BW_ADDR_IMG_STB__POS             6
+#define SMI130_GYRO_BW_ADDR_IMG_STB__LEN             1
+#define SMI130_GYRO_BW_ADDR_IMG_STB__MSK             0x40
+#define SMI130_GYRO_BW_ADDR_IMG_STB__REG             SMI130_GYRO_BW_ADDR
+
+/**< 5th and 7th bit of LPM1 Register */
+#define SMI130_GYRO_MODE_LPM1__POS             5
+#define SMI130_GYRO_MODE_LPM1__LEN             3
+#define SMI130_GYRO_MODE_LPM1__MSK             0xA0
+#define SMI130_GYRO_MODE_LPM1__REG             SMI130_GYRO_MODE_LPM1_ADDR
+
+/**< 1st to 3rd bit of LPM1 Register */
+#define SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR__POS              1
+#define SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR__LEN              3
+#define SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR__MSK              0x0E
+#define SMI130_GYRO_MODELPM1_ADDR_SLEEPDUR__REG              SMI130_GYRO_MODE_LPM1_ADDR
+
+/**< 7th bit of Mode LPM2 Register */
+#define SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP__POS         7
+#define SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP__LEN         1
+#define SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP__MSK         0x80
+#define SMI130_GYRO_MODE_LPM2_ADDR_FAST_POWERUP__REG         SMI130_GYRO_MODE_LPM2_ADDR
+
+/**< 6th bit of Mode LPM2 Register */
+#define SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING__POS      6
+#define SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING__LEN      1
+#define SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING__MSK      0x40
+#define SMI130_GYRO_MODE_LPM2_ADDR_ADV_POWERSAVING__REG      SMI130_GYRO_MODE_LPM2_ADDR
+
+/**< 4th & 5th bit of Mode LPM2 Register */
+#define SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL__POS          4
+#define SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL__LEN          2
+#define SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL__MSK          0x30
+#define SMI130_GYRO_MODE_LPM2_ADDR_EXT_TRI_SEL__REG          SMI130_GYRO_MODE_LPM2_ADDR
+
+/**< 0th to 2nd bit of LPM2 Register */
+#define SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR__POS  0
+#define SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR__LEN  3
+#define SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR__MSK  0x07
+#define SMI130_GYRO_MODE_LPM2_ADDR_AUTOSLEEPDUR__REG  SMI130_GYRO_MODE_LPM2_ADDR
+
+/**< 7th bit of HBW Register */
+#define SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW__POS         7
+#define SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW__LEN         1
+#define SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW__MSK         0x80
+#define SMI130_GYRO_RATED_HBW_ADDR_DATA_HIGHBW__REG         SMI130_GYRO_RATED_HBW_ADDR
+
+/**< 6th bit of HBW Register */
+#define SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS__POS          6
+#define SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS__LEN          1
+#define SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS__MSK          0x40
+#define SMI130_GYRO_RATED_HBW_ADDR_SHADOW_DIS__REG          SMI130_GYRO_RATED_HBW_ADDR
+
+/**< 7th bit of Interrupt Enable 0 Registers */
+#define SMI130_GYRO_INT_ENABLE0_DATAEN__POS               7
+#define SMI130_GYRO_INT_ENABLE0_DATAEN__LEN               1
+#define SMI130_GYRO_INT_ENABLE0_DATAEN__MSK               0x80
+#define SMI130_GYRO_INT_ENABLE0_DATAEN__REG               SMI130_GYRO_INT_ENABLE0_ADDR
+
+/**< 6th bit of Interrupt Enable 0 Registers */
+#define SMI130_GYRO_INT_ENABLE0_FIFOEN__POS               6
+#define SMI130_GYRO_INT_ENABLE0_FIFOEN__LEN               1
+#define SMI130_GYRO_INT_ENABLE0_FIFOEN__MSK               0x40
+#define SMI130_GYRO_INT_ENABLE0_FIFOEN__REG               SMI130_GYRO_INT_ENABLE0_ADDR
+
+/**< 2nd bit of Interrupt Enable 0 Registers */
+#define SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN__POS        2
+#define SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN__LEN        1
+#define SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN__MSK        0x04
+#define SMI130_GYRO_INT_ENABLE0_AUTO_OFFSETEN__REG        SMI130_GYRO_INT_ENABLE0_ADDR
+
+/**< 3rd bit of Interrupt Enable 1 Registers */
+#define SMI130_GYRO_INT_ENABLE1_IT2_OD__POS               3
+#define SMI130_GYRO_INT_ENABLE1_IT2_OD__LEN               1
+#define SMI130_GYRO_INT_ENABLE1_IT2_OD__MSK               0x08
+#define SMI130_GYRO_INT_ENABLE1_IT2_OD__REG               SMI130_GYRO_INT_ENABLE1_ADDR
+
+/**< 2nd bit of Interrupt Enable 1 Registers */
+#define SMI130_GYRO_INT_ENABLE1_IT2_LVL__POS              2
+#define SMI130_GYRO_INT_ENABLE1_IT2_LVL__LEN              1
+#define SMI130_GYRO_INT_ENABLE1_IT2_LVL__MSK              0x04
+#define SMI130_GYRO_INT_ENABLE1_IT2_LVL__REG              SMI130_GYRO_INT_ENABLE1_ADDR
+
+/**< 1st bit of Interrupt Enable 1 Registers */
+#define SMI130_GYRO_INT_ENABLE1_IT1_OD__POS               1
+#define SMI130_GYRO_INT_ENABLE1_IT1_OD__LEN               1
+#define SMI130_GYRO_INT_ENABLE1_IT1_OD__MSK               0x02
+#define SMI130_GYRO_INT_ENABLE1_IT1_OD__REG               SMI130_GYRO_INT_ENABLE1_ADDR
+
+/**< 0th bit of Interrupt Enable 1 Registers */
+#define SMI130_GYRO_INT_ENABLE1_IT1_LVL__POS              0
+#define SMI130_GYRO_INT_ENABLE1_IT1_LVL__LEN              1
+#define SMI130_GYRO_INT_ENABLE1_IT1_LVL__MSK              0x01
+#define SMI130_GYRO_INT_ENABLE1_IT1_LVL__REG              SMI130_GYRO_INT_ENABLE1_ADDR
+
+/**< 3rd bit of Interrupt MAP 0 Registers */
+#define SMI130_GYRO_INT_MAP_0_INT1_HIGH__POS            3
+#define SMI130_GYRO_INT_MAP_0_INT1_HIGH__LEN            1
+#define SMI130_GYRO_INT_MAP_0_INT1_HIGH__MSK            0x08
+#define SMI130_GYRO_INT_MAP_0_INT1_HIGH__REG            SMI130_GYRO_INT_MAP_0_ADDR
+
+/**< 1st bit of Interrupt MAP 0 Registers */
+#define SMI130_GYRO_INT_MAP_0_INT1_ANY__POS             1
+#define SMI130_GYRO_INT_MAP_0_INT1_ANY__LEN             1
+#define SMI130_GYRO_INT_MAP_0_INT1_ANY__MSK             0x02
+#define SMI130_GYRO_INT_MAP_0_INT1_ANY__REG             SMI130_GYRO_INT_MAP_0_ADDR
+
+/**< 7th bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT2_DATA__POS                  7
+#define SMI130_GYRO_MAP_1_INT2_DATA__LEN                  1
+#define SMI130_GYRO_MAP_1_INT2_DATA__MSK                  0x80
+#define SMI130_GYRO_MAP_1_INT2_DATA__REG                  SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 6th bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT2_FAST_OFFSET__POS           6
+#define SMI130_GYRO_MAP_1_INT2_FAST_OFFSET__LEN           1
+#define SMI130_GYRO_MAP_1_INT2_FAST_OFFSET__MSK           0x40
+#define SMI130_GYRO_MAP_1_INT2_FAST_OFFSET__REG           SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 5th bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT2_FIFO__POS                  5
+#define SMI130_GYRO_MAP_1_INT2_FIFO__LEN                  1
+#define SMI130_GYRO_MAP_1_INT2_FIFO__MSK                  0x20
+#define SMI130_GYRO_MAP_1_INT2_FIFO__REG                  SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 4th bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET__POS           4
+#define SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET__LEN           1
+#define SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET__MSK           0x10
+#define SMI130_GYRO_MAP_1_INT2_AUTO_OFFSET__REG           SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 3rd bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET__POS           3
+#define SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET__LEN           1
+#define SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET__MSK           0x08
+#define SMI130_GYRO_MAP_1_INT1_AUTO_OFFSET__REG           SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 2nd bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT1_FIFO__POS                  2
+#define SMI130_GYRO_MAP_1_INT1_FIFO__LEN                  1
+#define SMI130_GYRO_MAP_1_INT1_FIFO__MSK                  0x04
+#define SMI130_GYRO_MAP_1_INT1_FIFO__REG                  SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 1st bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT1_FAST_OFFSET__POS           1
+#define SMI130_GYRO_MAP_1_INT1_FAST_OFFSET__LEN           1
+#define SMI130_GYRO_MAP_1_INT1_FAST_OFFSET__MSK           0x02
+#define SMI130_GYRO_MAP_1_INT1_FAST_OFFSET__REG           SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 0th bit of MAP_1 Registers */
+#define SMI130_GYRO_MAP_1_INT1_DATA__POS                  0
+#define SMI130_GYRO_MAP_1_INT1_DATA__LEN                  1
+#define SMI130_GYRO_MAP_1_INT1_DATA__MSK                  0x01
+#define SMI130_GYRO_MAP_1_INT1_DATA__REG                  SMI130_GYRO_INT_MAP_1_ADDR
+
+/**< 3rd bit of Interrupt Map 2 Registers */
+#define SMI130_GYRO_INT_MAP_2_INT2_HIGH__POS            3
+#define SMI130_GYRO_INT_MAP_2_INT2_HIGH__LEN            1
+#define SMI130_GYRO_INT_MAP_2_INT2_HIGH__MSK            0x08
+#define SMI130_GYRO_INT_MAP_2_INT2_HIGH__REG            SMI130_GYRO_INT_MAP_2_ADDR
+
+/**< 1st bit of Interrupt Map 2 Registers */
+#define SMI130_GYRO_INT_MAP_2_INT2_ANY__POS             1
+#define SMI130_GYRO_INT_MAP_2_INT2_ANY__LEN             1
+#define SMI130_GYRO_INT_MAP_2_INT2_ANY__MSK             0x02
+#define SMI130_GYRO_INT_MAP_2_INT2_ANY__REG             SMI130_GYRO_INT_MAP_2_ADDR
+
+/**< 5th bit of Interrupt 0 Registers */
+#define SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT__POS          5
+#define SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT__LEN          1
+#define SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT__MSK          0x20
+#define SMI130_GYRO_INT_0_ADDR_SLOW_OFFSET_UNFILT__REG          SMI130_GYRO_INT_0_ADDR
+
+/**< 3rd bit of Interrupt 0 Registers */
+#define SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA__POS            3
+#define SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA__LEN            1
+#define SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA__MSK            0x08
+#define SMI130_GYRO_INT_0_ADDR_HIGH_UNFILT_DATA__REG            SMI130_GYRO_INT_0_ADDR
+
+/**< 1st bit of Interrupt 0 Registers */
+#define SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA__POS             1
+#define SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA__LEN             1
+#define SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA__MSK             0x02
+#define SMI130_GYRO_INT_0_ADDR_ANY_UNFILT_DATA__REG             SMI130_GYRO_INT_0_ADDR
+
+/**< 7th bit of INT_1  Registers */
+#define SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT__POS            7
+#define SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT__LEN            1
+#define SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT__MSK            0x80
+#define SMI130_GYRO_INT_1_ADDR_FAST_OFFSET_UNFILT__REG            SMI130_GYRO_INT_1_ADDR
+
+/**< First 7 bits of INT_1  Registers */
+#define SMI130_GYRO_INT_1_ADDR_ANY_TH__POS                       0
+#define SMI130_GYRO_INT_1_ADDR_ANY_TH__LEN                       7
+#define SMI130_GYRO_INT_1_ADDR_ANY_TH__MSK                       0x7F
+#define SMI130_GYRO_INT_1_ADDR_ANY_TH__REG                       SMI130_GYRO_INT_1_ADDR
+
+/**< Last 2 bits of INT 2 Registers */
+#define SMI130_GYRO_INT_2_ADDR_AWAKE_DUR__POS          6
+#define SMI130_GYRO_INT_2_ADDR_AWAKE_DUR__LEN          2
+#define SMI130_GYRO_INT_2_ADDR_AWAKE_DUR__MSK          0xC0
+#define SMI130_GYRO_INT_2_ADDR_AWAKE_DUR__REG          SMI130_GYRO_INT_2_ADDR
+
+/**< 4th & 5th bit of INT 2 Registers */
+#define SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE__POS      4
+#define SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE__LEN      2
+#define SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE__MSK      0x30
+#define SMI130_GYRO_INT_2_ADDR_ANY_DURSAMPLE__REG      SMI130_GYRO_INT_2_ADDR
+
+/**< 2nd bit of INT 2 Registers */
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Z__POS           2
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Z__LEN           1
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Z__MSK           0x04
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Z__REG           SMI130_GYRO_INT_2_ADDR
+
+/**< 1st bit of INT 2 Registers */
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Y__POS           1
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Y__LEN           1
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Y__MSK           0x02
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_Y__REG           SMI130_GYRO_INT_2_ADDR
+
+/**< 0th bit of INT 2 Registers */
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_X__POS           0
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_X__LEN           1
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_X__MSK           0x01
+#define SMI130_GYRO_INT_2_ADDR_ANY_EN_X__REG           SMI130_GYRO_INT_2_ADDR
+
+/**< Last bit of INT 4 Registers */
+#define SMI130_GYRO_INT_4_FIFO_WM_EN__POS           7
+#define SMI130_GYRO_INT_4_FIFO_WM_EN__LEN           1
+#define SMI130_GYRO_INT_4_FIFO_WM_EN__MSK           0x80
+#define SMI130_GYRO_INT_4_FIFO_WM_EN__REG           SMI130_GYRO_INT_4_ADDR
+
+/**< Last bit of Reset Latch Registers */
+#define SMI130_GYRO_RST_LATCH_ADDR_RESET_INT__POS           7
+#define SMI130_GYRO_RST_LATCH_ADDR_RESET_INT__LEN           1
+#define SMI130_GYRO_RST_LATCH_ADDR_RESET_INT__MSK           0x80
+#define SMI130_GYRO_RST_LATCH_ADDR_RESET_INT__REG           SMI130_GYRO_RST_LATCH_ADDR
+
+/**< 6th bit of Reset Latch Registers */
+#define SMI130_GYRO_RST_LATCH_ADDR_OFFSET_RESET__POS        6
+#define SMI130_GYRO_RST_LATCH_ADDR_OFFSET_RESET__LEN        1
+#define SMI130_GYRO_RST_LATCH_ADDR_OFFSET_RESET__MSK        0x40
+#define SMI130_GYRO_RST_LATCH_ADDR_OFFSET_RESET__REG        SMI130_GYRO_RST_LATCH_ADDR
+
+/**< 4th bit of Reset Latch Registers */
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS__POS        4
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS__LEN        1
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS__MSK        0x10
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_STATUS__REG        SMI130_GYRO_RST_LATCH_ADDR
+
+/**< First 4 bits of Reset Latch Registers */
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT__POS           0
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT__LEN           4
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT__MSK           0x0F
+#define SMI130_GYRO_RST_LATCH_ADDR_LATCH_INT__REG           SMI130_GYRO_RST_LATCH_ADDR
+
+/**< Last 2 bits of HIGH_TH_X Registers */
+#define SMI130_GYRO_HIGH_HY_X__POS        6
+#define SMI130_GYRO_HIGH_HY_X__LEN        2
+#define SMI130_GYRO_HIGH_HY_X__MSK        0xC0
+#define SMI130_GYRO_HIGH_HY_X__REG        SMI130_GYRO_HIGH_TH_X_ADDR
+
+/**< 5 bits of HIGH_TH_X Registers */
+#define SMI130_GYRO_HIGH_TH_X__POS        1
+#define SMI130_GYRO_HIGH_TH_X__LEN        5
+#define SMI130_GYRO_HIGH_TH_X__MSK        0x3E
+#define SMI130_GYRO_HIGH_TH_X__REG        SMI130_GYRO_HIGH_TH_X_ADDR
+
+/**< 0th bit of HIGH_TH_X Registers */
+#define SMI130_GYRO_HIGH_EN_X__POS        0
+#define SMI130_GYRO_HIGH_EN_X__LEN        1
+#define SMI130_GYRO_HIGH_EN_X__MSK        0x01
+#define SMI130_GYRO_HIGH_EN_X__REG        SMI130_GYRO_HIGH_TH_X_ADDR
+
+/**< Last 2 bits of HIGH_TH_Y Registers */
+#define SMI130_GYRO_HIGH_HY_Y__POS        6
+#define SMI130_GYRO_HIGH_HY_Y__LEN        2
+#define SMI130_GYRO_HIGH_HY_Y__MSK        0xC0
+#define SMI130_GYRO_HIGH_HY_Y__REG        SMI130_GYRO_HIGH_TH_Y_ADDR
+
+/**< 5 bits of HIGH_TH_Y Registers */
+#define SMI130_GYRO_HIGH_TH_Y__POS        1
+#define SMI130_GYRO_HIGH_TH_Y__LEN        5
+#define SMI130_GYRO_HIGH_TH_Y__MSK        0x3E
+#define SMI130_GYRO_HIGH_TH_Y__REG        SMI130_GYRO_HIGH_TH_Y_ADDR
+
+/**< 0th bit of HIGH_TH_Y Registers */
+#define SMI130_GYRO_HIGH_EN_Y__POS        0
+#define SMI130_GYRO_HIGH_EN_Y__LEN        1
+#define SMI130_GYRO_HIGH_EN_Y__MSK        0x01
+#define SMI130_GYRO_HIGH_EN_Y__REG        SMI130_GYRO_HIGH_TH_Y_ADDR
+
+/**< Last 2 bits of HIGH_TH_Z Registers */
+#define SMI130_GYRO_HIGH_HY_Z__POS        6
+#define SMI130_GYRO_HIGH_HY_Z__LEN        2
+#define SMI130_GYRO_HIGH_HY_Z__MSK        0xC0
+#define SMI130_GYRO_HIGH_HY_Z__REG        SMI130_GYRO_HIGH_TH_Z_ADDR
+
+/**< 5 bits of HIGH_TH_Z Registers */
+#define SMI130_GYRO_HIGH_TH_Z__POS        1
+#define SMI130_GYRO_HIGH_TH_Z__LEN        5
+#define SMI130_GYRO_HIGH_TH_Z__MSK        0x3E
+#define SMI130_GYRO_HIGH_TH_Z__REG        SMI130_GYRO_HIGH_TH_Z_ADDR
+
+/**< 0th bit of HIGH_TH_Z Registers */
+#define SMI130_GYRO_HIGH_EN_Z__POS        0
+#define SMI130_GYRO_HIGH_EN_Z__LEN        1
+#define SMI130_GYRO_HIGH_EN_Z__MSK        0x01
+#define SMI130_GYRO_HIGH_EN_Z__REG        SMI130_GYRO_HIGH_TH_Z_ADDR
+
+/**< Last 2 bits of INT OFF0 Registers */
+#define SMI130_GYRO_SLOW_OFFSET_TH__POS          6
+#define SMI130_GYRO_SLOW_OFFSET_TH__LEN          2
+#define SMI130_GYRO_SLOW_OFFSET_TH__MSK          0xC0
+#define SMI130_GYRO_SLOW_OFFSET_TH__REG          SMI130_GYRO_SOC_ADDR
+
+/**< 3 bits of INT OFF0 Registers */
+#define SMI130_GYRO_SLOW_OFFSET_DUR__POS         3
+#define SMI130_GYRO_SLOW_OFFSET_DUR__LEN         3
+#define SMI130_GYRO_SLOW_OFFSET_DUR__MSK         0x38
+#define SMI130_GYRO_SLOW_OFFSET_DUR__REG         SMI130_GYRO_SOC_ADDR
+
+/**< 2nd bit of INT OFF0 Registers */
+#define SMI130_GYRO_SLOW_OFFSET_EN_Z__POS        2
+#define SMI130_GYRO_SLOW_OFFSET_EN_Z__LEN        1
+#define SMI130_GYRO_SLOW_OFFSET_EN_Z__MSK        0x04
+#define SMI130_GYRO_SLOW_OFFSET_EN_Z__REG        SMI130_GYRO_SOC_ADDR
+
+/**< 1st bit of INT OFF0 Registers */
+#define SMI130_GYRO_SLOW_OFFSET_EN_Y__POS        1
+#define SMI130_GYRO_SLOW_OFFSET_EN_Y__LEN        1
+#define SMI130_GYRO_SLOW_OFFSET_EN_Y__MSK        0x02
+#define SMI130_GYRO_SLOW_OFFSET_EN_Y__REG        SMI130_GYRO_SOC_ADDR
+
+/**< 0th bit of INT OFF0 Registers */
+#define SMI130_GYRO_SLOW_OFFSET_EN_X__POS        0
+#define SMI130_GYRO_SLOW_OFFSET_EN_X__LEN        1
+#define SMI130_GYRO_SLOW_OFFSET_EN_X__MSK        0x01
+#define SMI130_GYRO_SLOW_OFFSET_EN_X__REG        SMI130_GYRO_SOC_ADDR
+
+/**< Last 2 bits of INT OFF1 Registers */
+#define SMI130_GYRO_AUTO_OFFSET_WL__POS        6
+#define SMI130_GYRO_AUTO_OFFSET_WL__LEN        2
+#define SMI130_GYRO_AUTO_OFFSET_WL__MSK        0xC0
+#define SMI130_GYRO_AUTO_OFFSET_WL__REG        SMI130_GYRO_A_FOC_ADDR
+
+/**< 2  bits of INT OFF1 Registers */
+#define SMI130_GYRO_FAST_OFFSET_WL__POS        4
+#define SMI130_GYRO_FAST_OFFSET_WL__LEN        2
+#define SMI130_GYRO_FAST_OFFSET_WL__MSK        0x30
+#define SMI130_GYRO_FAST_OFFSET_WL__REG        SMI130_GYRO_A_FOC_ADDR
+
+/**< 3rd bit of INT OFF1 Registers */
+#define SMI130_GYRO_FAST_OFFSET_EN__POS        3
+#define SMI130_GYRO_FAST_OFFSET_EN__LEN        1
+#define SMI130_GYRO_FAST_OFFSET_EN__MSK        0x08
+#define SMI130_GYRO_FAST_OFFSET_EN__REG        SMI130_GYRO_A_FOC_ADDR
+
+/**< 2nd bit of INT OFF1 Registers */
+#define SMI130_GYRO_FAST_OFFSET_EN_Z__POS      2
+#define SMI130_GYRO_FAST_OFFSET_EN_Z__LEN      1
+#define SMI130_GYRO_FAST_OFFSET_EN_Z__MSK      0x04
+#define SMI130_GYRO_FAST_OFFSET_EN_Z__REG      SMI130_GYRO_A_FOC_ADDR
+
+/**< 1st bit of INT OFF1 Registers */
+#define SMI130_GYRO_FAST_OFFSET_EN_Y__POS      1
+#define SMI130_GYRO_FAST_OFFSET_EN_Y__LEN      1
+#define SMI130_GYRO_FAST_OFFSET_EN_Y__MSK      0x02
+#define SMI130_GYRO_FAST_OFFSET_EN_Y__REG      SMI130_GYRO_A_FOC_ADDR
+
+/**< 0th bit of INT OFF1 Registers */
+#define SMI130_GYRO_FAST_OFFSET_EN_X__POS      0
+#define SMI130_GYRO_FAST_OFFSET_EN_X__LEN      1
+#define SMI130_GYRO_FAST_OFFSET_EN_X__MSK      0x01
+#define SMI130_GYRO_FAST_OFFSET_EN_X__REG      SMI130_GYRO_A_FOC_ADDR
+
+/**< 0 to 2 bits of INT OFF1 Registers */
+#define SMI130_GYRO_FAST_OFFSET_EN_XYZ__POS      0
+#define SMI130_GYRO_FAST_OFFSET_EN_XYZ__LEN      3
+#define SMI130_GYRO_FAST_OFFSET_EN_XYZ__MSK      0x07
+#define SMI130_GYRO_FAST_OFFSET_EN_XYZ__REG      SMI130_GYRO_A_FOC_ADDR
+
+/**< Last 4 bits of Trim NVM control Registers */
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_REMAIN__POS        4
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_REMAIN__LEN        4
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_REMAIN__MSK        0xF0
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_REMAIN__REG        \
+SMI130_GYRO_TRIM_NVM_CTRL_ADDR
+
+/**< 3rd bit of Trim NVM control Registers */
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_LOAD__POS          3
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_LOAD__LEN          1
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_LOAD__MSK          0x08
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_LOAD__REG          \
+SMI130_GYRO_TRIM_NVM_CTRL_ADDR
+
+/**< 2nd bit of Trim NVM control Registers */
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_RDY__POS           2
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_RDY__LEN           1
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_RDY__MSK           0x04
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_RDY__REG           \
+SMI130_GYRO_TRIM_NVM_CTRL_ADDR
+
+ /**< 1st bit of Trim NVM control Registers */
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_TRIG__POS     1
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_TRIG__LEN     1
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_TRIG__MSK     0x02
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_TRIG__REG     \
+SMI130_GYRO_TRIM_NVM_CTRL_ADDR
+
+/**< 0th bit of Trim NVM control Registers */
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE__POS     0
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE__LEN     1
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE__MSK     0x01
+#define SMI130_GYRO_TRIM_NVM_CTRL_ADDR_NVM_PROG_MODE__REG     \
+SMI130_GYRO_TRIM_NVM_CTRL_ADDR
+
+ /**< 2nd bit of SPI3 WDT Registers */
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN__POS      2
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN__LEN      1
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN__MSK      0x04
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_EN__REG      \
+SMI130_GYRO_BGW_SPI3_WDT_ADDR
+
+ /**< 1st bit of SPI3 WDT Registers */
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL__POS     1
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL__LEN     1
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL__MSK     0x02
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_I2C_WDT_SEL__REG     \
+SMI130_GYRO_BGW_SPI3_WDT_ADDR
+
+/**< 0th bit of SPI3 WDT Registers */
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3__POS            0
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3__LEN            1
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3__MSK            0x01
+#define SMI130_GYRO_BGW_SPI3_WDT_ADDR_SPI3__REG            \
+SMI130_GYRO_BGW_SPI3_WDT_ADDR
+
+/**< 4th bit of Self test Registers */
+#define SMI130_GYRO_SELF_TEST_ADDR_RATEOK__POS            4
+#define SMI130_GYRO_SELF_TEST_ADDR_RATEOK__LEN            1
+#define SMI130_GYRO_SELF_TEST_ADDR_RATEOK__MSK            0x10
+#define SMI130_GYRO_SELF_TEST_ADDR_RATEOK__REG            \
+SMI130_GYRO_SELF_TEST_ADDR
+
+/**< 2nd bit of Self test Registers */
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTFAIL__POS          2
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTFAIL__LEN          1
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTFAIL__MSK          0x04
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTFAIL__REG          \
+SMI130_GYRO_SELF_TEST_ADDR
+
+/**< 1st bit of Self test Registers */
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTRDY__POS           1
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTRDY__LEN           1
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTRDY__MSK           0x02
+#define SMI130_GYRO_SELF_TEST_ADDR_BISTRDY__REG           \
+SMI130_GYRO_SELF_TEST_ADDR
+
+/**< 0th bit of Self test Registers */
+#define SMI130_GYRO_SELF_TEST_ADDR_TRIGBIST__POS          0
+#define SMI130_GYRO_SELF_TEST_ADDR_TRIGBIST__LEN          1
+#define SMI130_GYRO_SELF_TEST_ADDR_TRIGBIST__MSK          0x01
+#define SMI130_GYRO_SELF_TEST_ADDR_TRIGBIST__REG          \
+SMI130_GYRO_SELF_TEST_ADDR
+
+/**< 7th bit of FIFO CGF1 Registers */
+#define SMI130_GYRO_FIFO_CGF1_ADDR_TAG__POS     7
+#define SMI130_GYRO_FIFO_CGF1_ADDR_TAG__LEN     1
+#define SMI130_GYRO_FIFO_CGF1_ADDR_TAG__MSK     0x80
+#define SMI130_GYRO_FIFO_CGF1_ADDR_TAG__REG     SMI130_GYRO_FIFO_CGF1_ADDR
+
+/**< First 7 bits of FIFO CGF1 Registers */
+#define SMI130_GYRO_FIFO_CGF1_ADDR_WML__POS     0
+#define SMI130_GYRO_FIFO_CGF1_ADDR_WML__LEN     7
+#define SMI130_GYRO_FIFO_CGF1_ADDR_WML__MSK     0x7F
+#define SMI130_GYRO_FIFO_CGF1_ADDR_WML__REG     SMI130_GYRO_FIFO_CGF1_ADDR
+
+/**< Last 2 bits of FIFO CGF0 Addr Registers */
+#define SMI130_GYRO_FIFO_CGF0_ADDR_MODE__POS         6
+#define SMI130_GYRO_FIFO_CGF0_ADDR_MODE__LEN         2
+#define SMI130_GYRO_FIFO_CGF0_ADDR_MODE__MSK         0xC0
+#define SMI130_GYRO_FIFO_CGF0_ADDR_MODE__REG         SMI130_GYRO_FIFO_CGF0_ADDR
+
+/**< First 2 bits of FIFO CGF0 Addr Registers */
+#define SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL__POS     0
+#define SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL__LEN     2
+#define SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL__MSK     0x03
+#define SMI130_GYRO_FIFO_CGF0_ADDR_DATA_SEL__REG     SMI130_GYRO_FIFO_CGF0_ADDR
+
+ /**< Last 2 bits of INL Offset MSB Registers */
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_X__POS       6
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_X__LEN       2
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_X__MSK       0xC0
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_X__REG       SMI130_GYRO_OFC1_ADDR
+
+/**< 3 bits of INL Offset MSB Registers */
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Y__POS       3
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Y__LEN       3
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Y__MSK       0x38
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Y__REG       SMI130_GYRO_OFC1_ADDR
+
+/**< First 3 bits of INL Offset MSB Registers */
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Z__POS       0
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Z__LEN       3
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Z__MSK       0x07
+#define SMI130_GYRO_OFC1_ADDR_OFFSET_Z__REG       SMI130_GYRO_OFC1_ADDR
+
+/**< 4 bits of Trim GP0 Registers */
+#define SMI130_GYRO_TRIM_GP0_ADDR_GP0__POS            4
+#define SMI130_GYRO_TRIM_GP0_ADDR_GP0__LEN            4
+#define SMI130_GYRO_TRIM_GP0_ADDR_GP0__MSK            0xF0
+#define SMI130_GYRO_TRIM_GP0_ADDR_GP0__REG            SMI130_GYRO_TRIM_GP0_ADDR
+
+/**< 2 bits of Trim GP0 Registers */
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X__POS       2
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X__LEN       2
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X__MSK       0x0C
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_X__REG       SMI130_GYRO_TRIM_GP0_ADDR
+
+/**< 1st bit of Trim GP0 Registers */
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y__POS       1
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y__LEN       1
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y__MSK       0x02
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Y__REG       SMI130_GYRO_TRIM_GP0_ADDR
+
+/**< First bit of Trim GP0 Registers */
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z__POS       0
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z__LEN       1
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z__MSK       0x01
+#define SMI130_GYRO_TRIM_GP0_ADDR_OFFSET_Z__REG       SMI130_GYRO_TRIM_GP0_ADDR
+
+/* For Axis Selection   */
+/**< It refers SMI130_GYRO X-axis */
+#define SMI130_GYRO_X_AXIS           0
+/**< It refers SMI130_GYRO Y-axis */
+#define SMI130_GYRO_Y_AXIS           1
+/**< It refers SMI130_GYRO Z-axis */
+#define SMI130_GYRO_Z_AXIS           2
+
+/* For Mode Settings    */
+#define SMI130_GYRO_MODE_NORMAL              0
+#define SMI130_GYRO_MODE_DEEPSUSPEND         1
+#define SMI130_GYRO_MODE_SUSPEND             2
+#define SMI130_GYRO_MODE_FASTPOWERUP			3
+#define SMI130_GYRO_MODE_ADVANCEDPOWERSAVING 4
+
+/* get bit slice  */
+#define SMI130_GYRO_GET_BITSLICE(regvar, bitname)\
+((regvar & bitname##__MSK) >> bitname##__POS)
+
+/* Set bit slice */
+#define SMI130_GYRO_SET_BITSLICE(regvar, bitname, val)\
+((regvar&~bitname##__MSK)|((val<<bitname##__POS)&bitname##__MSK))
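+
+/*
+ * Usage sketch (illustrative only): every register field above is described
+ * by a __POS/__LEN/__MSK/__REG group and is meant to be accessed through the
+ * two bit-slice macros.  For the 3-bit bandwidth field of the BW register,
+ * for instance (regval being a value previously read from
+ * SMI130_GYRO_BW_ADDR, bw an unsigned char):
+ *
+ *   bw = SMI130_GYRO_GET_BITSLICE(regval, SMI130_GYRO_BW_ADDR);
+ *       extracts bits 0..2, i.e. ((regval & 0x07) >> 0)
+ *
+ *   regval = SMI130_GYRO_SET_BITSLICE(regval, SMI130_GYRO_BW_ADDR,
+ *                   C_SMI130_GYRO_BW_116Hz_U8X);
+ *       rewrites only those bits (constant defined further below),
+ *       leaving the rest of the register value untouched
+ */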
+/* Constants */
+
+#define SMI130_GYRO_NULL                             0
+/**< constant declaration of NULL */
+#define SMI130_GYRO_DISABLE                          0
+/**< It refers SMI130_GYRO disable */
+#define SMI130_GYRO_ENABLE                           1
+/**< It refers SMI130_GYRO enable */
+#define SMI130_GYRO_OFF                              0
+/**< It refers SMI130_GYRO OFF state */
+#define SMI130_GYRO_ON                               1
+/**< It refers SMI130_GYRO ON state  */
+
+
+#define SMI130_GYRO_TURN1                            0
+/**< It refers SMI130_GYRO TURN1 */
+#define SMI130_GYRO_TURN2                            1
+/**< It refers SMI130_GYRO TURN2 */
+
+#define SMI130_GYRO_INT1                             0
+/**< It refers SMI130_GYRO INT1 */
+#define SMI130_GYRO_INT2                             1
+/**< It refers SMI130_GYRO INT2 */
+
+#define SMI130_GYRO_SLOW_OFFSET                      0
+/**< It refers SMI130_GYRO Slow Offset */
+#define SMI130_GYRO_AUTO_OFFSET                      1
+/**< It refers SMI130_GYRO Auto Offset */
+#define SMI130_GYRO_FAST_OFFSET                      2
+/**< It refers SMI130_GYRO Fast Offset */
+#define SMI130_GYRO_S_TAP                            0
+/**< It refers SMI130_GYRO Single Tap */
+#define SMI130_GYRO_D_TAP                            1
+/**< It refers SMI130_GYRO Double Tap */
+#define SMI130_GYRO_INT1_DATA                        0
+/**< It refers SMI130_GYRO Int1 Data */
+#define SMI130_GYRO_INT2_DATA                        1
+/**< It refers SMI130_GYRO Int2 Data */
+#define SMI130_GYRO_TAP_UNFILT_DATA                   0
+/**< It refers SMI130_GYRO Tap unfilt data */
+#define SMI130_GYRO_HIGH_UNFILT_DATA                  1
+/**< It refers SMI130_GYRO High unfilt data */
+#define SMI130_GYRO_CONST_UNFILT_DATA                 2
+/**< It refers SMI130_GYRO Const unfilt data */
+#define SMI130_GYRO_ANY_UNFILT_DATA                   3
+/**< It refers SMI130_GYRO Any unfilt data */
+#define SMI130_GYRO_SHAKE_UNFILT_DATA                 4
+/**< It refers SMI130_GYRO Shake unfilt data */
+#define SMI130_GYRO_SHAKE_TH                         0
+/**< It refers SMI130_GYRO Shake Threshold */
+#define SMI130_GYRO_SHAKE_TH2                        1
+/**< It refers SMI130_GYRO Shake Threshold2 */
+#define SMI130_GYRO_AUTO_OFFSET_WL                   0
+/**< It refers SMI130_GYRO Auto Offset word length */
+#define SMI130_GYRO_FAST_OFFSET_WL                   1
+/**< It refers SMI130_GYRO Fast Offset word length */
+#define SMI130_GYRO_I2C_WDT_EN                       0
+/**< It refers SMI130_GYRO I2C WDT En */
+#define SMI130_GYRO_I2C_WDT_SEL                      1
+/**< It refers SMI130_GYRO I2C WDT Sel */
+#define SMI130_GYRO_EXT_MODE                         0
+/**< It refers SMI130_GYRO Ext Mode */
+#define SMI130_GYRO_EXT_PAGE                         1
+/**< It refers SMI130_GYRO Ext page */
+#define SMI130_GYRO_START_ADDR                       0
+/**< It refers SMI130_GYRO Start Address */
+#define SMI130_GYRO_STOP_ADDR                        1
+/**< It refers SMI130_GYRO Stop Address */
+#define SMI130_GYRO_SLOW_CMD                         0
+/**< It refers SMI130_GYRO Slow Command */
+#define SMI130_GYRO_FAST_CMD                         1
+/**< It refers SMI130_GYRO Fast Command */
+#define SMI130_GYRO_TRIM_VRA                         0
+/**< It refers SMI130_GYRO Trim VRA */
+#define SMI130_GYRO_TRIM_VRD                         1
+/**< It refers SMI130_GYRO Trim VRD */
+#define SMI130_GYRO_LOGBIT_EM                        0
+/**< It refers SMI130_GYRO LogBit Em */
+#define SMI130_GYRO_LOGBIT_VM                        1
+/**< It refers SMI130_GYRO LogBit VM */
+#define SMI130_GYRO_GP0                              0
+/**< It refers SMI130_GYRO GP0 */
+#define SMI130_GYRO_GP1                              1
+/**< It refers SMI130_GYRO GP1*/
+#define SMI130_GYRO_LOW_SPEED                        0
+/**< It refers SMI130_GYRO Low Speed Oscillator */
+#define SMI130_GYRO_HIGH_SPEED                       1
+/**< It refers SMI130_GYRO High Speed Oscillator */
+#define SMI130_GYRO_DRIVE_OFFSET_P                   0
+/**< It refers SMI130_GYRO Drive Offset P */
+#define SMI130_GYRO_DRIVE_OFFSET_N                   1
+/**< It refers SMI130_GYRO Drive Offset N */
+#define SMI130_GYRO_TEST_MODE_EN                     0
+/**< It refers SMI130_GYRO Test Mode Enable */
+#define SMI130_GYRO_TEST_MODE_REG                    1
+/**< It refers SMI130_GYRO Test Mode reg */
+#define SMI130_GYRO_IBIAS_DRIVE_TRIM                 0
+/**< It refers SMI130_GYRO IBIAS Drive Trim */
+#define SMI130_GYRO_IBIAS_RATE_TRIM                  1
+/**< It refers SMI130_GYRO IBIAS Rate Trim */
+#define SMI130_GYRO_BAA_MODE                         0
+/**< It refers SMI130_GYRO BAA Mode Trim */
+#define SMI130_GYRO_SMI_ACC_MODE                         1
+/**< It refers SMI130_GYRO SMI_ACC Mode Trim */
+#define SMI130_GYRO_PI_KP                            0
+/**< It refers SMI130_GYRO PI KP */
+#define SMI130_GYRO_PI_KI                            1
+/**< It refers SMI130_GYRO PI KI */
+
+
+#define C_SMI130_GYRO_SUCCESS						0
+/**< It refers SMI130_GYRO operation is success */
+#define C_SMI130_GYRO_FAILURE						1
+/**< It refers SMI130_GYRO operation is Failure */
+
+#define SMI130_GYRO_SPI_RD_MASK                      0x80
+/**< Read mask **/
+#define SMI130_GYRO_READ_SET                         0x01
+/**< Setting for reading data **/
+
+#define SMI130_GYRO_SHIFT_1_POSITION                 1
+/**< Shift bit by 1 Position **/
+#define SMI130_GYRO_SHIFT_2_POSITION                 2
+/**< Shift bit by 2 Position **/
+#define SMI130_GYRO_SHIFT_3_POSITION                 3
+/**< Shift bit by 3 Position **/
+#define SMI130_GYRO_SHIFT_4_POSITION                 4
+/**< Shift bit by 4 Position **/
+#define SMI130_GYRO_SHIFT_5_POSITION                 5
+/**< Shift bit by 5 Position **/
+#define SMI130_GYRO_SHIFT_6_POSITION                 6
+/**< Shift bit by 6 Position **/
+#define SMI130_GYRO_SHIFT_7_POSITION                 7
+/**< Shift bit by 7 Position **/
+#define SMI130_GYRO_SHIFT_8_POSITION                 8
+/**< Shift bit by 8 Position **/
+#define SMI130_GYRO_SHIFT_12_POSITION                12
+/**< Shift bit by 12 Position **/
+
+#define         C_SMI130_GYRO_Null_U8X                              0
+#define         C_SMI130_GYRO_Zero_U8X                              0
+#define         C_SMI130_GYRO_One_U8X                               1
+#define         C_SMI130_GYRO_Two_U8X                               2
+#define         C_SMI130_GYRO_Three_U8X                             3
+#define         C_SMI130_GYRO_Four_U8X                              4
+#define         C_SMI130_GYRO_Five_U8X                              5
+#define         C_SMI130_GYRO_Six_U8X                               6
+#define         C_SMI130_GYRO_Seven_U8X                             7
+#define         C_SMI130_GYRO_Eight_U8X                             8
+#define         C_SMI130_GYRO_Nine_U8X                              9
+#define         C_SMI130_GYRO_Ten_U8X                               10
+#define         C_SMI130_GYRO_Eleven_U8X                            11
+#define         C_SMI130_GYRO_Twelve_U8X                            12
+#define         C_SMI130_GYRO_Thirteen_U8X                          13
+#define         C_SMI130_GYRO_Fifteen_U8X                           15
+#define         C_SMI130_GYRO_Sixteen_U8X                           16
+#define         C_SMI130_GYRO_TwentyTwo_U8X                         22
+#define         C_SMI130_GYRO_TwentyThree_U8X                       23
+#define         C_SMI130_GYRO_TwentyFour_U8X                        24
+#define         C_SMI130_GYRO_TwentyFive_U8X                        25
+#define         C_SMI130_GYRO_ThirtyTwo_U8X                         32
+#define         C_SMI130_GYRO_Hundred_U8X                           100
+#define         C_SMI130_GYRO_OneTwentySeven_U8X                    127
+#define         C_SMI130_GYRO_OneTwentyEight_U8X                    128
+#define         C_SMI130_GYRO_TwoFiftyFive_U8X                      255
+#define         C_SMI130_GYRO_TwoFiftySix_U16X                      256
+
+#define         E_SMI130_GYRO_NULL_PTR               (signed char)(-127)
+#define         E_SMI130_GYRO_COMM_RES               (signed char)(-1)
+#define         E_SMI130_GYRO_OUT_OF_RANGE           (signed char)(-2)
+
+#define	C_SMI130_GYRO_No_Filter_U8X			0
+#define	C_SMI130_GYRO_BW_230Hz_U8X			1
+#define	C_SMI130_GYRO_BW_116Hz_U8X			2
+#define	C_SMI130_GYRO_BW_47Hz_U8X			3
+#define	C_SMI130_GYRO_BW_23Hz_U8X			4
+#define	C_SMI130_GYRO_BW_12Hz_U8X			5
+#define	C_SMI130_GYRO_BW_64Hz_U8X			6
+#define	C_SMI130_GYRO_BW_32Hz_U8X			7
+
+#define C_SMI130_GYRO_No_AutoSleepDur_U8X	0
+#define	C_SMI130_GYRO_4ms_AutoSleepDur_U8X	1
+#define	C_SMI130_GYRO_5ms_AutoSleepDur_U8X	2
+#define	C_SMI130_GYRO_8ms_AutoSleepDur_U8X	3
+#define	C_SMI130_GYRO_10ms_AutoSleepDur_U8X	4
+#define	C_SMI130_GYRO_15ms_AutoSleepDur_U8X	5
+#define	C_SMI130_GYRO_20ms_AutoSleepDur_U8X	6
+#define	C_SMI130_GYRO_40ms_AutoSleepDur_U8X	7
+
+
+
+
+#define SMI130_GYRO_WR_FUNC_PTR int (*bus_write)\
+(unsigned char, unsigned char, unsigned char *, unsigned char)
+#define SMI130_GYRO_RD_FUNC_PTR int (*bus_read)\
+(unsigned char, unsigned char, unsigned char *, unsigned char)
+#define SMI130_GYRO_BRD_FUNC_PTR int (*burst_read)\
+(unsigned char, unsigned char, unsigned char *, SMI130_GYRO_S32)
+#define SMI130_GYRO_MDELAY_DATA_TYPE SMI130_GYRO_U16
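+
+/*
+ * Callback shape sketch (illustrative only; smi130_i2c_read is a
+ * hypothetical platform helper, not declared by this header): a bus_read
+ * callback matching SMI130_GYRO_RD_FUNC_PTR has the form
+ *
+ *   int smi130_i2c_read(unsigned char dev_addr, unsigned char reg_addr,
+ *                       unsigned char *data, unsigned char len);
+ *
+ * i.e. it receives the device address, the register address, a buffer and
+ * the transfer length, and returns the communication result.
+ */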
+
+
+
+
+/*user defined Structures*/
+struct smi130_gyro_data_t {
+		SMI130_GYRO_S16 datax;
+		SMI130_GYRO_S16 datay;
+		SMI130_GYRO_S16 dataz;
+		char intstatus[5];
+};
+
+
+struct smi130_gyro_offset_t {
+		SMI130_GYRO_U16 datax;
+		SMI130_GYRO_U16 datay;
+		SMI130_GYRO_U16 dataz;
+};
+
+
+struct smi130_gyro_t {
+		unsigned char chip_id;
+		unsigned char dev_addr;
+		SMI130_GYRO_BRD_FUNC_PTR;
+		SMI130_GYRO_WR_FUNC_PTR;
+		SMI130_GYRO_RD_FUNC_PTR;
+		void(*delay_msec)(SMI130_GYRO_MDELAY_DATA_TYPE);
+};
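+
+/*
+ * Wiring sketch (illustrative only; my_i2c_read, my_i2c_write,
+ * my_burst_read and my_delay_ms stand for platform helpers that are not
+ * part of this header): the structure above carries the bus access and
+ * delay callbacks the API uses for every transfer, so a typical setup is
+ *
+ *   struct smi130_gyro_t gyro;
+ *
+ *   gyro.dev_addr   = SMI130_GYRO_I2C_ADDR;
+ *   gyro.bus_read   = my_i2c_read;      matches SMI130_GYRO_RD_FUNC_PTR
+ *   gyro.bus_write  = my_i2c_write;     matches SMI130_GYRO_WR_FUNC_PTR
+ *   gyro.burst_read = my_burst_read;    matches SMI130_GYRO_BRD_FUNC_PTR
+ *   gyro.delay_msec = my_delay_ms;
+ *
+ *   smi130_gyro_init(&gyro);
+ */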
+
+/***************************************************************
+ * Description: *//**\brief Initializes the API with the caller-supplied
+ *  device structure, i.e. registers the bus read/write/burst-read and
+ *  delay callbacks (and device address) used by all other routines.
+ *
+ *  \param p_smi130_gyro pointer to the smi130_gyro_t structure to register
+ *
+ *  \return communication result
+ ***************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_init(struct smi130_gyro_t *p_smi130_gyro);
+/***************************************************************
+ * Description: *//**\brief Reads the X-axis angular rate
+ *  (RATE_X LSB/MSB registers) as a signed 16-bit value.
+ *
+ *  \param data_x pointer receiving the X-axis rate
+ *
+ *  \return communication result
+ ***************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataX(SMI130_GYRO_S16 *data_x);
+/***************************************************************
+ * Description: *//**\brief Reads the Y-axis angular rate
+ *  (RATE_Y LSB/MSB registers) as a signed 16-bit value.
+ *
+ *  \param data_y pointer receiving the Y-axis rate
+ *
+ *  \return communication result
+ ***************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataY(SMI130_GYRO_S16 *data_y);
+/***************************************************************
+ * Description: *//**\brief Reads the Z-axis angular rate
+ *  (RATE_Z LSB/MSB registers) as a signed 16-bit value.
+ *
+ *  \param data_z pointer receiving the Z-axis rate
+ *
+ *  \return communication result
+ ***************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataZ(SMI130_GYRO_S16 *data_z);
+/***************************************************************
+ * Description: *//**\brief Reads the angular rate of all three axes and
+ *  fills the datax/datay/dataz members of the supplied structure.
+ *
+ *  \param data pointer to an smi130_gyro_data_t receiving the rates
+ *
+ *  \return communication result
+ ***************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataXYZ(struct smi130_gyro_data_t *data);
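+
+/*
+ * Example call (illustrative only, assuming the registered bus callbacks
+ * return 0, i.e. C_SMI130_GYRO_SUCCESS, on success): once the device has
+ * been registered via smi130_gyro_init(), a complete sample is fetched with
+ *
+ *   struct smi130_gyro_data_t sample;
+ *
+ *   if (smi130_gyro_get_dataXYZ(&sample) == C_SMI130_GYRO_SUCCESS)
+ *           use sample.datax, sample.datay and sample.dataz here
+ */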
+/***************************************************************
+ * Description: *//**\brief Reads the angular rate of all three axes
+ *  together with the interrupt status registers and fills the rate and
+ *  intstatus members of the supplied structure.
+ *
+ *  \param data pointer to an smi130_gyro_data_t receiving rates and status
+ *
+ *  \return communication result
+ ***************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_dataXYZI(struct smi130_gyro_data_t *data);
+/********************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ********************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_Temperature(unsigned char *temperature);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_FIFO_data_reg
+(unsigned char *fifo_data);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_read_register(unsigned char addr,
+unsigned char *data, unsigned char len);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_burst_read(unsigned char addr,
+unsigned char *data, SMI130_GYRO_S32 len);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_write_register(unsigned char addr,
+unsigned char *data, unsigned char len);
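+/*
+ * Illustrative register access sketch (not part of this header): the generic
+ * helpers above forward the register address, buffer and length to the
+ * bus_read/bus_write hooks registered at init time.  The register address
+ * used here is a hypothetical placeholder, not a documented SMI130 register.
+ *
+ *	unsigned char val;
+ *
+ *	smi130_gyro_read_register(0x3C, &val, 1);
+ *	val |= 0x01;
+ *	smi130_gyro_write_register(0x3C, &val, 1);
+ */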
+/**\brief Reads interrupt status register 0. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_0
+(unsigned char *status0_data);
+/**\brief Reads interrupt status register 1. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_1
+(unsigned char *status1_data);
+/**\brief Reads interrupt status register 2. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_2
+(unsigned char *status2_data);
+/**\brief Reads interrupt status register 3. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_interrupt_status_reg_3
+(unsigned char *status3_data);
+/**\brief Reads the FIFO status register. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifostatus_reg
+(unsigned char *fifo_status);
+/**\brief Reads the angular rate range setting. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_range_reg
+(unsigned char *range);
+/**\brief Writes the angular rate range setting. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_range_reg
+(unsigned char range);
+/**\brief Reads the high resolution bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_res
+(unsigned char *high_res);
+/**\brief Writes the high resolution bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_res
+(unsigned char high_res);
+/**\brief Reads the bandwidth setting. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_bw(unsigned char *bandwidth);
+/**\brief Writes the bandwidth setting. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_bw(unsigned char bandwidth);
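+/*
+ * Illustrative configuration sketch (not part of this header): range and
+ * bandwidth are typically written once after init.  The numeric values below
+ * are hypothetical placeholders; the real encodings come from the SMI130
+ * datasheet.
+ *
+ *	unsigned char range, bw;
+ *
+ *	smi130_gyro_set_range_reg(0);	// hypothetical range code
+ *	smi130_gyro_set_bw(3);		// hypothetical bandwidth code
+ *	smi130_gyro_get_range_reg(&range);
+ *	smi130_gyro_get_bw(&bw);
+ */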
+/**\brief Reads the external trigger selection (pmu_ext_tri_sel) field. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_pmu_ext_tri_sel
+(unsigned char *pwu_ext_tri_sel);
+/**\brief Writes the external trigger selection (pmu_ext_tri_sel) field. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_pmu_ext_tri_sel
+(unsigned char pwu_ext_tri_sel);
+/**\brief Reads the high bandwidth (high_bw) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_bw
+(unsigned char *high_bw);
+/**\brief Writes the high bandwidth (high_bw) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_bw
+(unsigned char high_bw);
+/**\brief Reads the shadowing disable (shadow_dis) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_shadow_dis
+(unsigned char *shadow_dis);
+/**\brief Writes the shadowing disable (shadow_dis) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_shadow_dis
+(unsigned char shadow_dis);
+/**\brief Triggers a soft reset of the device. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_soft_reset(void);
+/**\brief Reads the new data interrupt enable (data_en) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_data_enable(unsigned char *data_en);
+/**\brief Writes the new data interrupt enable (data_en) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_data_en(unsigned char data_en);
+/**\brief Reads the FIFO interrupt enable (fifo_en) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_enable(unsigned char *fifo_en);
+/**\brief Writes the FIFO interrupt enable (fifo_en) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_enable(unsigned char fifo_en);
+/**\brief Reads the offset compensation enable bit selected by mode. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_offset_enable
+(unsigned char mode, unsigned char *offset_en);
+/**\brief Writes the offset compensation enable bit selected by mode. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset_enable
+(unsigned char mode, unsigned char offset_en);
+/**\brief Reads the output type (push-pull / open drain) of the interrupt
+ * pin selected by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_od
+(unsigned char param, unsigned char *int_od);
+/**\brief Writes the output type (push-pull / open drain) of the interrupt
+ * pin selected by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_od
+(unsigned char param, unsigned char int_od);
+/**\brief Reads the active level of the interrupt pin selected by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_lvl
+(unsigned char param, unsigned char *int_lvl);
+/**\brief Writes the active level of the interrupt pin selected by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_lvl
+(unsigned char param, unsigned char int_lvl);
+/**\brief Reads the mapping of the high rate interrupt to the INT1 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_high
+(unsigned char *int1_high);
+/**\brief Writes the mapping of the high rate interrupt to the INT1 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_high
+(unsigned char int1_high);
+/**\brief Reads the mapping of the any motion interrupt to the INT1 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_any
+(unsigned char *int1_any);
+/**\brief Writes the mapping of the any motion interrupt to the INT1 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_any
+(unsigned char int1_any);
+/**\brief Reads the data interrupt mapping selected by axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_data
+(unsigned char axis, unsigned char *int_data);
+/**\brief Writes the data interrupt mapping selected by axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_data
+(unsigned char axis, unsigned char int_data);
+/**\brief Reads the offset interrupt mapping to INT2 selected by axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_offset
+(unsigned char axis, unsigned char *int2_offset);
+/**\brief Writes the offset interrupt mapping to INT2 selected by axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_offset
+(unsigned char axis, unsigned char int2_offset);
+/**\brief Reads the offset interrupt mapping to INT1 selected by axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_offset
+(unsigned char axis, unsigned char *int1_offset);
+/**\brief Writes the offset interrupt mapping to INT1 selected by axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_offset
+(unsigned char axis, unsigned char int1_offset);
+/**\brief Reads the FIFO interrupt mapping (int_fifo). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int_fifo(unsigned char *int_fifo);
+/**\brief Writes the FIFO interrupt mapping for the pin selected by axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int_fifo
+(unsigned char axis, unsigned char int_fifo);
+/**\brief Reads the mapping of the high rate interrupt to the INT2 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_high
+(unsigned char *int2_high);
+/**\brief Writes the mapping of the high rate interrupt to the INT2 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_high
+(unsigned char int2_high);
+/**\brief Reads the mapping of the any motion interrupt to the INT2 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_any
+(unsigned char *int2_any);
+/**\brief Writes the mapping of the any motion interrupt to the INT2 pin. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_any
+(unsigned char int2_any);
+/**\brief Reads the unfiltered offset data selection (offset_unfilt) chosen
+ * by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_offset_unfilt
+(unsigned char param, unsigned char *offset_unfilt);
+/**\brief Writes the unfiltered offset data selection (offset_unfilt) chosen
+ * by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset_unfilt
+(unsigned char param, unsigned char offset_unfilt);
+/**\brief Reads the unfiltered data selection (unfilt_data) chosen by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_unfilt_data
+(unsigned char param, unsigned char *unfilt_data);
+/**\brief Writes the unfiltered data selection (unfilt_data) chosen by param. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_unfilt_data
+(unsigned char param, unsigned char unfilt_data);
+/**\brief Reads the any motion threshold. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_any_th
+(unsigned char *any_th);
+/**\brief Writes the any motion threshold. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_any_th
+(unsigned char any_th);
+/**\brief Reads the awake duration. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_awake_dur
+(unsigned char *awake_dur);
+/**\brief Writes the awake duration. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_awake_dur
+(unsigned char awake_dur);
+/**\brief Reads the any motion duration sample setting. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_any_dursample
+(unsigned char *dursample);
+/**\brief Writes the any motion duration sample setting. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_any_dursample
+(unsigned char dursample);
+/**\brief Reads the any motion enable bit of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_any_en_ch
+(unsigned char channel, unsigned char *data);
+/**\brief Writes the any motion enable bit of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_any_en_ch
+(unsigned char channel, unsigned char data);
+/**\brief Reads the FIFO watermark interrupt enable bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_watermark_enable
+(unsigned char *fifo_wn_en);
+/**\brief Writes the FIFO watermark interrupt enable bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_watermark_enable
+(unsigned char fifo_wn_en);
+/**\brief Writes the reset_int bit, clearing latched interrupts. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_reset_int
+(unsigned char reset_int);
+/**\brief Writes the offset_reset bit, clearing the offset values. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset_reset
+(unsigned char offset_reset);
+/**\brief Reads the interrupt latch status bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_latch_status
+(unsigned char *latch_status);
+/**\brief Writes the interrupt latch status bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_latch_status
+(unsigned char latch_status);
+/**\brief Reads the interrupt latch setting (latch_int). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_latch_int
+(unsigned char *latch_int);
+/**\brief Writes the interrupt latch setting (latch_int). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_latch_int
+(unsigned char latch_int);
+/**\brief Reads the high rate hysteresis of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_hy
+(unsigned char channel, unsigned char *high_hy);
+/**\brief Writes the high rate hysteresis of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_hy
+(unsigned char channel, unsigned char high_hy);
+/**\brief Reads the high rate threshold of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_th
+(unsigned char channel, unsigned char *high_th);
+/**\brief Writes the high rate threshold of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_th
+(unsigned char channel, unsigned char high_th);
+/**\brief Reads the high rate interrupt enable bit of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_en_ch
+(unsigned char channel, unsigned char *high_en);
+/**\brief Writes the high rate interrupt enable bit of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_en_ch
+(unsigned char channel, unsigned char high_en);
+/**\brief Reads the high rate duration of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_high_dur_ch
+(unsigned char channel, unsigned char *high_dur);
+/**\brief Writes the high rate duration of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_high_dur_ch
+(unsigned char channel, unsigned char high_dur);
+/**\brief Reads the slow offset compensation threshold. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_slow_offset_th
+(unsigned char *offset_th);
+/**\brief Writes the slow offset compensation threshold. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_slow_offset_th
+(unsigned char offset_th);
+/**\brief Reads the slow offset compensation duration. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_slow_offset_dur
+(unsigned char *offset_dur);
+/**\brief Writes the slow offset compensation duration. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_slow_offset_dur
+(unsigned char offset_dur);
+/**\brief Reads the slow offset enable bit of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_slow_offset_en_ch
+(unsigned char channel, unsigned char *slow_offset);
+/**\brief Writes the slow offset enable bit of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_slow_offset_en_ch
+(unsigned char channel, unsigned char slow_offset);
+/**\brief Reads the offset_wl setting of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_offset_wl
+(unsigned char channel, unsigned char *offset_wl);
+/**\brief Writes the offset_wl setting of the selected channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset_wl
+(unsigned char channel, unsigned char offset_wl);
+/**\brief Writes the fast offset compensation enable bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fast_offset_en
+(unsigned char fast_offset);
+/**\brief Reads the per-channel fast offset compensation enable bits. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fast_offset_en_ch
+(unsigned char *fast_offset);
+/**\brief Writes the fast offset compensation enable bit of the selected
+ * channel. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fast_offset_en_ch
+(unsigned char channel, unsigned char fast_offset);
+/**\brief Triggers the fast offset compensation. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_enable_fast_offset(void);
+/**\brief Reads the remaining NVM write cycles (nvm_remain). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_nvm_remain
+(unsigned char *nvm_remain);
+/**\brief Writes the nvm_load bit (reload NVM content). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_nvm_load
+(unsigned char nvm_load);
+/**\brief Reads the NVM ready flag (nvm_rdy). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_nvm_rdy
+(unsigned char *nvm_rdy);
+/**\brief Writes the NVM program trigger bit (nvm_prog_trig). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_nvm_prog_trig
+(unsigned char prog_trig);
+/**\brief Reads the NVM program mode bit (nvm_prog_mode). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_nvm_prog_mode
+(unsigned char *prog_mode);
+/**\brief Writes the NVM program mode bit (nvm_prog_mode). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_nvm_prog_mode
+(unsigned char prog_mode);
+/**\brief Reads the I2C watchdog setting selected by i2c_wdt. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_i2c_wdt
+(unsigned char i2c_wdt, unsigned char *prog_mode);
+/**\brief Writes the I2C watchdog setting selected by i2c_wdt. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_i2c_wdt
+(unsigned char i2c_wdt, unsigned char prog_mode);
+/**\brief Reads the SPI 3-wire mode (spi3) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_spi3(unsigned char *spi3);
+/**\brief Writes the SPI 3-wire mode (spi3) bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_spi3(unsigned char spi3);
+/**\brief Reads the FIFO tag enable bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_tag(unsigned char *tag);
+/**\brief Writes the FIFO tag enable bit. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_tag(unsigned char tag);
+/**\brief Reads the FIFO watermark level. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_watermarklevel
+(unsigned char *water_mark_level);
+/**\brief Writes the FIFO watermark level. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_watermarklevel
+(unsigned char water_mark_level);
+/**\brief Reads the FIFO operating mode. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_mode
+(unsigned char *mode);
+/**\brief Writes the FIFO operating mode. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_mode(unsigned char mode);
+/**\brief Reads the FIFO data selection (data_sel). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_data_sel
+(unsigned char *data_sel);
+/**\brief Writes the FIFO data selection (data_sel). */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_fifo_data_sel
+(unsigned char data_sel);
+/**\brief Reads the offset compensation value of the selected axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_offset
+(unsigned char axis, SMI130_GYRO_S16 *offset);
+/**\brief Writes the offset compensation value of the selected axis. */
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_offset
+(unsigned char axis, SMI130_GYRO_S16 offset);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_gp
+(unsigned char param, unsigned char *value);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_gp
+(unsigned char param, unsigned char value);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_framecount
+(unsigned char *fifo_framecount);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_fifo_overrun
+(unsigned char *fifo_overrun);
+/****************************************************************************
+ * Description: *//**\brief
+ *
+ *
+ *
+ *
+ *  \param
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int2_fifo
+(unsigned char *int_fifo);
+/****************************************************************************
+ * Description: *//**\brief This API is used to get the FIFO interrupt
+ *                          mapping status of the INT1 pin
+ *
+ *  \param unsigned char *int_fifo :
+ *          Address of the INT1 FIFO interrupt mapping status
+ *
+ *  \return communication results
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_int1_fifo
+(unsigned char *int_fifo);
+/****************************************************************************
+ * Description: *//**\brief This API is used to map or unmap the FIFO
+ *                          interrupt to the INT2 pin
+ *
+ *  \param unsigned char fifo_int2 :
+ *          Value to be written (enable/disable the mapping)
+ *
+ *  \return communication results
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int2_fifo
+(unsigned char fifo_int2);
+/****************************************************************************
+ * Description: *//**\brief This API is used to map or unmap the FIFO
+ *                          interrupt to the INT1 pin
+ *
+ *  \param unsigned char fifo_int1 :
+ *          Value to be written (enable/disable the mapping)
+ *
+ *  \return communication results
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_int1_fifo
+(unsigned char fifo_int1);
+/****************************************************************************
+ * Description: *//**\brief This API is used to get the operation mode of
+ *                          the sensor
+ *
+ *  \param unsigned char *mode : Address of the operation mode value
+ *
+ *  \return communication results
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_mode(unsigned char *mode);
+/*****************************************************************************
+ * Description: *//**\brief This API is used to set the operation mode of
+ *                          the sensor
+ *
+ *  \param unsigned char mode : Operation mode to be set
+ *
+ *  \return communication results
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_mode(unsigned char mode);
+/*****************************************************************************
+ * Description: *//**\brief This API is used to perform the built-in self
+ *                          test and get its result
+ *
+ *  \param unsigned char *result : Address of the self test result
+ *
+ *  \return communication results
+ *
+ ****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ ****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_selftest(unsigned char *result);
+/*****************************************************************************
+ * Description: *//**\brief  This API is used to get data auto sleep duration
+ *
+ *
+ *
+ *
+ *  \param unsigned char *duration : Address of auto sleep duration
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_autosleepdur(unsigned char *duration);
+/*****************************************************************************
+ * Description: *//**\brief This API is used to set the auto sleep duration
+ *
+ *
+ *
+ *
+ *  \param unsigned char duration:
+ *          Value to be written passed as a parameter
+ *		   unsigned char bandwidth:
+ *			Value to be written passed as a parameter
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_autosleepdur(unsigned char duration,
+unsigned char bandwith);
+/*****************************************************************************
+ * Description: *//**\brief  This API is used to get data sleep duration
+ *
+ *
+ *
+ *
+ *  \param unsigned char *duration : Address of sleep duration
+ *                         Pointer to a variable passed as a parameter
+ *
+ *
+ *
+ *  \return
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_sleepdur(unsigned char *duration);
+/*****************************************************************************
+ * Description: *//**\brief This API is used to set the sleep duration
+ *
+ *
+ *
+ *
+ *  \param unsigned char duration:
+ *          Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_sleepdur(unsigned char duration);
+/*****************************************************************************
+ * Description: *//**\brief This API is used to set the auto offset enable
+ *
+ *
+ *
+ *
+ *  \param unsigned char offset_en:
+ *          Value to be written passed as a parameter
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_set_auto_offset_en(unsigned char offset_en);
+/*****************************************************************************
+ * Description: *//**\brief This API is used to get the auto offset enable
+ *
+ *
+ *
+ *
+ *  \param unsigned char *offset_en:
+ *          Address of the auto offset enable status
+ *
+ *
+ *
+ *  \return communication results
+ *
+ *
+ *****************************************************************************/
+/* Scheduling:
+ *
+ *
+ *
+ * Usage guide:
+ *
+ *
+ * Remarks:
+ *
+ *****************************************************************************/
+SMI130_GYRO_RETURN_FUNCTION_TYPE smi130_gyro_get_auto_offset_en(
+unsigned char *offset_en);
+#endif
diff --git a/drivers/input/sensors/smi130/smi130_gyro_driver.c b/drivers/input/sensors/smi130/smi130_gyro_driver.c
new file mode 100644
index 0000000..65e303c
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_gyro_driver.c
@@ -0,0 +1,2036 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL), Version 2, June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+ *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ * @filename smi130_gyro_driver.c
+ * @date     2015/11/17 13:44
+ * @Modification Date 2018/08/28 18:20
+ * @id       "836294d"
+ * @version  1.5.9
+ *
+ * @brief    SMI130_GYRO Linux Driver
+ */
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/unistd.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#else
+#include <unistd.h>
+#include <sys/types.h>
+#include <string.h>
+#endif
+#include <linux/math64.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include "smi130_gyro.h"
+#include "bs_log.h"
+
+/* sensor specific */
+#define SENSOR_NAME "smi130_gyro"
+#define SMI130_GYRO_ENABLE_INT1 1
+#define SENSOR_CHIP_ID_SMI_GYRO (0x0f)
+#define CHECK_CHIP_ID_TIME_MAX   5
+#define DRIVER_VERSION "0.0.53.0"
+#define SMI_GYRO_USE_FIFO          1
+#define SMI_GYRO_USE_BASIC_I2C_FUNC     1
+#define SMI_GYRO_REG_NAME(name) SMI130_GYRO_##name
+#define SMI_GYRO_VAL_NAME(name) SMI130_GYRO_##name
+#define SMI_GYRO_CALL_API(name) smi130_gyro_##name
+#define MSC_TIME                6
+
+#define SMI_GYRO_I2C_WRITE_DELAY_TIME 1
+
+/* generic */
+#define SMI_GYRO_MAX_RETRY_I2C_XFER (100)
+#define SMI_GYRO_MAX_RETRY_WAKEUP (5)
+#define SMI_GYRO_MAX_RETRY_WAIT_DRDY (100)
+
+#define SMI_GYRO_DELAY_MIN (1)
+#define SMI_GYRO_DELAY_DEFAULT (200)
+
+#define SMI_GYRO_VALUE_MAX (32767)
+#define SMI_GYRO_VALUE_MIN (-32768)
+
+#define BYTES_PER_LINE (16)
+
+#define SMI_GYRO_SELF_TEST 0
+
+#define SMI_GYRO_SOFT_RESET_VALUE                0xB6
+
+#ifdef SMI_GYRO_USE_FIFO
+#define MAX_FIFO_F_LEVEL 100
+#define MAX_FIFO_F_BYTES 8
+#define SMI130_GYRO_FIFO_DAT_SEL_X                     1
+#define SMI130_GYRO_FIFO_DAT_SEL_Y                     2
+#define SMI130_GYRO_FIFO_DAT_SEL_Z                     3
+#endif
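+
+/*
+ * Note on the FIFO layout as used by this driver: with fifo_data_sel == 0
+ * every frame carries 6 bytes (X, Y, Z as little-endian 16-bit values),
+ * otherwise a frame carries 2 bytes for the single selected axis; at most
+ * MAX_FIFO_F_LEVEL frames are drained per burst read.
+ */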
+
+/*!
+ * @brief BMI058 feature macro definitions
+ */
+#ifdef CONFIG_SENSORS_BMI058
+/*! BMI058 X AXIS definition*/
+#define BMI058_X_AXIS	SMI130_GYRO_Y_AXIS
+/*! BMI058 Y AXIS definition*/
+#define BMI058_Y_AXIS	SMI130_GYRO_X_AXIS
+
+#define C_BMI058_One_U8X	1
+#define C_BMI058_Two_U8X	2
+#endif
+
+/*! Bosch sensor unknown place*/
+#define BOSCH_SENSOR_PLACE_UNKNOWN (-1)
+/*! Bosch sensor remapping table size P0~P7*/
+#define MAX_AXIS_REMAP_TAB_SZ 8
+
+
+struct bosch_sensor_specific {
+	char *name;
+	/* 0 to 7 */
+	int place;
+	int irq;
+	int (*irq_gpio_cfg)(void);
+};
+
+
+/*!
+ * the axis remap description is kept behind this structure,
+ * because its layout might change
+ */
+struct bosch_sensor_axis_remap {
+	/* src means which source will be mapped to target x, y, z axis */
+	/* if an target OS axis is remapped from (-)x,
+	 * src is 0, sign_* is (-)1 */
+	/* if an target OS axis is remapped from (-)y,
+	 * src is 1, sign_* is (-)1 */
+	/* if an target OS axis is remapped from (-)z,
+	 * src is 2, sign_* is (-)1 */
+	int src_x:3;
+	int src_y:3;
+	int src_z:3;
+
+	int sign_x:2;
+	int sign_y:2;
+	int sign_z:2;
+};
+
+
+struct bosch_sensor_data {
+	union {
+		int16_t v[3];
+		struct {
+			int16_t x;
+			int16_t y;
+			int16_t z;
+		};
+	};
+};
+
+struct smi_gyro_client_data {
+	struct smi130_gyro_t device;
+	struct i2c_client *client;
+	struct input_dev *input;
+	struct delayed_work work;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend_handler;
+#endif
+
+	atomic_t delay;
+	uint8_t debug_level;
+	struct smi130_gyro_data_t value;
+	u8 enable:1;
+	unsigned int fifo_count;
+	unsigned char fifo_datasel;
+	uint64_t timestamp;
+	uint64_t base_time;
+	uint64_t fifo_time;
+	uint64_t gyro_count;
+	uint64_t time_odr;
+	/* controls not only reg, but also workqueue */
+	struct mutex mutex_op_mode;
+	struct mutex mutex_enable;
+	struct bosch_sensor_specific *bosch_pd;
+	struct work_struct report_data_work;
+	int is_timer_running;
+	struct hrtimer timer;
+	ktime_t work_delay_kt;
+	uint8_t gpio_pin;
+	int16_t IRQ;
+	struct work_struct irq_work;
+};
+
+static struct i2c_client *smi_gyro_client;
+/* i2c operation for API */
+static int smi_gyro_i2c_read(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u8 len);
+static int smi_gyro_i2c_write(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u8 len);
+
+static void smi_gyro_dump_reg(struct i2c_client *client);
+static int smi_gyro_check_chip_id(struct i2c_client *client);
+
+static int smi_gyro_pre_suspend(struct i2c_client *client);
+static int smi_gyro_post_resume(struct i2c_client *client);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void smi_gyro_early_suspend(struct early_suspend *handler);
+static void smi_gyro_late_resume(struct early_suspend *handler);
+#endif
+
+static void smi130_gyro_delay(SMI130_GYRO_U16 msec)
+{
+	if (msec <= 20)
+		usleep_range(msec * 1000, msec * 1000);
+	else
+		msleep(msec);
+}
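+
+/*
+ * Delays of a few milliseconds use usleep_range() (hrtimer based, more
+ * precise for short waits), longer ones fall back to msleep(); all callers
+ * pass the delay in milliseconds.
+ */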
+
+/*!
+ * SMI130_GYRO sensor axis remapping table;
+ * the mounting place must be provided via the BSP/platform data first.
+ */
+static const struct bosch_sensor_axis_remap
+	bosch_axis_remap_tab_dft[MAX_AXIS_REMAP_TAB_SZ] = {
+	/* src_x src_y src_z  sign_x  sign_y  sign_z */
+	{  0,	 1,    2,	  1,	  1,	  1 }, /* P0 */
+	{  1,	 0,    2,	  1,	 -1,	  1 }, /* P1 */
+	{  0,	 1,    2,	 -1,	 -1,	  1 }, /* P2 */
+	{  1,	 0,    2,	 -1,	  1,	  1 }, /* P3 */
+
+	{  0,	 1,    2,	 -1,	  1,	 -1 }, /* P4 */
+	{  1,	 0,    2,	 -1,	 -1,	 -1 }, /* P5 */
+	{  0,	 1,    2,	  1,	 -1,	 -1 }, /* P6 */
+	{  1,	 0,    2,	  1,	  1,	 -1 }, /* P7 */
+};
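+
+/*
+ * Illustrative example (not from the datasheet): for place P1 the entry is
+ * {src_x = 1, src_y = 0, sign_y = -1}, so bosch_remap_sensor_data() yields
+ * x' = +y, y' = -x, z' = +z, i.e. the sample is rotated by 90 degrees around
+ * the Z axis to match the board mounting orientation.
+ */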
+
+static void bosch_remap_sensor_data(struct bosch_sensor_data *data,
+			const struct bosch_sensor_axis_remap *remap)
+{
+	struct bosch_sensor_data tmp;
+
+	tmp.x = data->v[remap->src_x] * remap->sign_x;
+	tmp.y = data->v[remap->src_y] * remap->sign_y;
+	tmp.z = data->v[remap->src_z] * remap->sign_z;
+
+	memcpy(data, &tmp, sizeof(*data));
+}
+
+static void bosch_remap_sensor_data_dft_tab(struct bosch_sensor_data *data,
+			int place)
+{
+	/* a sensor with place 0 does not need to be remapped */
+	if ((place <= 0) || (place >= MAX_AXIS_REMAP_TAB_SZ))
+		return;
+	bosch_remap_sensor_data(data, &bosch_axis_remap_tab_dft[place]);
+}
+
+static void smi130_gyro_remap_sensor_data(struct smi130_gyro_data_t *val,
+		struct smi_gyro_client_data *client_data)
+{
+	struct bosch_sensor_data bsd;
+	int place;
+
+	if ((NULL == client_data->bosch_pd) || (BOSCH_SENSOR_PLACE_UNKNOWN
+			 == client_data->bosch_pd->place))
+		place = BOSCH_SENSOR_PLACE_UNKNOWN;
+	else
+		place = client_data->bosch_pd->place;
+
+#ifdef CONFIG_SENSORS_BMI058
+	/* x and y are swapped because of the HW register layout of BMI058 */
+	bsd.y = val->datax;
+	bsd.x = val->datay;
+	bsd.z = val->dataz;
+#else
+	bsd.x = val->datax;
+	bsd.y = val->datay;
+	bsd.z = val->dataz;
+#endif
+
+	bosch_remap_sensor_data_dft_tab(&bsd, place);
+
+	val->datax = bsd.x;
+	val->datay = bsd.y;
+	val->dataz = bsd.z;
+
+}
+
+static int smi_gyro_check_chip_id(struct i2c_client *client)
+{
+	int err = -1;
+	u8 chip_id = 0;
+	u8 read_count = 0;
+
+	while (read_count++ < CHECK_CHIP_ID_TIME_MAX) {
+		smi_gyro_i2c_read(client, SMI_GYRO_REG_NAME(CHIP_ID_ADDR), &chip_id, 1);
+		PINFO("read chip id result: %#x", chip_id);
+
+		if ((chip_id & 0xff) != SENSOR_CHIP_ID_SMI_GYRO) {
+			smi130_gyro_delay(1);
+		} else {
+			err = 0;
+			break;
+		}
+	}
+	return err;
+}
+
+static void smi_gyro_dump_reg(struct i2c_client *client)
+{
+	int i;
+	u8 dbg_buf[64];
+	u8 dbg_buf_str[64 * 3 + 1] = "";
+
+	for (i = 0; i < BYTES_PER_LINE; i++) {
+		dbg_buf[i] = i;
+		snprintf(dbg_buf_str + i * 3, 16, "%02x%c",
+				dbg_buf[i],
+				(((i + 1) % BYTES_PER_LINE == 0) ? '\n' : ' '));
+	}
+	dev_dbg(&client->dev, "%s\n", dbg_buf_str);
+
+	smi_gyro_i2c_read(client, SMI_GYRO_REG_NAME(CHIP_ID_ADDR), dbg_buf, 64);
+	for (i = 0; i < 64; i++) {
+		snprintf(dbg_buf_str + i * 3, 16, "%02x%c",
+				dbg_buf[i],
+				(((i + 1) % BYTES_PER_LINE == 0) ? '\n' : ' '));
+	}
+	dev_dbg(&client->dev, "%s\n", dbg_buf_str);
+}
+
+/*i2c read routine for API*/
+static int smi_gyro_i2c_read(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u8 len)
+{
+#if !defined SMI_GYRO_USE_BASIC_I2C_FUNC
+	s32 dummy;
+	if (NULL == client)
+		return -ENODEV;
+
+	while (0 != len--) {
+#ifdef SMI_GYRO_SMBUS
+		dummy = i2c_smbus_read_byte_data(client, reg_addr);
+		if (dummy < 0) {
+			dev_err(&client->dev, "i2c bus read error");
+			return -EIO;
+		}
+		*data = (u8)(dummy & 0xff);
+#else
+		dummy = i2c_master_send(client, (char *)&reg_addr, 1);
+		if (dummy < 0)
+			return -EIO;
+
+		dummy = i2c_master_recv(client, (char *)data, 1);
+		if (dummy < 0)
+			return -EIO;
+#endif
+		reg_addr++;
+		data++;
+	}
+	return 0;
+#else
+	int retry;
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg_addr,
+		},
+
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	for (retry = 0; retry < SMI_GYRO_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			smi130_gyro_delay(SMI_GYRO_I2C_WRITE_DELAY_TIME);
+	}
+
+	if (SMI_GYRO_MAX_RETRY_I2C_XFER <= retry) {
+		dev_err(&client->dev, "I2C xfer error");
+		return -EIO;
+	}
+
+	return 0;
+#endif
+}
+
+#ifdef SMI_GYRO_USE_FIFO
+static int smi_gyro_i2c_burst_read(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u16 len)
+{
+	int retry;
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg_addr,
+		},
+
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	for (retry = 0; retry < SMI_GYRO_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			smi130_gyro_delay(SMI_GYRO_I2C_WRITE_DELAY_TIME);
+	}
+
+	if (SMI_GYRO_MAX_RETRY_I2C_XFER <= retry) {
+		dev_err(&client->dev, "I2C xfer error");
+		return -EIO;
+	}
+
+	return 0;
+}
+#endif
+
+/*i2c write routine for API*/
+static int smi_gyro_i2c_write(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u8 len)
+{
+#if !defined SMI_GYRO_USE_BASIC_I2C_FUNC
+	s32 dummy;
+
+#ifndef SMI_GYRO_SMBUS
+	u8 buffer[2];
+#endif
+
+	if (NULL == client)
+		return -ENODEV;
+
+	while (0 != len--) {
+#ifdef SMI_GYRO_SMBUS
+		dummy = i2c_smbus_write_byte_data(client, reg_addr, *data);
+#else
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		dummy = i2c_master_send(client, (char *)buffer, 2);
+#endif
+		reg_addr++;
+		data++;
+		if (dummy < 0) {
+			dev_err(&client->dev, "error writing i2c bus");
+			return -EIO;
+		}
+
+	}
+	return 0;
+#else
+	u8 buffer[2];
+	int retry;
+	struct i2c_msg msg[] = {
+		{
+		 .addr = client->addr,
+		 .flags = 0,
+		 .len = 2,
+		 .buf = buffer,
+		 },
+	};
+
+	while (0 != len--) {
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		for (retry = 0; retry < SMI_GYRO_MAX_RETRY_I2C_XFER; retry++) {
+			if (i2c_transfer(client->adapter, msg,
+						ARRAY_SIZE(msg)) > 0) {
+				break;
+			} else {
+				smi130_gyro_delay(SMI_GYRO_I2C_WRITE_DELAY_TIME);
+			}
+		}
+		if (SMI_GYRO_MAX_RETRY_I2C_XFER <= retry) {
+			dev_err(&client->dev, "I2C xfer error");
+			return -EIO;
+		}
+		reg_addr++;
+		data++;
+	}
+
+	return 0;
+#endif
+}
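+
+/*
+ * In the default build (SMI_GYRO_USE_BASIC_I2C_FUNC is defined) the I2C
+ * helpers above retry a failed transfer up to SMI_GYRO_MAX_RETRY_I2C_XFER
+ * times, sleeping SMI_GYRO_I2C_WRITE_DELAY_TIME ms between attempts, and
+ * give up with -EIO once the retries are exhausted.
+ */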
+
+static int smi_gyro_i2c_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	int err;
+	err = smi_gyro_i2c_read(smi_gyro_client, reg_addr, data, len);
+	return err;
+}
+
+static int smi_gyro_i2c_write_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	int err;
+	err = smi_gyro_i2c_write(smi_gyro_client, reg_addr, data, len);
+	return err;
+}
+
+
+static void smi_gyro_work_func(struct work_struct *work)
+{
+	struct smi_gyro_client_data *client_data =
+		container_of((struct delayed_work *)work,
+			struct smi_gyro_client_data, work);
+
+	unsigned long delay =
+		msecs_to_jiffies(atomic_read(&client_data->delay));
+	struct smi130_gyro_data_t gyro_data;
+
+	SMI_GYRO_CALL_API(get_dataXYZ)(&gyro_data);
+	/*remapping for SMI130_GYRO sensor*/
+	smi130_gyro_remap_sensor_data(&gyro_data, client_data);
+
+	input_report_abs(client_data->input, ABS_X, gyro_data.datax);
+	input_report_abs(client_data->input, ABS_Y, gyro_data.datay);
+	input_report_abs(client_data->input, ABS_Z, gyro_data.dataz);
+	input_sync(client_data->input);
+
+	schedule_delayed_work(&client_data->work, delay);
+}
+
+static struct workqueue_struct *reportdata_wq;
+
+uint64_t smi130_gyro_get_alarm_timestamp(void)
+{
+	uint64_t ts_ap;
+	struct timespec tmp_time;
+	get_monotonic_boottime(&tmp_time);
+	ts_ap = (uint64_t)tmp_time.tv_sec * 1000000000 + tmp_time.tv_nsec;
+	return ts_ap;
+}
+#define ABS(x) ((x) > 0 ? (x) : -(x))
+
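+/*
+ * FIFO report worker: drains the hardware FIFO, spreads the frames evenly
+ * between the previous and the current read time (time_internal is clamped
+ * to +/-0.5% of the nominal ODR period), and on every 20th run, if the
+ * accumulated drift exceeds 20% of the ODR period, adjusts the per-sample
+ * offset so the reconstructed timestamps track the boottime clock.
+ */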
+static void smi130_gyro_work_func(struct work_struct *work)
+{
+	struct	smi_gyro_client_data *smi130_gyro =
+		container_of(work,
+				struct smi_gyro_client_data, report_data_work);
+	int i;
+	struct smi130_gyro_data_t gyro_lsb;
+	unsigned char fifo_framecount;
+	signed char fifo_data_out[MAX_FIFO_F_LEVEL * MAX_FIFO_F_BYTES] = {0};
+	unsigned char f_len = 0;
+	uint64_t del;
+	uint64_t time_internal;
+	struct timespec ts;
+	int64_t drift_time = 0;
+	static uint64_t time_odr;
+	static uint32_t data_cnt;
+	static uint32_t pre_data_cnt;
+	static int64_t sample_drift_offset;
+	if (smi130_gyro->fifo_datasel)
+		/*Select one axis data output for every fifo frame*/
+		f_len = 2;
+	else
+		/*Select X Y Z axis data output for every fifo frame*/
+		f_len = 6;
+	if (SMI_GYRO_CALL_API(get_fifo_framecount)(&fifo_framecount) < 0) {
+		PERR("smi130_gyro_get_fifo_framecount err\n");
+		return;
+	}
+	if (fifo_framecount == 0)
+		return;
+	if (fifo_framecount > MAX_FIFO_F_LEVEL)
+			fifo_framecount = MAX_FIFO_F_LEVEL;
+	if (smi_gyro_i2c_burst_read(smi130_gyro->client, SMI130_GYRO_FIFO_DATA_ADDR,
+			fifo_data_out, fifo_framecount * f_len) < 0) {
+			PERR("smi130_gyro read fifo err\n");
+			return;
+	}
+	smi130_gyro->fifo_time = smi130_gyro_get_alarm_timestamp();
+	if (smi130_gyro->gyro_count == 0)
+		smi130_gyro->base_time = smi130_gyro->timestamp =
+		smi130_gyro->fifo_time - (fifo_framecount-1) * smi130_gyro->time_odr;
+
+	smi130_gyro->gyro_count += fifo_framecount;
+	del = smi130_gyro->fifo_time - smi130_gyro->base_time;
+	time_internal = div64_u64(del, smi130_gyro->gyro_count);
+	data_cnt++;
+	if (data_cnt == 1)
+		time_odr = smi130_gyro->time_odr;
+	if (time_internal > time_odr) {
+		if (time_internal - time_odr > div64_u64 (time_odr, 200))
+			time_internal = time_odr + div64_u64(time_odr, 200);
+	} else {
+		if (time_odr - time_internal > div64_u64(time_odr, 200))
+			time_internal = time_odr - div64_u64(time_odr, 200);
+	}
+
+	/* Select X Y Z axis data output for every frame */
+	for (i = 0; i < fifo_framecount; i++) {
+		if (smi130_gyro->debug_level & 0x01)
+			printk(KERN_INFO "smi_gyro time =%llu fifo_time = %llu time_internal = %llu smi_gyro->count= %llu count = %d",
+		smi130_gyro->timestamp, smi130_gyro->fifo_time,
+		time_internal, smi130_gyro->gyro_count, fifo_framecount);
+		ts = ns_to_timespec(smi130_gyro->timestamp);
+		gyro_lsb.datax =
+		((unsigned char)fifo_data_out[i * f_len + 1] << 8
+				| (unsigned char)fifo_data_out[i * f_len + 0]);
+		gyro_lsb.datay =
+		((unsigned char)fifo_data_out[i * f_len + 3] << 8
+				| (unsigned char)fifo_data_out[i * f_len + 2]);
+		gyro_lsb.dataz =
+		((unsigned char)fifo_data_out[i * f_len + 5] << 8
+				| (unsigned char)fifo_data_out[i * f_len + 4]);
+		smi130_gyro_remap_sensor_data(&gyro_lsb, smi130_gyro);
+		input_event(smi130_gyro->input, EV_MSC, MSC_TIME,
+		ts.tv_sec);
+		input_event(smi130_gyro->input, EV_MSC, MSC_TIME,
+		ts.tv_nsec);
+		input_event(smi130_gyro->input, EV_MSC,
+			MSC_GESTURE, gyro_lsb.datax);
+		input_event(smi130_gyro->input, EV_MSC,
+			MSC_RAW, gyro_lsb.datay);
+		input_event(smi130_gyro->input, EV_MSC,
+			MSC_SCAN, gyro_lsb.dataz);
+		input_sync(smi130_gyro->input);
+		smi130_gyro->timestamp += time_internal - sample_drift_offset;
+	}
+	drift_time = smi130_gyro->timestamp - smi130_gyro->fifo_time;
+	if (data_cnt % 20 == 0) {
+		if (ABS(drift_time) > div64_u64(time_odr, 5)) {
+			sample_drift_offset =
+		div64_s64(drift_time, smi130_gyro->gyro_count - pre_data_cnt);
+			pre_data_cnt = smi130_gyro->gyro_count;
+			time_odr = time_internal;
+		}
+	}
+}
+
+
+static enum hrtimer_restart reportdata_timer_fun(
+	struct hrtimer *hrtimer)
+{
+	struct smi_gyro_client_data *client_data =
+		container_of(hrtimer, struct smi_gyro_client_data, timer);
+	int32_t delay = 0;
+	delay = 10;
+	queue_work(reportdata_wq, &(client_data->report_data_work));
+	client_data->work_delay_kt = ns_to_ktime(delay*1000000);
+	hrtimer_forward(hrtimer, ktime_get(), client_data->work_delay_kt);
+
+	return HRTIMER_RESTART;
+}
+
+static ssize_t smi_gyro_show_enable_timer(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "%d\n", client_data->is_timer_running);
+}
+
+static ssize_t smi_gyro_store_enable_timer(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	if (data) {
+		if (0 == client_data->is_timer_running) {
+			hrtimer_start(&client_data->timer,
+				ns_to_ktime(10000000),
+				HRTIMER_MODE_REL);
+			client_data->is_timer_running = 1;
+			client_data->base_time = 0;
+			client_data->timestamp = 0;
+			client_data->gyro_count = 0;
+		}
+	} else {
+		if (1 == client_data->is_timer_running) {
+			hrtimer_cancel(&client_data->timer);
+			client_data->is_timer_running = 0;
+			client_data->base_time = 0;
+			client_data->timestamp = 0;
+			client_data->gyro_count = 0;
+		}
+	}
+	return count;
+}
+
+static ssize_t smi130_gyro_show_debug_level(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	err = snprintf(buf, 8, "%d\n", client_data->debug_level);
+	return err;
+}
+static ssize_t smi130_gyro_store_debug_level(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int32_t ret = 0;
+	unsigned long data;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	ret = kstrtoul(buf, 16, &data);
+	if (ret)
+		return ret;
+	client_data->debug_level = (uint8_t)data;
+	return count;
+}
+
+static int smi_gyro_set_soft_reset(struct i2c_client *client)
+{
+	int err = 0;
+	unsigned char data = SMI_GYRO_SOFT_RESET_VALUE;
+	err = smi_gyro_i2c_write(client, SMI130_GYRO_BGW_SOFTRESET_ADDR, &data, 1);
+	return err;
+}
+
+static ssize_t smi_gyro_show_chip_id(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, 16, "%d\n", SENSOR_CHIP_ID_SMI_GYRO);
+}
+
+static ssize_t smi_gyro_show_op_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	u8 op_mode = 0xff;
+
+	mutex_lock(&client_data->mutex_op_mode);
+	SMI_GYRO_CALL_API(get_mode)(&op_mode);
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	ret = snprintf(buf, 16, "%d\n", op_mode);
+
+	return ret;
+}
+
+static ssize_t smi_gyro_store_op_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+
+	unsigned long op_mode;
+
+	err = kstrtoul(buf, 10, &op_mode);
+	if (err)
+		return err;
+	mutex_lock(&client_data->mutex_op_mode);
+
+	err = SMI_GYRO_CALL_API(set_mode)(op_mode);
+
+	mutex_unlock(&client_data->mutex_op_mode);
+
+	if (err)
+		return err;
+	else
+		return count;
+}
+
+
+
+static ssize_t smi_gyro_show_value(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	int count;
+
+	struct smi130_gyro_data_t value_data;
+	SMI_GYRO_CALL_API(get_dataXYZ)(&value_data);
+	/*SMI130_GYRO sensor raw data remapping*/
+	smi130_gyro_remap_sensor_data(&value_data, client_data);
+
+	count = snprintf(buf, 96, "%hd %hd %hd\n",
+				value_data.datax,
+				value_data.datay,
+				value_data.dataz);
+
+	return count;
+}
+
+static ssize_t smi_gyro_show_range(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char range = 0;
+	SMI_GYRO_CALL_API(get_range_reg)(&range);
+	err = snprintf(buf, 16, "%d\n", range);
+	return err;
+}
+
+static ssize_t smi_gyro_store_range(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long range;
+	err = kstrtoul(buf, 10, &range);
+	if (err)
+		return err;
+	SMI_GYRO_CALL_API(set_range_reg)(range);
+	return count;
+}
+
+/*
+ * decimation	odr	filter bandwidth	bw reg bits
+ * 20		100HZ	32HZ			7
+ * 10		200HZ	64HZ			6
+ * 20		100HZ	12HZ			5
+ * 10		200HZ	23HZ			4
+ * 5		400HZ	47HZ			3
+ * 2		1000HZ	116HZ			2
+ * 0		2000HZ	230HZ			1
+ * 0		2000HZ	Unfiltered(523HZ)	0
+ */
+
+static const uint64_t odr_map[8] = {
+500000, 500000, 1000000, 2500000, 5000000, 10000000, 5000000, 10000000};
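+
+/*
+ * odr_map[bw_bits] is the nominal output data period in nanoseconds for the
+ * bandwidth register value listed above, e.g. bw_bits 7 -> 100 Hz ->
+ * 10000000 ns; it feeds time_odr for the FIFO timestamp reconstruction.
+ */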
+
+static ssize_t smi_gyro_show_bandwidth(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char bandwidth = 0;
+	SMI_GYRO_CALL_API(get_bw)(&bandwidth);
+	err = snprintf(buf, 16, "%d\n", bandwidth);
+	return err;
+}
+
+static ssize_t smi_gyro_store_bandwidth(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	unsigned long bandwidth;
+	u8 op_mode = 0xff;
+	err = kstrtoul(buf, 10, &bandwidth);
+	if (err)
+		return err;
+	/* set the bandwidth only while op_mode == 0 */
+	err = SMI_GYRO_CALL_API(get_mode)(&op_mode);
+	if (op_mode == 0) {
+		err += SMI_GYRO_CALL_API(set_bw)(bandwidth);
+	} else {
+		err += SMI_GYRO_CALL_API(set_mode)(0);
+		err += SMI_GYRO_CALL_API(set_bw)(bandwidth);
+		smi130_gyro_delay(1);
+		err += SMI_GYRO_CALL_API(set_mode)(2);
+		smi130_gyro_delay(3);
+	}
+
+	if (err)
+		PERR("set failed");
+	client_data->time_odr = odr_map[bandwidth];
+	client_data->base_time = 0;
+	client_data->gyro_count = 0;
+	return count;
+}
+
+
+static ssize_t smi_gyro_show_enable(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	int err;
+
+	mutex_lock(&client_data->mutex_enable);
+	err = snprintf(buf, 16, "%d\n", client_data->enable);
+	mutex_unlock(&client_data->mutex_enable);
+	return err;
+}
+
+static ssize_t smi_gyro_store_enable(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	data = data ? 1 : 0;
+	mutex_lock(&client_data->mutex_enable);
+	if (data != client_data->enable) {
+		if (data) {
+			schedule_delayed_work(
+					&client_data->work,
+					msecs_to_jiffies(atomic_read(
+							&client_data->delay)));
+		} else {
+			cancel_delayed_work_sync(&client_data->work);
+		}
+
+		client_data->enable = data;
+	}
+	mutex_unlock(&client_data->mutex_enable);
+
+	return count;
+}
+
+static ssize_t smi_gyro_show_delay(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+
+	return snprintf(buf, 16, "%d\n", atomic_read(&client_data->delay));
+
+}
+
+static ssize_t smi_gyro_store_delay(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int err;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+
+	err = kstrtoul(buf, 10, &data);
+	if (err)
+		return err;
+
+	if (data == 0) {
+		err = -EINVAL;
+		return err;
+	}
+
+	if (data < SMI_GYRO_DELAY_MIN)
+		data = SMI_GYRO_DELAY_MIN;
+
+	atomic_set(&client_data->delay, data);
+
+	return count;
+}
+
+
+static ssize_t smi_gyro_store_fastoffset_en(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long fastoffset_en;
+	err = kstrtoul(buf, 10, &fastoffset_en);
+	if (err)
+		return err;
+	if (fastoffset_en) {
+
+#ifdef CONFIG_SENSORS_BMI058
+		SMI_GYRO_CALL_API(set_fast_offset_en_ch)(BMI058_X_AXIS, 1);
+		SMI_GYRO_CALL_API(set_fast_offset_en_ch)(BMI058_Y_AXIS, 1);
+#else
+		SMI_GYRO_CALL_API(set_fast_offset_en_ch)(SMI130_GYRO_X_AXIS, 1);
+		SMI_GYRO_CALL_API(set_fast_offset_en_ch)(SMI130_GYRO_Y_AXIS, 1);
+#endif
+
+		SMI_GYRO_CALL_API(set_fast_offset_en_ch)(SMI130_GYRO_Z_AXIS, 1);
+		SMI_GYRO_CALL_API(enable_fast_offset)();
+	}
+	return count;
+}
+
+static ssize_t smi_gyro_store_slowoffset_en(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long slowoffset_en;
+	err = kstrtoul(buf, 10, &slowoffset_en);
+	if (err)
+		return err;
+	if (slowoffset_en) {
+		SMI_GYRO_CALL_API(set_slow_offset_th)(3);
+		SMI_GYRO_CALL_API(set_slow_offset_dur)(0);
+#ifdef CONFIG_SENSORS_BMI058
+		SMI_GYRO_CALL_API(set_slow_offset_en_ch)(BMI058_X_AXIS, 1);
+		SMI_GYRO_CALL_API(set_slow_offset_en_ch)(BMI058_Y_AXIS, 1);
+#else
+		SMI_GYRO_CALL_API(set_slow_offset_en_ch)(SMI130_GYRO_X_AXIS, 1);
+		SMI_GYRO_CALL_API(set_slow_offset_en_ch)(SMI130_GYRO_Y_AXIS, 1);
+#endif
+		SMI_GYRO_CALL_API(set_slow_offset_en_ch)(SMI130_GYRO_Z_AXIS, 1);
+	} else {
+#ifdef CONFIG_SENSORS_BMI058
+	SMI_GYRO_CALL_API(set_slow_offset_en_ch)(BMI058_X_AXIS, 0);
+	SMI_GYRO_CALL_API(set_slow_offset_en_ch)(BMI058_Y_AXIS, 0);
+#else
+	SMI_GYRO_CALL_API(set_slow_offset_en_ch)(SMI130_GYRO_X_AXIS, 0);
+	SMI_GYRO_CALL_API(set_slow_offset_en_ch)(SMI130_GYRO_Y_AXIS, 0);
+#endif
+	SMI_GYRO_CALL_API(set_slow_offset_en_ch)(SMI130_GYRO_Z_AXIS, 0);
+	}
+
+	return count;
+}
+
+static ssize_t smi_gyro_show_selftest(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char selftest;
+	SMI_GYRO_CALL_API(selftest)(&selftest);
+	err = snprintf(buf, 16, "%d\n", selftest);
+	return err;
+}
+
+static ssize_t smi_gyro_show_sleepdur(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char sleepdur;
+	SMI_GYRO_CALL_API(get_sleepdur)(&sleepdur);
+	err = snprintf(buf, 16, "%d\n", sleepdur);
+	return err;
+}
+
+static ssize_t smi_gyro_store_sleepdur(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long sleepdur;
+	err = kstrtoul(buf, 10, &sleepdur);
+	if (err)
+		return err;
+	SMI_GYRO_CALL_API(set_sleepdur)(sleepdur);
+	return count;
+}
+
+static ssize_t smi_gyro_show_autosleepdur(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char autosleepdur;
+	SMI_GYRO_CALL_API(get_autosleepdur)(&autosleepdur);
+	err = snprintf(buf, 16, "%d\n", autosleepdur);
+	return err;
+}
+
+static ssize_t smi_gyro_store_autosleepdur(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long autosleepdur;
+	unsigned char bandwidth;
+	err = kstrtoul(buf, 10, &autosleepdur);
+	if (err)
+		return err;
+	SMI_GYRO_CALL_API(get_bw)(&bandwidth);
+	SMI_GYRO_CALL_API(set_autosleepdur)(autosleepdur, bandwidth);
+	return count;
+}
+
+static ssize_t smi_gyro_show_place(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	int place = BOSCH_SENSOR_PLACE_UNKNOWN;
+
+	if (NULL != client_data->bosch_pd)
+		place = client_data->bosch_pd->place;
+
+	return snprintf(buf, 16, "%d\n", place);
+}
+
+
+#ifdef SMI_GYRO_DEBUG
+static ssize_t smi_gyro_store_softreset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long softreset;
+	err = kstrtoul(buf, 10, &softreset);
+	if (err)
+		return err;
+	SMI_GYRO_CALL_API(set_soft_reset)();
+	return count;
+}
+
+static ssize_t smi_gyro_show_dumpreg(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	size_t count = 0;
+	u8 reg[0x40];
+	int i;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+
+	for (i = 0; i < 0x40; i++) {
+		smi_gyro_i2c_read(client_data->client, i, reg+i, 1);
+
+		count += snprintf(&buf[count], 48, "0x%x: 0x%x\n", i, reg[i]);
+	}
+	return count;
+}
+#endif
+
+#ifdef SMI_GYRO_USE_FIFO
+static ssize_t smi_gyro_show_fifo_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char fifo_mode;
+	SMI_GYRO_CALL_API(get_fifo_mode)(&fifo_mode);
+	err = snprintf(buf, 16, "%d\n", fifo_mode);
+	return err;
+}
+
+static ssize_t smi_gyro_store_fifo_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err;
+	unsigned long fifo_mode;
+	err = kstrtoul(buf, 10, &fifo_mode);
+	if (err)
+		return err;
+	SMI_GYRO_CALL_API(set_fifo_mode)(fifo_mode);
+	return count;
+}
+
+static ssize_t smi_gyro_show_fifo_framecount(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char fifo_framecount;
+	SMI_GYRO_CALL_API(get_fifo_framecount)(&fifo_framecount);
+	err = snprintf(buf, 32, "%d\n", fifo_framecount);
+	return err;
+}
+
+static ssize_t smi_gyro_store_fifo_framecount(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned long data;
+	int error;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	error = kstrtoul(buf, 10, &data);
+	if (error)
+		return error;
+	client_data->fifo_count = (unsigned int) data;
+
+	return count;
+}
+
+static ssize_t smi_gyro_show_fifo_overrun(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char fifo_overrun;
+	SMI_GYRO_CALL_API(get_fifo_overrun)(&fifo_overrun);
+	err = snprintf(buf, 16, "%d\n", fifo_overrun);
+	return err;
+}
+
+static ssize_t smi_gyro_show_fifo_data_frame(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned char f_len = 0;
+	unsigned char fifo_framecount;
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+
+	if (client_data->fifo_datasel)
+		/*Select one axis data output for every fifo frame*/
+		f_len = 2;
+	else
+		/*Select X Y Z axis data output for every fifo frame*/
+		f_len = 6;
+
+	if (SMI_GYRO_CALL_API(get_fifo_framecount)(&fifo_framecount) < 0) {
+		PERR("smi130_gyro_get_fifo_framecount err\n");
+		return -EINVAL;
+	}
+	if (fifo_framecount == 0)
+		return 0;
+
+	smi_gyro_i2c_burst_read(client_data->client, SMI130_GYRO_FIFO_DATA_ADDR,
+			buf, fifo_framecount * f_len);
+	return fifo_framecount * f_len;
+}
+
+/*!
+ * @brief show fifo_data_sel axis definition(Android definition, not sensor HW reg).
+ * 0--> x, y, z axis fifo data for every frame
+ * 1--> only x axis fifo data for every frame
+ * 2--> only y axis fifo data for every frame
+ * 3--> only z axis fifo data for every frame
+ */
+static ssize_t smi_gyro_show_fifo_data_sel(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char fifo_data_sel;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smi_gyro_client_data *client_data = i2c_get_clientdata(client);
+	signed char place = BOSCH_SENSOR_PLACE_UNKNOWN;
+
+	SMI_GYRO_CALL_API(get_fifo_data_sel)(&fifo_data_sel);
+
+	/* remap fifo_data_sel if a virtual place is defined in the BSP files */
+	if ((NULL != client_data->bosch_pd) &&
+		(BOSCH_SENSOR_PLACE_UNKNOWN != client_data->bosch_pd->place)) {
+		place = client_data->bosch_pd->place;
+		/* a sensor with place 0 does not need to be remapped */
+		if ((place > 0) && (place < MAX_AXIS_REMAP_TAB_SZ)) {
+			if (SMI130_GYRO_FIFO_DAT_SEL_X == fifo_data_sel)
+				/* SMI130_GYRO_FIFO_DAT_SEL_X: 1, Y:2, Z:3;
+				*bosch_axis_remap_tab_dft[i].src_x:0, y:1, z:2
+				*so we need to +1*/
+				fifo_data_sel =
+					bosch_axis_remap_tab_dft[place].src_x + 1;
+
+			else if (SMI130_GYRO_FIFO_DAT_SEL_Y == fifo_data_sel)
+				fifo_data_sel =
+					bosch_axis_remap_tab_dft[place].src_y + 1;
+		}
+
+	}
+
+	err = snprintf(buf, 16, "%d\n", fifo_data_sel);
+	return err;
+}
+
+/*!
+ * @brief store fifo_data_sel axis definition(Android definition, not sensor HW reg).
+ * 0--> x, y, z axis fifo data for every frame
+ * 1--> only x axis fifo data for every frame
+ * 2--> only y axis fifo data for every frame
+ * 3--> only z axis fifo data for every frame
+ */
+static ssize_t smi_gyro_store_fifo_data_sel(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+
+{
+	int err;
+	unsigned long fifo_data_sel;
+
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	signed char place;
+
+	err = kstrtoul(buf, 10, &fifo_data_sel);
+	if (err)
+		return err;
+
+	/*save fifo_data_sel(android axis definition)*/
+	client_data->fifo_datasel = (unsigned char) fifo_data_sel;
+
+	/* remap fifo_data_sel if a virtual place is defined */
+	if ((NULL != client_data->bosch_pd) &&
+		(BOSCH_SENSOR_PLACE_UNKNOWN != client_data->bosch_pd->place)) {
+		place = client_data->bosch_pd->place;
+		/* a sensor with place 0 does not need to be remapped */
+		if ((place > 0) && (place < MAX_AXIS_REMAP_TAB_SZ)) {
+			/* places P1, P3, P5, P7 need X/Y axis reversal */
+			/* SMI130_GYRO_FIFO_DAT_SEL_X: 1, Y:2, Z:3;
+			  * but bosch_axis_remap_tab_dft[i].src_x:0, y:1, z:2
+			  * so we need to +1*/
+			if (SMI130_GYRO_FIFO_DAT_SEL_X == fifo_data_sel)
+				fifo_data_sel =
+					bosch_axis_remap_tab_dft[place].src_x + 1;
+
+			else if (SMI130_GYRO_FIFO_DAT_SEL_Y == fifo_data_sel)
+				fifo_data_sel =
+					bosch_axis_remap_tab_dft[place].src_y + 1;
+		}
+	}
+
+	if (SMI_GYRO_CALL_API(set_fifo_data_sel)(fifo_data_sel) < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t smi_gyro_show_fifo_tag(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int err;
+	unsigned char fifo_tag;
+	SMI_GYRO_CALL_API(get_fifo_tag)(&fifo_tag);
+	err = snprintf(buf, 16, "%d\n", fifo_tag);
+	return err;
+}
+
+static ssize_t smi_gyro_store_fifo_tag(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+
+{
+	int err;
+	unsigned long fifo_tag;
+	err = kstrtoul(buf, 10, &fifo_tag);
+	if (err)
+		return err;
+	SMI_GYRO_CALL_API(set_fifo_tag)(fifo_tag);
+	return count;
+}
+#endif
+
+static ssize_t smi130_gyro_driver_version_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct input_dev *input = to_input_dev(dev);
+	struct smi_gyro_client_data *client_data = input_get_drvdata(input);
+	int ret;
+
+	if (client_data == NULL) {
+		printk(KERN_ERR "Invalid client_data pointer");
+		return -ENODEV;
+	}
+
+	ret = snprintf(buf, 128, "Driver version: %s\n",
+			DRIVER_VERSION);
+	return ret;
+}
+static DEVICE_ATTR(chip_id, S_IRUSR,
+		smi_gyro_show_chip_id, NULL);
+static DEVICE_ATTR(op_mode, S_IRUGO | S_IWUSR,
+		smi_gyro_show_op_mode, smi_gyro_store_op_mode);
+static DEVICE_ATTR(value, S_IRUSR,
+		smi_gyro_show_value, NULL);
+static DEVICE_ATTR(range, S_IRUGO | S_IWUSR,
+		smi_gyro_show_range, smi_gyro_store_range);
+static DEVICE_ATTR(bandwidth, S_IRUGO | S_IWUSR,
+		smi_gyro_show_bandwidth, smi_gyro_store_bandwidth);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+		smi_gyro_show_enable, smi_gyro_store_enable);
+static DEVICE_ATTR(delay, S_IRUGO | S_IWUSR,
+		smi_gyro_show_delay, smi_gyro_store_delay);
+static DEVICE_ATTR(fastoffset_en, S_IWUSR,
+		NULL, smi_gyro_store_fastoffset_en);
+static DEVICE_ATTR(slowoffset_en, S_IWUSR,
+		NULL, smi_gyro_store_slowoffset_en);
+static DEVICE_ATTR(selftest, S_IRUGO,
+		smi_gyro_show_selftest, NULL);
+static DEVICE_ATTR(sleepdur, S_IRUGO | S_IWUSR,
+		smi_gyro_show_sleepdur, smi_gyro_store_sleepdur);
+static DEVICE_ATTR(autosleepdur, S_IRUGO | S_IWUSR,
+		smi_gyro_show_autosleepdur, smi_gyro_store_autosleepdur);
+static DEVICE_ATTR(place, S_IRUSR,
+		smi_gyro_show_place, NULL);
+static DEVICE_ATTR(enable_timer, S_IRUGO | S_IWUSR,
+		smi_gyro_show_enable_timer, smi_gyro_store_enable_timer);
+static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR,
+		smi130_gyro_show_debug_level, smi130_gyro_store_debug_level);
+static DEVICE_ATTR(driver_version, S_IRUSR,
+		smi130_gyro_driver_version_show, NULL);
+#ifdef SMI_GYRO_DEBUG
+static DEVICE_ATTR(softreset, S_IWUSR,
+		NULL, smi_gyro_store_softreset);
+static DEVICE_ATTR(regdump, S_IRUSR,
+		smi_gyro_show_dumpreg, NULL);
+#endif
+#ifdef SMI_GYRO_USE_FIFO
+static DEVICE_ATTR(fifo_mode, S_IRUGO | S_IWUSR,
+		smi_gyro_show_fifo_mode, smi_gyro_store_fifo_mode);
+static DEVICE_ATTR(fifo_framecount, S_IRUGO | S_IWUSR,
+		smi_gyro_show_fifo_framecount, smi_gyro_store_fifo_framecount);
+static DEVICE_ATTR(fifo_overrun, S_IRUGO,
+		smi_gyro_show_fifo_overrun, NULL);
+static DEVICE_ATTR(fifo_data_frame, S_IRUSR,
+		smi_gyro_show_fifo_data_frame, NULL);
+static DEVICE_ATTR(fifo_data_sel, S_IRUGO | S_IWUSR,
+		smi_gyro_show_fifo_data_sel, smi_gyro_store_fifo_data_sel);
+static DEVICE_ATTR(fifo_tag, S_IRUGO | S_IWUSR,
+		smi_gyro_show_fifo_tag, smi_gyro_store_fifo_tag);
+#endif
+
+static struct attribute *smi_gyro_attributes[] = {
+	&dev_attr_chip_id.attr,
+	&dev_attr_op_mode.attr,
+	&dev_attr_value.attr,
+	&dev_attr_range.attr,
+	&dev_attr_bandwidth.attr,
+	&dev_attr_enable.attr,
+	&dev_attr_delay.attr,
+	&dev_attr_fastoffset_en.attr,
+	&dev_attr_slowoffset_en.attr,
+	&dev_attr_selftest.attr,
+	&dev_attr_sleepdur.attr,
+	&dev_attr_autosleepdur.attr,
+	&dev_attr_place.attr,
+	&dev_attr_enable_timer.attr,
+	&dev_attr_debug_level.attr,
+	&dev_attr_driver_version.attr,
+#ifdef SMI_GYRO_DEBUG
+	&dev_attr_softreset.attr,
+	&dev_attr_regdump.attr,
+#endif
+#ifdef SMI_GYRO_USE_FIFO
+	&dev_attr_fifo_mode.attr,
+	&dev_attr_fifo_framecount.attr,
+	&dev_attr_fifo_overrun.attr,
+	&dev_attr_fifo_data_frame.attr,
+	&dev_attr_fifo_data_sel.attr,
+	&dev_attr_fifo_tag.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group smi_gyro_attribute_group = {
+	.attrs = smi_gyro_attributes
+};
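+
+/*
+ * The attributes above are created on the input device's kobject, so from
+ * userspace they typically appear under the corresponding input node, e.g.
+ * (paths are illustrative, the input index varies per system):
+ *   cat /sys/class/input/inputX/value        # one raw X Y Z sample
+ *   echo 100 > /sys/class/input/inputX/delay # polling period in ms
+ *   echo 1 > /sys/class/input/inputX/enable  # start the polling worker
+ */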
+
+
+static int smi_gyro_input_init(struct smi_gyro_client_data *client_data)
+{
+	struct input_dev *dev;
+	int err = 0;
+
+	dev = input_allocate_device();
+	if (NULL == dev)
+		return -ENOMEM;
+
+	dev->name = SENSOR_NAME;
+	dev->id.bustype = BUS_I2C;
+
+	input_set_capability(dev, EV_ABS, ABS_MISC);
+	input_set_abs_params(dev, ABS_X, SMI_GYRO_VALUE_MIN, SMI_GYRO_VALUE_MAX, 0, 0);
+	input_set_abs_params(dev, ABS_Y, SMI_GYRO_VALUE_MIN, SMI_GYRO_VALUE_MAX, 0, 0);
+	input_set_abs_params(dev, ABS_Z, SMI_GYRO_VALUE_MIN, SMI_GYRO_VALUE_MAX, 0, 0);
+	input_set_capability(dev, EV_MSC, MSC_GESTURE);
+	input_set_capability(dev, EV_MSC, MSC_RAW);
+	input_set_capability(dev, EV_MSC, MSC_SCAN);
+	input_set_capability(dev, EV_MSC, MSC_TIME);
+	input_set_drvdata(dev, client_data);
+
+	err = input_register_device(dev);
+	if (err < 0) {
+		input_free_device(dev);
+		return err;
+	}
+	client_data->input = dev;
+
+	return 0;
+}
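+
+/*
+ * The input device carries two reporting paths: the polled worker reports
+ * ABS_X/ABS_Y/ABS_Z, while the FIFO and data-ready paths report EV_MSC
+ * events (MSC_TIME for the timestamp, MSC_GESTURE/MSC_RAW/MSC_SCAN for the
+ * X/Y/Z samples), which is why both capability sets are declared above.
+ */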
+
+static void smi_gyro_input_destroy(struct smi_gyro_client_data *client_data)
+{
+	struct input_dev *dev = client_data->input;
+
+	input_unregister_device(dev);
+	input_free_device(dev);
+}
+
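+/*
+ * Data-ready interrupt path (sketch of the flow implemented below): the hard
+ * IRQ handler only records a boottime timestamp and queues irq_work; the
+ * worker then reads one X/Y/Z sample over I2C and reports it as EV_MSC
+ * events (MSC_TIME, MSC_GESTURE, MSC_RAW, MSC_SCAN) on the input device.
+ */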
+#if defined(SMI130_GYRO_ENABLE_INT1) || defined(SMI130_GYRO_ENABLE_INT2)
+static void smi130_gyro_irq_work_func(struct work_struct *work)
+{
+	struct smi_gyro_client_data *client_data = container_of(work,
+		struct smi_gyro_client_data, irq_work);
+	struct smi130_gyro_data_t gyro_data;
+	struct timespec ts;
+	ts = ns_to_timespec(client_data->timestamp);
+
+	SMI_GYRO_CALL_API(get_dataXYZ)(&gyro_data);
+	/*remapping for SMI130_GYRO sensor*/
+	smi130_gyro_remap_sensor_data(&gyro_data, client_data);
+	input_event(client_data->input, EV_MSC, MSC_TIME,
+		ts.tv_sec);
+	input_event(client_data->input, EV_MSC, MSC_TIME,
+		ts.tv_nsec);
+	input_event(client_data->input, EV_MSC,
+		MSC_GESTURE, gyro_data.datax);
+	input_event(client_data->input, EV_MSC,
+		MSC_RAW, gyro_data.datay);
+	input_event(client_data->input, EV_MSC,
+		MSC_SCAN, gyro_data.dataz);
+	input_sync(client_data->input);
+
+}
+
+static irqreturn_t smi_gyro_irq_handler(int irq, void *handle)
+{
+	struct smi_gyro_client_data *client_data = handle;
+	client_data->timestamp = smi130_gyro_get_alarm_timestamp();
+	schedule_work(&client_data->irq_work);
+	return IRQ_HANDLED;
+}
+#endif
+static int smi_gyro_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	int err = 0;
+	struct smi_gyro_client_data *client_data = NULL;
+	PINFO("function entrance");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		PERR("i2c_check_functionality error!");
+		err = -EIO;
+		goto exit_err_clean;
+	}
+
+	if (NULL == smi_gyro_client) {
+		smi_gyro_client = client;
+	} else {
+		PERR("this driver does not support multiple clients");
+		err = -EINVAL;
+		goto exit_err_clean;
+	}
+
+	/* check chip id */
+	err = smi_gyro_check_chip_id(client);
+	if (!err) {
+		PINFO("Bosch Sensortec Device %s detected", SENSOR_NAME);
+	} else {
+		PERR("Bosch Sensortec Device not found, chip id mismatch");
+		err = -1;
+		goto exit_err_clean;
+	}
+
+	/* do soft reset */
+	smi130_gyro_delay(5);
+	err = smi_gyro_set_soft_reset(client);
+	if (err < 0) {
+		PERR("error during soft reset!\n");
+		err = -EINVAL;
+		goto exit_err_clean;
+	}
+	smi130_gyro_delay(30);
+
+
+	client_data = kzalloc(sizeof(struct smi_gyro_client_data), GFP_KERNEL);
+	if (NULL == client_data) {
+		PERR("no memory available");
+		err = -ENOMEM;
+		goto exit_err_clean;
+	}
+
+	i2c_set_clientdata(client, client_data);
+	client_data->client = client;
+
+	mutex_init(&client_data->mutex_op_mode);
+	mutex_init(&client_data->mutex_enable);
+
+	/* input device init */
+	err = smi_gyro_input_init(client_data);
+	if (err < 0)
+		goto exit_err_clean;
+
+	/* sysfs node creation */
+	err = sysfs_create_group(&client_data->input->dev.kobj,
+			&smi_gyro_attribute_group);
+
+	if (err < 0)
+		goto exit_err_sysfs;
+
+	if (NULL != client->dev.platform_data) {
+		client_data->bosch_pd = kzalloc(sizeof(*client_data->bosch_pd),
+				GFP_KERNEL);
+
+		if (NULL != client_data->bosch_pd) {
+			memcpy(client_data->bosch_pd, client->dev.platform_data,
+					sizeof(*client_data->bosch_pd));
+			PINFO("%s sensor driver set place: p%d",
+					SENSOR_NAME,
+					client_data->bosch_pd->place);
+		}
+	}
+
+	/* workqueue init */
+	INIT_DELAYED_WORK(&client_data->work, smi_gyro_work_func);
+	atomic_set(&client_data->delay, SMI_GYRO_DELAY_DEFAULT);
+
+	/* h/w init */
+	client_data->device.bus_read = smi_gyro_i2c_read_wrapper;
+	client_data->device.bus_write = smi_gyro_i2c_write_wrapper;
+	client_data->device.delay_msec = smi130_gyro_delay;
+	SMI_GYRO_CALL_API(init)(&client_data->device);
+
+	smi_gyro_dump_reg(client);
+
+	client_data->enable = 0;
+	client_data->fifo_datasel = 0;
+	client_data->fifo_count = 0;
+
+	/*workqueue init*/
+	INIT_WORK(&client_data->report_data_work,
+	smi130_gyro_work_func);
+	reportdata_wq = create_singlethread_workqueue("smi130_gyro_wq");
+	if (NULL == reportdata_wq)
+		PERR("failed to create the reportdata_wq %d", -ENOMEM);
+	hrtimer_init(&client_data->timer, CLOCK_MONOTONIC,
+		HRTIMER_MODE_REL);
+	client_data->timer.function = reportdata_timer_fun;
+	client_data->work_delay_kt = ns_to_ktime(10000000);
+	client_data->is_timer_running = 0;
+	client_data->time_odr = 500000;
+#ifdef SMI130_GYRO_ENABLE_INT1
+	err = SMI_GYRO_CALL_API(set_mode)(SMI130_GYRO_MODE_NORMAL);
+	smi130_gyro_delay(5);
+	/*config the interrupt and map the interrupt*/
+	/*high level trigger*/
+	err += smi130_gyro_set_int_lvl(SMI130_GYRO_INT1_DATA, 1);
+	smi130_gyro_delay(5);
+	err += smi130_gyro_set_int_od(SMI130_GYRO_INT1, 0);
+	smi130_gyro_delay(5);
+	err += smi130_gyro_set_int_data(SMI130_GYRO_INT1_DATA, SMI130_GYRO_ENABLE);
+	smi130_gyro_delay(5);
+	err += smi130_gyro_set_data_en(SMI130_GYRO_ENABLE);
+	smi130_gyro_delay(5);
+	/*default odr is 100HZ*/
+	err += SMI_GYRO_CALL_API(set_bw)(7);
+	smi130_gyro_delay(5);
+	if (err)
+		PERR("config sensor data ready interrupt failed");
+#endif
+#ifdef SMI130_GYRO_ENABLE_INT2
+	err = SMI_GYRO_CALL_API(set_mode)(SMI130_GYRO_MODE_NORMAL);
+	/*config the interrupt and map the interrupt*/
+	/*high level trigger*/
+	err += smi130_gyro_set_int_lvl(SMI130_GYRO_INT2_DATA, 1);
+	smi130_gyro_delay(3);
+	err += smi130_gyro_set_int_od(SMI130_GYRO_INT2, 0);
+	smi130_gyro_delay(5);
+	err += smi130_gyro_set_int_data(SMI130_GYRO_INT2_DATA, SMI130_GYRO_ENABLE);
+	smi130_gyro_delay(3);
+	err += smi130_gyro_set_data_en(SMI130_GYRO_ENABLE);
+	/*default odr is 100HZ*/
+	err += SMI_GYRO_CALL_API(set_bw)(7);
+	smi130_gyro_delay(5);
+	if (err)
+		PERR("config sensor data ready interrupt failed");
+#endif
+	err += SMI_GYRO_CALL_API(set_mode)(
+		SMI_GYRO_VAL_NAME(MODE_SUSPEND));
+	if (err < 0)
+		goto exit_err_sysfs;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	client_data->early_suspend_handler.suspend = smi_gyro_early_suspend;
+	client_data->early_suspend_handler.resume = smi_gyro_late_resume;
+	register_early_suspend(&client_data->early_suspend_handler);
+#endif
+#if defined(SMI130_GYRO_ENABLE_INT1) || defined(SMI130_GYRO_ENABLE_INT2)
+	client_data->gpio_pin = of_get_named_gpio_flags(
+		client->dev.of_node,
+		"smi130_gyro,gpio_irq", 0, NULL);
+	PDEBUG("smi130_gyro gpio number:%d\n", client_data->gpio_pin);
+	err = gpio_request_one(client_data->gpio_pin,
+				GPIOF_IN, "smi130_gyro_interrupt");
+	if (err < 0) {
+		PDEBUG("request gpio failed\n");
+		client_data->gpio_pin = 0;
+	}
+	if (client_data->gpio_pin != 0) {
+		err = gpio_direction_input(client_data->gpio_pin);
+		if (err < 0) {
+			PDEBUG("set gpio direction failed\n");
+		}
+		client_data->IRQ = gpio_to_irq(client_data->gpio_pin);
+		err = request_irq(client_data->IRQ, smi_gyro_irq_handler,
+				IRQF_TRIGGER_RISING,
+				SENSOR_NAME, client_data);
+		if (err < 0)
+			PDEBUG("request irq failed\n");
+	}
+	INIT_WORK(&client_data->irq_work, smi130_gyro_irq_work_func);
+#endif
+	PINFO("sensor %s probed successfully", SENSOR_NAME);
+
+	dev_dbg(&client->dev,
+		"i2c_client: %p client_data: %p i2c_device: %p input: %p",
+		client, client_data, &client->dev, client_data->input);
+
+	return 0;
+
+exit_err_sysfs:
+	if (err)
+		smi_gyro_input_destroy(client_data);
+
+exit_err_clean:
+	if (err) {
+		if (client_data != NULL) {
+			kfree(client_data);
+			client_data = NULL;
+		}
+
+		smi_gyro_client = NULL;
+	}
+
+	return err;
+}
+
+static int smi_gyro_pre_suspend(struct i2c_client *client)
+{
+	int err = 0;
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
+	PINFO("function entrance");
+
+	mutex_lock(&client_data->mutex_enable);
+	if (client_data->enable) {
+		cancel_delayed_work_sync(&client_data->work);
+		PINFO("cancel work");
+	}
+	mutex_unlock(&client_data->mutex_enable);
+	if (client_data->is_timer_running) {
+		hrtimer_cancel(&client_data->timer);
+		client_data->base_time = 0;
+		client_data->timestamp = 0;
+		client_data->fifo_time = 0;
+		client_data->gyro_count = 0;
+	}
+	return err;
+}
+
+static int smi_gyro_post_resume(struct i2c_client *client)
+{
+	int err = 0;
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
+
+	PINFO("function entrance");
+	mutex_lock(&client_data->mutex_enable);
+	if (client_data->enable) {
+		schedule_delayed_work(&client_data->work,
+				msecs_to_jiffies(
+					atomic_read(&client_data->delay)));
+	}
+	mutex_unlock(&client_data->mutex_enable);
+	if (client_data->is_timer_running) {
+		hrtimer_start(&client_data->timer,
+					ns_to_ktime(client_data->time_odr),
+			HRTIMER_MODE_REL);
+		client_data->base_time = 0;
+		client_data->timestamp = 0;
+		client_data->is_timer_running = 1;
+		client_data->gyro_count = 0;
+	}
+	return err;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void smi_gyro_early_suspend(struct early_suspend *handler)
+{
+	int err = 0;
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)container_of(handler,
+			struct smi_gyro_client_data, early_suspend_handler);
+	struct i2c_client *client = client_data->client;
+
+	PINFO("function entrance");
+
+	mutex_lock(&client_data->mutex_op_mode);
+	if (client_data->enable) {
+		err = smi_gyro_pre_suspend(client);
+		err = SMI_GYRO_CALL_API(set_mode)(
+				SMI_GYRO_VAL_NAME(MODE_SUSPEND));
+	}
+	mutex_unlock(&client_data->mutex_op_mode);
+}
+
+static void smi_gyro_late_resume(struct early_suspend *handler)
+{
+
+	int err = 0;
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)container_of(handler,
+			struct smi_gyro_client_data, early_suspend_handler);
+	struct i2c_client *client = client_data->client;
+
+	PINFO("function entrance");
+
+	mutex_lock(&client_data->mutex_op_mode);
+
+	if (client_data->enable)
+		err = SMI_GYRO_CALL_API(set_mode)(SMI_GYRO_VAL_NAME(MODE_NORMAL));
+
+	/* post resume operation */
+	smi_gyro_post_resume(client);
+
+	mutex_unlock(&client_data->mutex_op_mode);
+}
+#else
+static int smi_gyro_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	int err = 0;
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
+
+	PINFO("function entrance");
+
+	mutex_lock(&client_data->mutex_op_mode);
+	if (client_data->enable) {
+		err = smi_gyro_pre_suspend(client);
+		err = SMI_GYRO_CALL_API(set_mode)(
+				SMI_GYRO_VAL_NAME(MODE_SUSPEND));
+	}
+	mutex_unlock(&client_data->mutex_op_mode);
+	return err;
+}
+
+static int smi_gyro_resume(struct i2c_client *client)
+{
+
+	int err = 0;
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
+
+	PINFO("function entrance");
+
+	mutex_lock(&client_data->mutex_op_mode);
+
+	if (client_data->enable)
+		err = SMI_GYRO_CALL_API(set_mode)(SMI_GYRO_VAL_NAME(MODE_NORMAL));
+
+	/* post resume operation */
+	smi_gyro_post_resume(client);
+
+	mutex_unlock(&client_data->mutex_op_mode);
+	return err;
+}
+#endif
+
+static void smi_gyro_shutdown(struct i2c_client *client)
+{
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
+
+	mutex_lock(&client_data->mutex_op_mode);
+	SMI_GYRO_CALL_API(set_mode)(
+		SMI_GYRO_VAL_NAME(MODE_DEEPSUSPEND));
+	mutex_unlock(&client_data->mutex_op_mode);
+}
+
+static int smi_gyro_remove(struct i2c_client *client)
+{
+	int err = 0;
+	u8 op_mode;
+
+	struct smi_gyro_client_data *client_data =
+		(struct smi_gyro_client_data *)i2c_get_clientdata(client);
+
+	if (NULL != client_data) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		unregister_early_suspend(&client_data->early_suspend_handler);
+#endif
+		mutex_lock(&client_data->mutex_op_mode);
+		SMI_GYRO_CALL_API(get_mode)(&op_mode);
+		if (SMI_GYRO_VAL_NAME(MODE_NORMAL) == op_mode) {
+			cancel_delayed_work_sync(&client_data->work);
+			PINFO("cancel work");
+		}
+		mutex_unlock(&client_data->mutex_op_mode);
+
+		err = SMI_GYRO_CALL_API(set_mode)(
+				SMI_GYRO_VAL_NAME(MODE_SUSPEND));
+		smi130_gyro_delay(SMI_GYRO_I2C_WRITE_DELAY_TIME);
+
+		sysfs_remove_group(&client_data->input->dev.kobj,
+				&smi_gyro_attribute_group);
+		smi_gyro_input_destroy(client_data);
+		kfree(client_data);
+
+		smi_gyro_client = NULL;
+	}
+
+	return err;
+}
+
+static const struct i2c_device_id smi_gyro_id[] = {
+	{ SENSOR_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, smi_gyro_id);
+static const struct of_device_id smi130_gyro_of_match[] = {
+	{ .compatible = "smi130_gyro", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, smi130_gyro_of_match);
+
+static struct i2c_driver smi_gyro_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = SENSOR_NAME,
+		.of_match_table = smi130_gyro_of_match,
+	},
+	.class = I2C_CLASS_HWMON,
+	.id_table = smi_gyro_id,
+	.probe = smi_gyro_probe,
+	.remove = smi_gyro_remove,
+	.shutdown = smi_gyro_shutdown,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	//.suspend = smi_gyro_suspend,
+	//.resume = smi_gyro_resume,
+#endif
+};
+
+static int __init SMI_GYRO_init(void)
+{
+	return i2c_add_driver(&smi_gyro_driver);
+}
+
+static void __exit SMI_GYRO_exit(void)
+{
+	i2c_del_driver(&smi_gyro_driver);
+}
+
+MODULE_AUTHOR("Contact <contact@bosch-sensortec.com>");
+MODULE_DESCRIPTION("SMI_GYRO GYROSCOPE SENSOR DRIVER");
+MODULE_LICENSE("GPL v2");
+
+module_init(SMI_GYRO_init);
+module_exit(SMI_GYRO_exit);
diff --git a/drivers/input/sensors/smi130/smi130_i2c.c b/drivers/input/sensors/smi130/smi130_i2c.c
new file mode 100644
index 0000000..09c4d29
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_i2c.c
@@ -0,0 +1,472 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename smi130_i2c.c
+ * @date     2014/11/25 14:40
+ * @Modification Date 2018/08/28 18:20
+ * @id       "20f77db"
+ * @version  1.3
+ *
+ * @brief
+ * This file implements the module functions that add
+ * the driver to the I2C core.
+*/
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include "smi130_driver.h"
+
+/*! @defgroup smi130_i2c_src
+ *  @brief smi130 i2c driver module
+ @{*/
+
+static struct i2c_client *smi_client;
+/*!
+ * @brief define i2c read function
+ *
+ * @param client the pointer of i2c client
+ * @param reg_addr register address
+ * @param data the pointer of data buffer
+ * @param len block size need to read
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+/* i2c read routine for API */
+static s8 smi_i2c_read(struct i2c_client *client, u8 reg_addr,
+			u8 *data, u8 len)
+{
+#if !defined SMI_USE_BASIC_I2C_FUNC
+	s32 dummy;
+	if (NULL == client)
+		return -EINVAL;
+
+	while (0 != len--) {
+#ifdef SMI_SMBUS
+		dummy = i2c_smbus_read_byte_data(client, reg_addr);
+		if (dummy < 0) {
+			dev_err(&client->dev, "i2c smbus read error");
+			return -EIO;
+		}
+		*data = (u8)(dummy & 0xff);
+#else
+		dummy = i2c_master_send(client, (char *)&reg_addr, 1);
+		if (dummy < 0) {
+			dev_err(&client->dev, "i2c bus master write error");
+			return -EIO;
+		}
+
+		dummy = i2c_master_recv(client, (char *)data, 1);
+		if (dummy < 0) {
+			dev_err(&client->dev, "i2c bus master read error");
+			return -EIO;
+		}
+#endif
+		reg_addr++;
+		data++;
+	}
+	return 0;
+#else
+	int retry;
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg_addr,
+		},
+
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	for (retry = 0; retry < SMI_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg,
+					ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			usleep_range(SMI_I2C_WRITE_DELAY_TIME * 1000,
+				SMI_I2C_WRITE_DELAY_TIME * 1000);
+	}
+
+	if (SMI_MAX_RETRY_I2C_XFER <= retry) {
+		dev_err(&client->dev, "I2C xfer error");
+		return -EIO;
+	}
+
+	return 0;
+#endif
+}
+
+
+static s8 smi_i2c_burst_read(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u16 len)
+{
+	int retry;
+
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &reg_addr,
+		},
+
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = len,
+			.buf = data,
+		},
+	};
+
+	for (retry = 0; retry < SMI_MAX_RETRY_I2C_XFER; retry++) {
+		if (i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)) > 0)
+			break;
+		else
+			usleep_range(SMI_I2C_WRITE_DELAY_TIME * 1000,
+				SMI_I2C_WRITE_DELAY_TIME * 1000);
+	}
+
+	if (SMI_MAX_RETRY_I2C_XFER <= retry) {
+		dev_err(&client->dev, "I2C xfer error");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+
+/* i2c write routine for */
+static s8 smi_i2c_write(struct i2c_client *client, u8 reg_addr,
+		u8 *data, u8 len)
+{
+#if !defined SMI_USE_BASIC_I2C_FUNC
+	s32 dummy;
+
+#ifndef SMI_SMBUS
+	u8 buffer[2];
+#endif
+
+	if (NULL == client)
+		return -EPERM;
+
+	while (0 != len--) {
+#ifdef SMI_SMBUS
+		dummy = i2c_smbus_write_byte_data(client, reg_addr, *data);
+#else
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		dummy = i2c_master_send(client, (char *)buffer, 2);
+#endif
+		reg_addr++;
+		data++;
+		if (dummy < 0) {
+			dev_err(&client->dev, "error writing i2c bus");
+			return -EPERM;
+		}
+
+	}
+	usleep_range(SMI_I2C_WRITE_DELAY_TIME * 1000,
+	SMI_I2C_WRITE_DELAY_TIME * 1000);
+	return 0;
+#else
+	u8 buffer[2];
+	int retry;
+	struct i2c_msg msg[] = {
+		{
+		 .addr = client->addr,
+		 .flags = 0,
+		 .len = 2,
+		 .buf = buffer,
+		 },
+	};
+
+	while (0 != len--) {
+		buffer[0] = reg_addr;
+		buffer[1] = *data;
+		for (retry = 0; retry < SMI_MAX_RETRY_I2C_XFER; retry++) {
+			if (i2c_transfer(client->adapter, msg,
+						ARRAY_SIZE(msg)) > 0) {
+				break;
+			} else {
+				usleep_range(SMI_I2C_WRITE_DELAY_TIME * 1000,
+				SMI_I2C_WRITE_DELAY_TIME * 1000);
+			}
+		}
+		if (SMI_MAX_RETRY_I2C_XFER <= retry) {
+			dev_err(&client->dev, "I2C xfer error");
+			return -EIO;
+		}
+		reg_addr++;
+		data++;
+	}
+
+	usleep_range(SMI_I2C_WRITE_DELAY_TIME * 1000,
+	SMI_I2C_WRITE_DELAY_TIME * 1000);
+	return 0;
+#endif
+}
+
+
+static s8 smi_i2c_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	int err = 0;
+	err = smi_i2c_read(smi_client, reg_addr, data, len);
+	return err;
+}
+
+static s8 smi_i2c_write_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	int err = 0;
+	err = smi_i2c_write(smi_client, reg_addr, data, len);
+	return err;
+}
+
+s8 smi_burst_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u16 len)
+{
+	int err = 0;
+	err = smi_i2c_burst_read(smi_client, reg_addr, data, len);
+	return err;
+}
+EXPORT_SYMBOL(smi_burst_read_wrapper);
+/*!
+ * @brief SMI probe function via i2c bus
+ *
+ * @param client the pointer of i2c client
+ * @param id the pointer of i2c device id
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static int smi_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int err = 0;
+	struct smi_client_data *client_data = NULL;
+
+	dev_info(&client->dev, "SMI130 i2c function probe entrance");
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "i2c_check_functionality error!");
+		err = -EIO;
+		goto exit_err_clean;
+	}
+
+	if (NULL == smi_client) {
+		smi_client = client;
+	} else {
+		dev_err(&client->dev,
+			"this driver does not support multiple clients");
+		err = -EBUSY;
+		goto exit_err_clean;
+	}
+
+	client_data = kzalloc(sizeof(struct smi_client_data), GFP_KERNEL);
+	if (NULL == client_data) {
+		dev_err(&client->dev, "no memory available");
+		err = -ENOMEM;
+		goto exit_err_clean;
+	}
+
+	client_data->device.bus_read = smi_i2c_read_wrapper;
+	client_data->device.bus_write = smi_i2c_write_wrapper;
+
+	return smi_probe(client_data, &client->dev);
+
+exit_err_clean:
+	if (err)
+		smi_client = NULL;
+	return err;
+}
+/*
+static int smi_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	int err = 0;
+	err = smi_suspend(&client->dev);
+	return err;
+}
+
+static int smi_i2c_resume(struct i2c_client *client)
+{
+	int err = 0;
+
+	err = smi_resume(&client->dev);
+
+	return err;
+}
+*/
+
+static int smi_i2c_remove(struct i2c_client *client)
+{
+	int err = 0;
+	err = smi_remove(&client->dev);
+	smi_client = NULL;
+
+	return err;
+}
+
+
+
+static const struct i2c_device_id smi_id[] = {
+	{SENSOR_NAME, 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, smi_id);
+
+static const struct of_device_id smi130_of_match[] = {
+	{ .compatible = "bosch-sensortec,smi130", },
+	{ .compatible = "smi130", },
+	{ .compatible = "bosch, smi130", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, smi130_of_match);
+
+static struct i2c_driver smi_i2c_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = SENSOR_NAME,
+		.of_match_table = smi130_of_match,
+	},
+	.class = I2C_CLASS_HWMON,
+	.id_table = smi_id,
+	.probe = smi_i2c_probe,
+	.remove = smi_i2c_remove,
+	/*.suspend = smi_i2c_suspend,
+	.resume = smi_i2c_resume,*/
+};
+
+static int __init SMI_i2c_init(void)
+{
+	return i2c_add_driver(&smi_i2c_driver);
+}
+
+static void __exit SMI_i2c_exit(void)
+{
+	i2c_del_driver(&smi_i2c_driver);
+}
+
+MODULE_AUTHOR("Contact <contact@bosch-sensortec.com>");
+MODULE_DESCRIPTION("driver for " SENSOR_NAME);
+MODULE_LICENSE("GPL v2");
+
+module_init(SMI_i2c_init);
+module_exit(SMI_i2c_exit);
+
diff --git a/drivers/input/sensors/smi130/smi130_spi.c b/drivers/input/sensors/smi130/smi130_spi.c
new file mode 100644
index 0000000..b02efbf1
--- /dev/null
+++ b/drivers/input/sensors/smi130/smi130_spi.c
@@ -0,0 +1,402 @@
+/*!
+ * @section LICENSE
+ * (C) Copyright 2011~2016 Bosch Sensortec GmbH All Rights Reserved
+ *
+ * (C) Modification Copyright 2018 Robert Bosch Kft  All Rights Reserved
+ *
+ * This software program is licensed subject to the GNU General
+ * Public License (GPL).Version 2,June 1991,
+ * available at http://www.fsf.org/copyleft/gpl.html
+ *
+ * Special: Description of the Software:
+ *
+ * This software module (hereinafter called "Software") and any
+ * information on application-sheets (hereinafter called "Information") is
+ * provided free of charge for the sole purpose to support your application
+ * work. 
+ *
+ * As such, the Software is merely an experimental software, not tested for
+ * safety in the field and only intended for inspiration for further development 
+ * and testing. Any usage in a safety-relevant field of use (like automotive,
+ * seafaring, spacefaring, industrial plants etc.) was not intended, so there are
+ * no precautions for such usage incorporated in the Software.
+ * 
+ * The Software is specifically designed for the exclusive use for Bosch
+ * Sensortec products by personnel who have special experience and training. Do
+ * not use this Software if you do not have the proper experience or training.
+ * 
+ * This Software package is provided as is and without any expressed or
+ * implied warranties, including without limitation, the implied warranties of
+ * merchantability and fitness for a particular purpose.
+ * 
+ * Bosch Sensortec and their representatives and agents deny any liability for
+ * the functional impairment of this Software in terms of fitness, performance
+ * and safety. Bosch Sensortec and their representatives and agents shall not be
+ * liable for any direct or indirect damages or injury, except as otherwise
+ * stipulated in mandatory applicable law.
+ * The Information provided is believed to be accurate and reliable. Bosch
+ * Sensortec assumes no responsibility for the consequences of use of such
+ * Information nor for any infringement of patents or other rights of third
+ * parties which may result from its use.
+ * 
+ *------------------------------------------------------------------------------
+ * The following Product Disclaimer does not apply to the BSX4-HAL-4.1NoFusion Software 
+ * which is licensed under the Apache License, Version 2.0 as stated above.  
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Product Disclaimer
+ *
+ * Common:
+ *
+ * Assessment of Products Returned from Field
+ *
+ * Returned products are considered good if they fulfill the specifications / 
+ * test data for 0-mileage and field listed in this document.
+ *
+ * Engineering Samples
+ * 
+ * Engineering samples are marked with (e) or (E). Samples may vary from the
+ * valid technical specifications of the series product contained in this
+ * data sheet. Therefore, they are not intended or fit for resale to
+ * third parties or for use in end products. Their sole purpose is internal
+ * client testing. The testing of an engineering sample may in no way replace
+ * the testing of a series product. Bosch assumes no liability for the use
+ * of engineering samples. The purchaser shall indemnify Bosch from all claims
+ * arising from the use of engineering samples.
+ *
+ * Intended use
+ *
+ * Provided that SMI130 is used within the conditions (environment, application,
+ * installation, loads) as described in this TCD and the corresponding
+ * agreed upon documents, Bosch ensures that the product complies with
+ * the agreed properties. Agreements beyond this require
+ * the written approval by Bosch. The product is considered fit for the intended
+ * use when the product successfully has passed the tests
+ * in accordance with the TCD and agreed upon documents.
+ *
+ * It is the responsibility of the customer to ensure the proper application
+ * of the product in the overall system/vehicle.
+ *
+ * Bosch does not assume any responsibility for changes to the environment
+ * of the product that deviate from the TCD and the agreed upon documents 
+ * as well as all applications not released by Bosch
+  *
+ * The resale and/or use of products are at the purchaser’s own risk and 
+ * responsibility. The examination and testing of the SMI130 
+ * is the sole responsibility of the purchaser.
+ *
+ * The purchaser shall indemnify Bosch from all third party claims 
+ * arising from any product use not covered by the parameters of 
+ * this product data sheet or not approved by Bosch and reimburse Bosch 
+ * for all costs and damages in connection with such claims.
+ *
+ * The purchaser must monitor the market for the purchased products,
+ * particularly with regard to product safety, and inform Bosch without delay
+ * of all security relevant incidents.
+ *
+ * Application Examples and Hints
+ *
+ * With respect to any application examples, advice, normal values
+ * and/or any information regarding the application of the device,
+ * Bosch hereby disclaims any and all warranties and liabilities of any kind,
+ * including without limitation warranties of
+ * non-infringement of intellectual property rights or copyrights
+ * of any third party.
+ * The information given in this document shall in no event be regarded 
+ * as a guarantee of conditions or characteristics. They are provided
+ * for illustrative purposes only and no evaluation regarding infringement
+ * of intellectual property rights or copyrights or regarding functionality,
+ * performance or error has been made.
+ *
+ * @filename smi130_spi.c
+ * @date     2014/11/25 14:40
+ * @Modification Date 2018/08/28 18:20
+ * @id       "20f77db"
+ * @version  1.3
+ *
+ * @brief
+ * This file implements the module functions that add
+ * the driver to the SPI core.
+*/
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include "smi130_driver.h"
+
+/*! @defgroup smi130_spi_src
+ *  @brief smi130 spi driver module
+ @{*/
+/*! the maximum of transfer buffer size */
+#define SMI_MAX_BUFFER_SIZE      32
+
+static struct spi_device *smi_spi_client;
+
+/*!
+ * @brief define spi write function
+ *
+ * @param dev_addr sensor device address
+ * @param reg_addr register address
+ * @param data the pointer of data buffer
+ * @param len block size need to write
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static s8 smi_spi_write_block(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	struct spi_device *client = smi_spi_client;
+	u8 buffer[SMI_MAX_BUFFER_SIZE + 1];
+	struct spi_transfer xfer = {
+		.tx_buf     = buffer,
+		.len        = len + 1,
+	};
+	struct spi_message msg;
+
+	if (len > SMI_MAX_BUFFER_SIZE)
+		return -EINVAL;
+
+	buffer[0] = reg_addr & 0x7F; /* write: MSB = 0 */
+	memcpy(&buffer[1], data, len);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	return spi_sync(client, &msg);
+}
+
+/*!
+ * @brief define spi read function
+ *
+ * @param dev_addr sensor device address
+ * @param reg_addr register address
+ * @param data the pointer of data buffer
+ * @param len block size need to read
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static s8 smi_spi_read_block(u8 dev_addr, u8 reg_addr, u8 *data, u8 len)
+{
+	struct spi_device *client = smi_spi_client;
+	u8 reg = reg_addr | 0x80; /* read: MSB = 1 */
+	struct spi_transfer xfer[2] = {
+		[0] = {
+			.tx_buf = &reg,
+			.len = 1,
+		},
+		[1] = {
+			.rx_buf = data,
+			.len = len,
+		}
+	};
+	struct spi_message msg;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer[0], &msg);
+	spi_message_add_tail(&xfer[1], &msg);
+	return spi_sync(client, &msg);
+}
+
+s8 smi_burst_read_wrapper(u8 dev_addr, u8 reg_addr, u8 *data, u16 len)
+{
+	struct spi_device *client = smi_spi_client;
+	u8 reg = reg_addr | 0x80; /* read: MSB = 1 */
+	struct spi_transfer xfer[2] = {
+		[0] = {
+			.tx_buf = &reg,
+			.len = 1,
+		},
+		[1] = {
+			.rx_buf = data,
+			.len = len,
+		}
+	};
+	struct spi_message msg;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer[0], &msg);
+	spi_message_add_tail(&xfer[1], &msg);
+	return spi_sync(client, &msg);
+}
+EXPORT_SYMBOL(smi_burst_read_wrapper);
+/*!
+ * @brief SMI probe function via spi bus
+ *
+ * @param client the pointer of spi client
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static int smi_spi_probe(struct spi_device *client)
+{
+	int status;
+	int err = 0;
+	struct smi_client_data *client_data = NULL;
+
+	if (NULL == smi_spi_client) {
+		smi_spi_client = client;
+	} else {
+		dev_err(&client->dev,
+			"This driver does not support multiple clients!\n");
+		return -EBUSY;
+	}
+
+	client->bits_per_word = 8;
+	status = spi_setup(client);
+	if (status < 0) {
+		dev_err(&client->dev, "spi_setup failed!\n");
+		return status;
+	}
+
+	client_data = kzalloc(sizeof(struct smi_client_data), GFP_KERNEL);
+	if (NULL == client_data) {
+		dev_err(&client->dev, "no memory available");
+		err = -ENOMEM;
+		goto exit_err_clean;
+	}
+
+	client_data->device.bus_read = smi_spi_read_block;
+	client_data->device.bus_write = smi_spi_write_block;
+
+	return smi_probe(client_data, &client->dev);
+
+exit_err_clean:
+	if (err)
+		smi_spi_client = NULL;
+	return err;
+}
+
+/*!
+ * @brief shutdown smi device in spi driver
+ *
+ * @param client the pointer of spi client
+ *
+ * @return no return value
+*/
+static void smi_spi_shutdown(struct spi_device *client)
+{
+#ifdef CONFIG_PM
+	smi_suspend(&client->dev);
+#endif
+}
+
+/*!
+ * @brief remove smi spi client
+ *
+ * @param client the pointer of spi client
+ *
+ * @return zero
+ * @retval zero
+*/
+static int smi_spi_remove(struct spi_device *client)
+{
+	int err = 0;
+	err = smi_remove(&client->dev);
+	smi_spi_client = NULL;
+
+	return err;
+}
+
+#ifdef CONFIG_PM
+/*!
+ * @brief suspend smi device in spi driver
+ *
+ * @param dev the pointer of device
+ *
+ * @return zero
+ * @retval zero
+*/
+static int smi_spi_suspend(struct device *dev)
+{
+	int err = 0;
+	err = smi_suspend(dev);
+	return err;
+}
+
+/*!
+ * @brief resume smi device in spi driver
+ *
+ * @param dev the pointer of device
+ *
+ * @return zero
+ * @retval zero
+*/
+static int smi_spi_resume(struct device *dev)
+{
+	int err = 0;
+	/* post resume operation */
+	err = smi_resume(dev);
+
+	return err;
+}
+
+/*!
+ * @brief register spi device power manager hooks
+*/
+static const struct dev_pm_ops smi_spi_pm_ops = {
+	/**< device suspend */
+	.suspend = smi_spi_suspend,
+	/**< device resume */
+	.resume  = smi_spi_resume
+};
+#endif
+
+/*!
+ * @brief register spi device id
+*/
+static const struct spi_device_id smi_id[] = {
+	{ SENSOR_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, smi_id);
+
+/*!
+ * @brief register spi driver hooks
+*/
+static struct spi_driver smi_spi_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name  = SENSOR_NAME,
+#ifdef CONFIG_PM
+		.pm = &smi_spi_pm_ops,
+#endif
+	},
+	.id_table = smi_id,
+	.probe    = smi_spi_probe,
+	.shutdown = smi_spi_shutdown,
+	.remove   = smi_spi_remove
+};
+
+/*!
+ * @brief initialize smi spi module
+ *
+ * @return zero success, non-zero failed
+ * @retval zero success
+ * @retval non-zero failed
+*/
+static int __init smi_spi_init(void)
+{
+	return spi_register_driver(&smi_spi_driver);
+}
+
+/*!
+ * @brief remove smi spi module
+ *
+ * @return no return value
+*/
+static void __exit smi_spi_exit(void)
+{
+	spi_unregister_driver(&smi_spi_driver);
+}
+
+
+MODULE_AUTHOR("Contact <contact@bosch-sensortec.com>");
+MODULE_DESCRIPTION("SMI130 SPI DRIVER");
+MODULE_LICENSE("GPL v2");
+
+module_init(smi_spi_init);
+module_exit(smi_spi_exit);
+/*@}*/
+
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index e484ea2..34be096 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
 		},
 	},
+	{
+		/* Lenovo LaVie Z */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2613240..c2fb023 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1671,10 +1671,11 @@
 			break;
 		case MXT_TOUCH_MULTI_T9:
 			data->multitouch = MXT_TOUCH_MULTI_T9;
+			/* Only handle messages from first T9 instance */
 			data->T9_reportid_min = min_id;
-			data->T9_reportid_max = max_id;
-			data->num_touchids = object->num_report_ids
-						* mxt_obj_instances(object);
+			data->T9_reportid_max = min_id +
+						object->num_report_ids - 1;
+			data->num_touchids = object->num_report_ids;
 			break;
 		case MXT_SPT_MESSAGECOUNT_T44:
 			data->T44_address = object->start_address;
diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c
index 611156a..be29984 100644
--- a/drivers/input/touchscreen/rohm_bu21023.c
+++ b/drivers/input/touchscreen/rohm_bu21023.c
@@ -304,7 +304,7 @@
 	msg[1].len = len;
 	msg[1].buf = buf;
 
-	i2c_lock_adapter(adap);
+	i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
 
 	for (i = 0; i < 2; i++) {
 		if (__i2c_transfer(adap, &msg[i], 1) < 0) {
@@ -313,7 +313,7 @@
 		}
 	}
 
-	i2c_unlock_adapter(adap);
+	i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
 
 	return ret;
 }
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
index 0ba4faa..4db9da1 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_fw_update.c
@@ -1951,7 +1951,7 @@
 			return retval;
 		}
 
-		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to wait for idle status (%d blocks remaining)\n",
@@ -1961,6 +1961,8 @@
 
 		block_ptr += (transfer * fwu->block_size);
 		remaining -= transfer;
+		dev_dbg(rmi4_data->pdev->dev.parent, "%s: remaining %d\n",
+					__func__, remaining);
 	} while (remaining);
 
 	return 0;
@@ -2010,7 +2012,7 @@
 			return retval;
 		}
 
-		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
 		if (retval < 0) {
 			dev_err(rmi4_data->pdev->dev.parent,
 					"%s: Failed to wait for idle status (block %d)\n",
@@ -2019,6 +2021,8 @@
 		}
 
 		block_ptr += fwu->block_size;
+		dev_dbg(rmi4_data->pdev->dev.parent, "%s: remaining %d\n",
+					__func__, block_cnt - blk);
 	}
 
 	return 0;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0c910a8..16199b3 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2452,9 +2452,9 @@
 	}
 
 	if (amd_iommu_unmap_flush) {
-		dma_ops_free_iova(dma_dom, dma_addr, pages);
 		domain_flush_tlb(&dma_dom->domain);
 		domain_flush_complete(&dma_dom->domain);
+		dma_ops_free_iova(dma_dom, dma_addr, pages);
 	} else {
 		queue_add(dma_dom, dma_addr, pages);
 	}
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 7f294f7..ff4be11 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1233,6 +1233,7 @@
 
 	/* Sync our overflow flag, as we believe we're up to speed */
 	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
+	writel(q->cons, q->cons_reg);
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8c53748..63110fb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1328,8 +1328,8 @@
 	qi_submit_sync(&desc, iommu);
 }
 
-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
-			u64 addr, unsigned mask)
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+			u16 qdep, u64 addr, unsigned mask)
 {
 	struct qi_desc desc;
 
@@ -1344,7 +1344,7 @@
 		qdep = 0;
 
 	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
-		   QI_DIOTLB_TYPE;
+		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
 
 	qi_submit_sync(&desc, iommu);
 }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1612d3a..2558a38 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -421,6 +421,7 @@
 	struct list_head global; /* link to global list */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
+	u16 pfsid;		/* SRIOV physical function source ID */
 	u8 pasid_supported:3;
 	u8 pasid_enabled:1;
 	u8 pri_supported:1;
@@ -1511,6 +1512,20 @@
 		return;
 
 	pdev = to_pci_dev(info->dev);
+	/* For IOMMU that supports device IOTLB throttling (DIT), we assign
+	 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
+	 * queue depth at PF level. If DIT is not set, PFSID will be treated as
+	 * reserved, which should be set to 0.
+	 */
+	if (!ecap_dit(info->iommu->ecap))
+		info->pfsid = 0;
+	else {
+		struct pci_dev *pf_pdev;
+
+		/* pdev will be returned if device is not a vf */
+		pf_pdev = pci_physfn(pdev);
+		info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+	}
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 	/* The PCIe spec, in its wisdom, declares that the behaviour of
@@ -1576,7 +1591,8 @@
 
 		sid = info->bus << 8 | info->devfn;
 		qdep = info->ats_qdep;
-		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+				qdep, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ace331d..85b5e75 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -44,7 +44,7 @@
 	struct io_pgtable_ops *iop;
 
 	unsigned int context_id;
-	spinlock_t lock;			/* Protects mappings */
+	struct mutex mutex;			/* Protects mappings */
 };
 
 struct ipmmu_vmsa_archdata {
@@ -464,7 +464,7 @@
 	if (!domain)
 		return NULL;
 
-	spin_lock_init(&domain->lock);
+	mutex_init(&domain->mutex);
 
 	return &domain->io_domain;
 }
@@ -488,7 +488,6 @@
 	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
 	struct ipmmu_vmsa_device *mmu = archdata->mmu;
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
-	unsigned long flags;
 	unsigned int i;
 	int ret = 0;
 
@@ -497,7 +496,7 @@
 		return -ENXIO;
 	}
 
-	spin_lock_irqsave(&domain->lock, flags);
+	mutex_lock(&domain->mutex);
 
 	if (!domain->mmu) {
 		/* The domain hasn't been used yet, initialize it. */
@@ -513,7 +512,7 @@
 		ret = -EINVAL;
 	}
 
-	spin_unlock_irqrestore(&domain->lock, flags);
+	mutex_unlock(&domain->mutex);
 
 	if (ret < 0)
 		return ret;
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index c2662a1..6e24fac 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -215,6 +215,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_SMP
 static void bcm7038_l1_cpu_offline(struct irq_data *d)
 {
 	struct cpumask *mask = irq_data_get_affinity_mask(d);
@@ -239,6 +240,7 @@
 	}
 	irq_set_affinity_locked(d, &new_affinity, false);
 }
+#endif
 
 static int __init bcm7038_l1_init_one(struct device_node *dn,
 				      unsigned int idx,
@@ -291,7 +293,9 @@
 	.irq_mask		= bcm7038_l1_mask,
 	.irq_unmask		= bcm7038_l1_unmask,
 	.irq_set_affinity	= bcm7038_l1_set_affinity,
+#ifdef CONFIG_SMP
 	.irq_cpu_offline	= bcm7038_l1_cpu_offline,
+#endif
 };
 
 static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index e4c43a1..8088c34 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1655,13 +1655,7 @@
 			} else
 				return -EINVAL;
 		case IIOCDBGVAR:
-			if (arg) {
-				if (copy_to_user(argp, &dev, sizeof(ulong)))
-					return -EFAULT;
-				return 0;
-			} else
-				return -EINVAL;
-			break;
+			return -EINVAL;
 		default:
 			if ((cmd & IIOCDRVCTL) == IIOCDRVCTL)
 				cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK;
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 1e62cde..17eb5d56 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -438,6 +438,12 @@
 
 		val |= 0x1 << led->fnode[i].id;
 
+		rc = qpnp_flash_led_write(led,
+			FLASH_LED_REG_SAFETY_TMR(led->base + addr_offset),
+			FLASH_LED_SAFETY_TMR_DISABLED);
+		if (rc < 0)
+			return rc;
+
 		if (led->fnode[i].strobe_sel == HW_STROBE) {
 			if (led->fnode[i].id == LED3)
 				strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT;
@@ -1324,6 +1330,12 @@
 		if (rc < 0)
 			return rc;
 
+		rc = qpnp_flash_led_write(led,
+			FLASH_LED_REG_SAFETY_TMR(led->base + addr_offset),
+			FLASH_LED_SAFETY_TMR_DISABLED);
+		if (rc < 0)
+			return rc;
+
 		led->fnode[i].led_on = false;
 
 		if (led->fnode[i].strobe_sel == HW_STROBE) {
diff --git a/drivers/leds/leds-qpnp-vibrator.c b/drivers/leds/leds-qpnp-vibrator.c
index cc2615d..81f54f0 100644
--- a/drivers/leds/leds-qpnp-vibrator.c
+++ b/drivers/leds/leds-qpnp-vibrator.c
@@ -391,7 +391,6 @@
 				 (vib->vib_play_ms % 1000) * 1000000),
 					HRTIMER_MODE_REL);
 	}
-	vib->vib_play_ms = 0;
 	mutex_unlock(&vib->lock);
 	schedule_work(&vib->work);
 
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 91081dc..32c6967 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -531,8 +531,9 @@
 	int timeout;
 	struct adb_request req;
 
-	out_8(&via[B], via[B] | TREQ);			/* negate TREQ */
-	out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK);	/* TACK in, TREQ out */
+	/* Negate TREQ. Set TACK to input and TREQ to output. */
+	out_8(&via[B], in_8(&via[B]) | TREQ);
+	out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
 
 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 	timeout =  100000;
@@ -1454,8 +1455,8 @@
 	struct adb_request *req;
 	int bite = 0;
 
-	if (via[B] & TREQ) {
-		printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
+	if (in_8(&via[B]) & TREQ) {
+		printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
 		out_8(&via[IFR], SR_INT);
 		return NULL;
 	}
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index dd2afbc..26d2f89 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -195,9 +195,9 @@
 	platform_set_drvdata(pdev, ctx);
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
-	if (!mb_base)
-		return -ENOMEM;
+	mb_base = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(mb_base))
+		return PTR_ERR(mb_base);
 
 	/* Setup mailbox links */
 	for (i = 0; i < MBOX_CNT; i++) {
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index dafa981..8bb26d5 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -292,6 +292,22 @@
 	  To compile this code as a module, choose M here: the module will
 	  be called dm-req-crypt.
 
+config DM_DEFAULT_KEY
+	tristate "Default-key crypt target support"
+	depends on BLK_DEV_DM
+	depends on PFK
+	---help---
+	  This (currently Android-specific) device-mapper target allows you to
+	  create a device that assigns a default encryption key to bios that
+	  don't already have one.  This can sit between inline cryptographic
+	  acceleration hardware and filesystems that use it.  This ensures that
+	  where the filesystem doesn't explicitly specify a key, such as for
+	  filesystem metadata, a default key will be used instead, leaving no
+	  sectors unencrypted.
+
+	  To compile this code as a module, choose M here: the module will be
+	  called dm-default-key.
+
 	  If unsure, say N.
 
 config DM_SNAPSHOT
@@ -533,16 +549,27 @@
 
 	  If unsure, say N.
 
+config DM_VERITY_AVB
+	tristate "Support AVB specific verity error behavior"
+	depends on DM_VERITY
+	---help---
+	  Enables Android Verified Boot platform-specific error
+	  behavior. In particular, it will modify the vbmeta partition
+	  specified on the kernel command-line when non-transient error
+	  occurs (followed by a panic).
+
+	  If unsure, say N.
+
 config DM_ANDROID_VERITY
 	bool "Android verity target support"
+	depends on BLK_DEV_DM=y
 	depends on DM_VERITY=y
 	depends on X509_CERTIFICATE_PARSER
 	depends on SYSTEM_TRUSTED_KEYRING
-	depends on PUBLIC_KEY_ALGO_RSA
+	depends on CRYPTO_RSA
 	depends on KEYS
 	depends on ASYMMETRIC_KEY_TYPE
 	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
-	depends on MD_LINEAR=y
 	select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
 	---help---
 	  This device-mapper target is virtually a VERITY target. This
@@ -554,8 +581,8 @@
 
 config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED
 	bool "Verity will validate blocks at most once"
-   depends on DM_VERITY
-   ---help---
+	depends on DM_VERITY
+	---help---
 	  Default enables at_most_once option for dm-verity
 
 	  Verify data blocks only the first time they are read from the
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f14e2fc..c8dec9c 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -40,6 +40,7 @@
 obj-$(CONFIG_DM_BUFIO)		+= dm-bufio.o
 obj-$(CONFIG_DM_BIO_PRISON)	+= dm-bio-prison.o
 obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
+obj-$(CONFIG_DM_DEFAULT_KEY)	+= dm-default-key.o
 obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_FLAKEY)		+= dm-flakey.o
 obj-$(CONFIG_DM_MULTIPATH)	+= dm-multipath.o dm-round-robin.o
@@ -69,3 +70,7 @@
 ifeq ($(CONFIG_DM_VERITY_FEC),y)
 dm-verity-objs			+= dm-verity-fec.o
 endif
+
+ifeq ($(CONFIG_DM_VERITY_AVB),y)
+dm-verity-objs			+= dm-verity-avb.o
+endif
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index bb7aa31..cdf388d 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -456,8 +456,10 @@
 			 * data on cache. BCACHE_DEV_DETACHING flag is set in
 			 * bch_cached_dev_detach().
 			 */
-			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+				up_write(&dc->writeback_lock);
 				break;
+			}
 		}
 
 		up_write(&dc->writeback_lock);
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index eb4bdf6..f9491de 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -33,6 +33,7 @@
 
 #include <asm/setup.h>
 #include <crypto/hash.h>
+#include <crypto/hash_info.h>
 #include <crypto/public_key.h>
 #include <crypto/sha.h>
 #include <keys/asymmetric-type.h>
@@ -122,75 +123,6 @@
 	return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked));
 }
 
-static int table_extract_mpi_array(struct public_key_signature *pks,
-				const void *data, size_t len)
-{
-	MPI mpi = mpi_read_raw_data(data, len);
-
-	if (!mpi) {
-		DMERR("Error while allocating mpi array");
-		return -ENOMEM;
-	}
-
-	pks->mpi[0] = mpi;
-	pks->nr_mpi = 1;
-	return 0;
-}
-
-static struct public_key_signature *table_make_digest(
-						enum hash_algo hash,
-						const void *table,
-						unsigned long table_len)
-{
-	struct public_key_signature *pks = NULL;
-	struct crypto_shash *tfm;
-	struct shash_desc *desc;
-	size_t digest_size, desc_size;
-	int ret;
-
-	/* Allocate the hashing algorithm we're going to need and find out how
-	 * big the hash operational data will be.
-	 */
-	tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
-	if (IS_ERR(tfm))
-		return ERR_CAST(tfm);
-
-	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
-	digest_size = crypto_shash_digestsize(tfm);
-
-	/* We allocate the hash operational data storage on the end of out
-	 * context data and the digest output buffer on the end of that.
-	 */
-	ret = -ENOMEM;
-	pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
-	if (!pks)
-		goto error;
-
-	pks->pkey_hash_algo = hash;
-	pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
-	pks->digest_size = digest_size;
-
-	desc = (struct shash_desc *)(pks + 1);
-	desc->tfm = tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	ret = crypto_shash_init(desc);
-	if (ret < 0)
-		goto error;
-
-	ret = crypto_shash_finup(desc, table, table_len, pks->digest);
-	if (ret < 0)
-		goto error;
-
-	crypto_free_shash(tfm);
-	return pks;
-
-error:
-	kfree(pks);
-	crypto_free_shash(tfm);
-	return ERR_PTR(ret);
-}
-
 static int read_block_dev(struct bio_read *payload, struct block_device *bdev,
 		sector_t offset, int length)
 {
@@ -207,6 +139,7 @@
 
 	bio->bi_bdev = bdev;
 	bio->bi_iter.bi_sector = offset;
+	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
 
 	payload->page_io = kzalloc(sizeof(struct page *) *
 		payload->number_of_pages, GFP_KERNEL);
@@ -230,7 +163,7 @@
 		}
 	}
 
-	if (!submit_bio_wait(READ, bio))
+	if (!submit_bio_wait(bio))
 		/* success */
 		goto free_bio;
 	DMERR("bio read failed");
@@ -567,51 +500,6 @@
 	return DM_VERITY_MODE_EIO;
 }
 
-static int verify_verity_signature(char *key_id,
-		struct android_metadata *metadata)
-{
-	key_ref_t key_ref;
-	struct key *key;
-	struct public_key_signature *pks = NULL;
-	int retval = -EINVAL;
-
-	key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1),
-		&key_type_asymmetric, key_id);
-
-	if (IS_ERR(key_ref)) {
-		DMERR("keyring: key not found");
-		return -ENOKEY;
-	}
-
-	key = key_ref_to_ptr(key_ref);
-
-	pks = table_make_digest(HASH_ALGO_SHA256,
-			(const void *)metadata->verity_table,
-			le32_to_cpu(metadata->header->table_length));
-
-	if (IS_ERR(pks)) {
-		DMERR("hashing failed");
-		retval = PTR_ERR(pks);
-		pks = NULL;
-		goto error;
-	}
-
-	retval = table_extract_mpi_array(pks, &metadata->header->signature[0],
-				RSANUMBYTES);
-	if (retval < 0) {
-		DMERR("Error extracting mpi %d", retval);
-		goto error;
-	}
-
-	retval = verify_signature(key, pks);
-	mpi_free(pks->rsa.s);
-error:
-	kfree(pks);
-	key_put(key);
-
-	return retval;
-}
-
 static void handle_error(void)
 {
 	int mode = verity_mode();
@@ -623,6 +511,95 @@
 	}
 }
 
+static struct public_key_signature *table_make_digest(
+						enum hash_algo hash,
+						const void *table,
+						unsigned long table_len)
+{
+	struct public_key_signature *pks = NULL;
+	struct crypto_shash *tfm;
+	struct shash_desc *desc;
+	size_t digest_size, desc_size;
+	int ret;
+
+	/* Allocate the hashing algorithm we're going to need and find out how
+	 * big the hash operational data will be.
+	 */
+	tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+
+	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+	digest_size = crypto_shash_digestsize(tfm);
+
+	/* We allocate the hash operational data storage on the end of our
+	 * context data and the digest output buffer on the end of that.
+	 */
+	ret = -ENOMEM;
+	pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
+	if (!pks)
+		goto error;
+
+	pks->pkey_algo = "rsa";
+	pks->hash_algo = hash_algo_name[hash];
+	pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
+	pks->digest_size = digest_size;
+
+	desc = (struct shash_desc *)(pks + 1);
+	desc->tfm = tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	ret = crypto_shash_init(desc);
+	if (ret < 0)
+		goto error;
+
+	ret = crypto_shash_finup(desc, table, table_len, pks->digest);
+	if (ret < 0)
+		goto error;
+
+	crypto_free_shash(tfm);
+	return pks;
+
+error:
+	kfree(pks);
+	crypto_free_shash(tfm);
+	return ERR_PTR(ret);
+}
+
+
+static int verify_verity_signature(char *key_id,
+		struct android_metadata *metadata)
+{
+	struct public_key_signature *pks = NULL;
+	int retval = -EINVAL;
+
+	if (!key_id)
+		goto error;
+
+	pks = table_make_digest(HASH_ALGO_SHA256,
+			(const void *)metadata->verity_table,
+			le32_to_cpu(metadata->header->table_length));
+	if (IS_ERR(pks)) {
+		DMERR("hashing failed");
+		retval = PTR_ERR(pks);
+		pks = NULL;
+		goto error;
+	}
+
+	pks->s = kmemdup(&metadata->header->signature[0], RSANUMBYTES, GFP_KERNEL);
+	if (!pks->s) {
+		DMERR("Error allocating memory for signature");
+		goto error;
+	}
+	pks->s_size = RSANUMBYTES;
+
+	retval = verify_signature_one(pks, NULL, key_id);
+	kfree(pks->s);
+error:
+	kfree(pks);
+	return retval;
+}
+
 static inline bool test_mult_overflow(sector_t a, u32 b)
 {
 	sector_t r = (sector_t)~0ULL;
@@ -694,8 +671,8 @@
 	dev_t uninitialized_var(dev);
 	struct android_metadata *metadata = NULL;
 	int err = 0, i, mode;
-	char *key_id, *table_ptr, dummy, *target_device,
-	*verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
+	char *key_id = NULL, *table_ptr, dummy, *target_device;
+	char *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
 	/* One for specifying number of opt args and one for mode */
 	sector_t data_sectors;
 	u32 data_block_size;
@@ -714,16 +691,16 @@
 			handle_error();
 			return -EINVAL;
 		}
-	} else if (argc == 2)
-		key_id = argv[1];
-	else {
+		target_device = argv[0];
+	} else if (argc == 2) {
+		key_id = argv[0];
+		target_device = argv[1];
+	} else {
 		DMERR("Incorrect number of arguments");
 		handle_error();
 		return -EINVAL;
 	}
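+	/*
+	 * Sketch of the two table forms handled above (the device path is
+	 * illustrative only, not part of this change):
+	 *   android-verity /dev/block/bootdevice/by-name/system
+	 *   android-verity <verity_key_id> /dev/block/bootdevice/by-name/system
+	 */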
 
-	target_device = argv[0];
-
 	dev = name_to_dev_t(target_device);
 	if (!dev) {
 		DMERR("no dev found for %s", target_device);
@@ -877,12 +854,11 @@
 	}
 
 	err = verity_ctr(ti, no_of_args, verity_table_args);
-
-	if (err)
-		DMERR("android-verity failed to mount as verity target");
-	else {
+	if (err) {
+		DMERR("android-verity failed to create a verity target");
+	} else {
 		target_added = true;
-		DMINFO("android-verity mounted as verity target");
+		DMINFO("android-verity created as verity target");
 	}
 
 free_metadata:
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6937ca4..62eb4b7 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -344,7 +344,7 @@
 	disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
 	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
 	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
-	disk_super->policy_hint_size = 0;
+	disk_super->policy_hint_size = cpu_to_le32(0);
 
 	__copy_sm_root(cmd, disk_super);
 
@@ -659,6 +659,7 @@
 	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
 	disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
 	disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
+	disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
 
 	disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
 	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
@@ -1261,8 +1262,8 @@
 		if (hints_valid) {
 			r = dm_array_cursor_next(&cmd->hint_cursor);
 			if (r) {
-				DMERR("dm_array_cursor_next for hint failed");
-				goto out;
+				dm_array_cursor_end(&cmd->hint_cursor);
+				hints_valid = false;
 			}
 		}
 	}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bed056c..f3993a4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3390,8 +3390,13 @@
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-	if (from_cblock(new_size) > from_cblock(cache->cache_size))
-		return true;
+	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+		if (cache->sized) {
+			DMERR("%s: unable to extend cache due to missing cache table reload",
+			      cache_device_name(cache));
+			return false;
+		}
+	}
 
 	/*
 	 * We can't drop a dirty block when shrinking the cache.
diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c
new file mode 100644
index 0000000..ca50de1
--- /dev/null
+++ b/drivers/md/dm-default-key.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/pfk.h>
+
+#define DM_MSG_PREFIX "default-key"
+
+struct default_key_c {
+	struct dm_dev *dev;
+	sector_t start;
+	struct blk_encryption_key key;
+};
+
+static void default_key_dtr(struct dm_target *ti)
+{
+	struct default_key_c *dkc = ti->private;
+
+	if (dkc->dev)
+		dm_put_device(ti, dkc->dev);
+	kzfree(dkc);
+}
+
+/*
+ * Construct a default-key mapping: <mode> <key> <dev_path> <start>
+ */
+static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct default_key_c *dkc;
+	size_t key_size;
+	unsigned long long tmp;
+	char dummy;
+	int err;
+
+	if (argc != 4) {
+		ti->error = "Invalid argument count";
+		return -EINVAL;
+	}
+
+	dkc = kzalloc(sizeof(*dkc), GFP_KERNEL);
+	if (!dkc) {
+		ti->error = "Out of memory";
+		return -ENOMEM;
+	}
+	ti->private = dkc;
+
+	if (strcmp(argv[0], "AES-256-XTS") != 0) {
+		ti->error = "Unsupported encryption mode";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	key_size = strlen(argv[1]);
+	if (key_size != 2 * BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS) {
+		ti->error = "Unsupported key size";
+		err = -EINVAL;
+		goto bad;
+	}
+	key_size /= 2;
+
+	if (hex2bin(dkc->key.raw, argv[1], key_size) != 0) {
+		ti->error = "Malformed key string";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	err = dm_get_device(ti, argv[2], dm_table_get_mode(ti->table),
+			    &dkc->dev);
+	if (err) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	if (sscanf(argv[3], "%llu%c", &tmp, &dummy) != 1) {
+		ti->error = "Invalid start sector";
+		err = -EINVAL;
+		goto bad;
+	}
+	dkc->start = tmp;
+
+	if (!blk_queue_inlinecrypt(bdev_get_queue(dkc->dev->bdev))) {
+		ti->error = "Device does not support inline encryption";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* Pass flush requests through to the underlying device. */
+	ti->num_flush_bios = 1;
+
+	/*
+	 * We pass discard requests through to the underlying device, although
+	 * the discarded blocks will be zeroed, which leaks information about
+	 * unused blocks.  It's also impossible for dm-default-key to know not
+	 * to decrypt discarded blocks, so they will not be read back as zeroes
+	 * and we must set discard_zeroes_data_unsupported.
+	 */
+	ti->num_discard_bios = 1;
+
+	/*
+	 * It's unclear whether WRITE_SAME would work with inline encryption; it
+	 * would depend on whether the hardware duplicates the data before or
+	 * after encryption.  But since the internal storage in some devices
+	 * (MSM8998-based) doesn't claim to support WRITE_SAME anyway, we don't
+	 * currently have a way to test it.  Leave it disabled for now.
+	 */
+	/*ti->num_write_same_bios = 1;*/
+
+	return 0;
+
+bad:
+	default_key_dtr(ti);
+	return err;
+}
+
+static int default_key_map(struct dm_target *ti, struct bio *bio)
+{
+	const struct default_key_c *dkc = ti->private;
+
+	bio->bi_bdev = dkc->dev->bdev;
+	if (bio_sectors(bio)) {
+		bio->bi_iter.bi_sector = dkc->start +
+			dm_target_offset(ti, bio->bi_iter.bi_sector);
+	}
+
+	if (!bio->bi_crypt_key && !bio->bi_crypt_skip)
+		bio->bi_crypt_key = &dkc->key;
+
+	return DM_MAPIO_REMAPPED;
+}
+
+static void default_key_status(struct dm_target *ti, status_type_t type,
+			       unsigned int status_flags, char *result,
+			       unsigned int maxlen)
+{
+	const struct default_key_c *dkc = ti->private;
+	unsigned int sz = 0;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+
+		/* encryption mode */
+		DMEMIT("AES-256-XTS");
+
+		/* reserved for key; dm-crypt shows it, but we don't for now */
+		DMEMIT(" -");
+
+		/* name of underlying device, and the start sector in it */
+		DMEMIT(" %s %llu", dkc->dev->name,
+		       (unsigned long long)dkc->start);
+		break;
+	}
+}
+
+static int default_key_prepare_ioctl(struct dm_target *ti,
+				     struct block_device **bdev, fmode_t *mode)
+{
+	struct default_key_c *dkc = ti->private;
+	struct dm_dev *dev = dkc->dev;
+
+	*bdev = dev->bdev;
+
+	/*
+	 * Only pass ioctls through if the device sizes match exactly.
+	 */
+	if (dkc->start ||
+	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
+}
+
+static int default_key_iterate_devices(struct dm_target *ti,
+				       iterate_devices_callout_fn fn,
+				       void *data)
+{
+	struct default_key_c *dkc = ti->private;
+
+	return fn(ti, dkc->dev, dkc->start, ti->len, data);
+}
+
+static struct target_type default_key_target = {
+	.name   = "default-key",
+	.version = {1, 0, 0},
+	.module = THIS_MODULE,
+	.ctr    = default_key_ctr,
+	.dtr    = default_key_dtr,
+	.map    = default_key_map,
+	.status = default_key_status,
+	.prepare_ioctl = default_key_prepare_ioctl,
+	.iterate_devices = default_key_iterate_devices,
+};
+
+static int __init dm_default_key_init(void)
+{
+	return dm_register_target(&default_key_target);
+}
+
+static void __exit dm_default_key_exit(void)
+{
+	dm_unregister_target(&default_key_target);
+}
+
+module_init(dm_default_key_init);
+module_exit(dm_default_key_exit);
+
+MODULE_AUTHOR("Paul Lawrence <paullawrence@google.com>");
+MODULE_AUTHOR("Paul Crowley <paulcrowley@google.com>");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata");
+MODULE_LICENSE("GPL v2");
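For context on the new target above: default_key_ctr() parses a four-field table line of the form <mode> <key> <dev_path> <start>, accepts only AES-256-XTS as the mode, and expects the key as raw hex whose length must equal 2 * BLK_ENCRYPTION_KEY_SIZE_AES_256_XTS characters. A minimal userspace sketch of composing such a line follows; the device path and key are placeholders, not values taken from this change.

/*
 * Illustrative sketch only: formats a "default-key" target line in the
 * "<mode> <key> <dev_path> <start>" form parsed by default_key_ctr().
 * Compile with any C compiler; no kernel headers are required.
 */
#include <stdio.h>

int main(void)
{
	const char *mode = "AES-256-XTS";       /* only mode the ctr accepts */
	const char *hexkey = "<key-as-hex>";    /* placeholder, not a real key */
	const char *dev = "/dev/block/dm-0";    /* placeholder backing device */
	unsigned long long start = 0;           /* start sector on that device */
	char table[512];

	snprintf(table, sizeof(table), "%s %s %s %llu", mode, hexkey, dev, start);
	printf("%s\n", table);                  /* target portion of a dm table */
	return 0;
}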
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 9e9d04cb..56fcccc 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -454,6 +454,8 @@
 	if (atomic_dec_and_test(&kc->nr_jobs))
 		wake_up(&kc->destroyq);
 
+	cond_resched();
+
 	return 0;
 }
 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index ee75e35..3f389b2 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2880,6 +2880,11 @@
 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
 		rs_set_new(rs);
 	} else if (rs_is_recovering(rs)) {
+		/* Rebuild particular devices */
+		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+			rs_setup_recovery(rs, MaxSector);
+		}
 		/* A recovering raid set may be resized */
 		; /* skip setup rs */
 	} else if (rs_is_reshaping(rs)) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b75ccef..6b7b4dd 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -511,14 +511,14 @@
  * On the other hand, dm-switch needs to process bulk data using messages and
  * excessive use of GFP_NOIO could cause trouble.
  */
-static char **realloc_argv(unsigned *array_size, char **old_argv)
+static char **realloc_argv(unsigned *size, char **old_argv)
 {
 	char **argv;
 	unsigned new_size;
 	gfp_t gfp;
 
-	if (*array_size) {
-		new_size = *array_size * 2;
+	if (*size) {
+		new_size = *size * 2;
 		gfp = GFP_KERNEL;
 	} else {
 		new_size = 8;
@@ -526,8 +526,8 @@
 	}
 	argv = kmalloc(new_size * sizeof(*argv), gfp);
 	if (argv) {
-		memcpy(argv, old_argv, *array_size * sizeof(*argv));
-		*array_size = new_size;
+		memcpy(argv, old_argv, *size * sizeof(*argv));
+		*size = new_size;
 	}
 
 	kfree(old_argv);
@@ -1495,6 +1495,16 @@
 	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }
 
+static int queue_supports_inline_encryption(struct dm_target *ti,
+					    struct dm_dev *dev,
+					    sector_t start, sector_t len,
+					    void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && blk_queue_inlinecrypt(q);
+}
+
 static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
@@ -1615,6 +1625,11 @@
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
+	if (dm_table_all_devices_attribute(t, queue_supports_inline_encryption))
+		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
+	else
+		queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q);
+
 	dm_table_verify_integrity(t);
 
 	/*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index e976f4f..149fbac 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -190,6 +190,12 @@
 	sector_t data_block_size;
 
 	/*
+	 * We reserve a section of the metadata for commit overhead.
+	 * All reported space does *not* include this.
+	 */
+	dm_block_t metadata_reserve;
+
+	/*
 	 * Set if a transaction has to be aborted but the attempt to roll back
 	 * to the previous (good) transaction failed.  The only pool metadata
 	 * operation possible in this state is the closing of the device.
@@ -827,6 +833,20 @@
 	return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+	int r;
+	dm_block_t total;
+	dm_block_t max_blocks = 4096; /* 16M */
+
+	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+	if (r) {
+		DMERR("could not get size of metadata device");
+		pmd->metadata_reserve = max_blocks;
+	} else
+		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
 					       sector_t data_block_size,
 					       bool format_device)
@@ -860,6 +880,8 @@
 		return ERR_PTR(r);
 	}
 
+	__set_metadata_reserve(pmd);
+
 	return pmd;
 }
 
@@ -1831,6 +1853,13 @@
 	down_read(&pmd->root_lock);
 	if (!pmd->fail_io)
 		r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+	if (!r) {
+		if (*result < pmd->metadata_reserve)
+			*result = 0;
+		else
+			*result -= pmd->metadata_reserve;
+	}
 	up_read(&pmd->root_lock);
 
 	return r;
@@ -1943,8 +1972,11 @@
 	int r = -EINVAL;
 
 	down_write(&pmd->root_lock);
-	if (!pmd->fail_io)
+	if (!pmd->fail_io) {
 		r = __resize_space_map(pmd->metadata_sm, new_count);
+		if (!r)
+			__set_metadata_reserve(pmd);
+	}
 	up_write(&pmd->root_lock);
 
 	return r;
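To make the new reserve sizing concrete: __set_metadata_reserve() above holds back min(4096 blocks, total / 10). With 4 KiB metadata blocks (the 4096-block cap is the "16M" noted in the code), a metadata device of 10,000 blocks would reserve min(4096, 1000) = 1000 blocks, roughly 4 MiB, and dm_pool_get_free_metadata_block_count() now reports only the free space above that reserve.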
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ea1bfc1..e697283 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@
 enum pool_mode {
 	PM_WRITE,		/* metadata may be changed */
 	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
+
+	/*
+	 * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+	 */
+	PM_OUT_OF_METADATA_SPACE,
 	PM_READ_ONLY,		/* metadata may not be changed */
+
 	PM_FAIL,		/* all I/O fails */
 };
 
@@ -1386,7 +1392,35 @@
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+	return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+	return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+	int r;
+	const char *ooms_reason = NULL;
+	dm_block_t nr_free;
+
+	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+	if (r)
+		ooms_reason = "Could not get free metadata blocks";
+	else if (!nr_free)
+		ooms_reason = "No free metadata blocks";
+
+	if (ooms_reason && !is_read_only(pool)) {
+		DMERR("%s", ooms_reason);
+		set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+	}
+}
+
+static void check_for_data_space(struct pool *pool)
 {
 	int r;
 	dm_block_t nr_free;
@@ -1412,14 +1446,16 @@
 {
 	int r;
 
-	if (get_pool_mode(pool) >= PM_READ_ONLY)
+	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
 		return -EINVAL;
 
 	r = dm_pool_commit_metadata(pool->pmd);
 	if (r)
 		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-	else
-		check_for_space(pool);
+	else {
+		check_for_metadata_space(pool);
+		check_for_data_space(pool);
+	}
 
 	return r;
 }
@@ -1485,6 +1521,19 @@
 		return r;
 	}
 
+	r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+		return r;
+	}
+
+	if (!free_blocks) {
+		/* Let's commit before we use up the metadata reserve. */
+		r = commit(pool);
+		if (r)
+			return r;
+	}
+
 	return 0;
 }
 
@@ -1516,6 +1565,7 @@
 	case PM_OUT_OF_DATA_SPACE:
 		return pool->pf.error_if_no_space ? -ENOSPC : 0;
 
+	case PM_OUT_OF_METADATA_SPACE:
 	case PM_READ_ONLY:
 	case PM_FAIL:
 		return -EIO;
@@ -2479,8 +2529,9 @@
 		error_retry_list(pool);
 		break;
 
+	case PM_OUT_OF_METADATA_SPACE:
 	case PM_READ_ONLY:
-		if (old_mode != new_mode)
+		if (!is_read_only_pool_mode(old_mode))
 			notify_of_pool_mode_change(pool, "read-only");
 		dm_pool_metadata_read_only(pool->pmd);
 		pool->process_bio = process_bio_read_only;
@@ -2518,6 +2569,8 @@
 	case PM_WRITE:
 		if (old_mode != new_mode)
 			notify_of_pool_mode_change(pool, "write");
+		if (old_mode == PM_OUT_OF_DATA_SPACE)
+			cancel_delayed_work_sync(&pool->no_space_timeout);
 		pool->out_of_data_space = false;
 		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
 		dm_pool_metadata_read_write(pool->pmd);
@@ -3416,6 +3469,10 @@
 		DMINFO("%s: growing the metadata device from %llu to %llu blocks",
 		       dm_device_name(pool->pool_md),
 		       sb_metadata_dev_size, metadata_dev_size);
+
+		if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+			set_pool_mode(pool, PM_WRITE);
+
 		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
 		if (r) {
 			metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3719,7 +3776,7 @@
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
-	if (get_pool_mode(pool) >= PM_READ_ONLY) {
+	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
 		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
 		      dm_device_name(pool->pool_md));
 		return -EOPNOTSUPP;
@@ -3793,6 +3850,7 @@
 	dm_block_t nr_blocks_data;
 	dm_block_t nr_blocks_metadata;
 	dm_block_t held_root;
+	enum pool_mode mode;
 	char buf[BDEVNAME_SIZE];
 	char buf2[BDEVNAME_SIZE];
 	struct pool_c *pt = ti->private;
@@ -3863,9 +3921,10 @@
 		else
 			DMEMIT("- ");
 
-		if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+		mode = get_pool_mode(pool);
+		if (mode == PM_OUT_OF_DATA_SPACE)
 			DMEMIT("out_of_data_space ");
-		else if (pool->pf.mode == PM_READ_ONLY)
+		else if (is_read_only_pool_mode(mode))
 			DMEMIT("ro ");
 		else
 			DMEMIT("rw ");
diff --git a/drivers/md/dm-verity-avb.c b/drivers/md/dm-verity-avb.c
new file mode 100644
index 0000000..89f95e4
--- /dev/null
+++ b/drivers/md/dm-verity-avb.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2017 Google.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Based on drivers/md/dm-verity-chromeos.c
+ */
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+
+#define DM_MSG_PREFIX "verity-avb"
+
+/* Set via module parameters. */
+static char avb_vbmeta_device[64];
+static char avb_invalidate_on_error[4];
+
+static void invalidate_vbmeta_endio(struct bio *bio)
+{
+	if (bio->bi_error)
+		DMERR("invalidate_vbmeta_endio: error %d", bio->bi_error);
+	complete(bio->bi_private);
+}
+
+static int invalidate_vbmeta_submit(struct bio *bio,
+				    struct block_device *bdev,
+				    int op, int access_last_sector,
+				    struct page *page)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	bio->bi_private = &wait;
+	bio->bi_end_io = invalidate_vbmeta_endio;
+	bio->bi_bdev = bdev;
+	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_NOIDLE);
+
+	bio->bi_iter.bi_sector = 0;
+	if (access_last_sector) {
+		sector_t last_sector;
+
+		last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1;
+		bio->bi_iter.bi_sector = last_sector;
+	}
+	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
+		DMERR("invalidate_vbmeta_submit: bio_add_page error");
+		return -EIO;
+	}
+
+	submit_bio(bio);
+	/* Wait up to 2 seconds for completion or fail. */
+	if (!wait_for_completion_timeout(&wait, msecs_to_jiffies(2000)))
+		return -EIO;
+	return 0;
+}
+
+static int invalidate_vbmeta(dev_t vbmeta_devt)
+{
+	int ret = 0;
+	struct block_device *bdev;
+	struct bio *bio;
+	struct page *page;
+	fmode_t dev_mode;
+	/* Ensure we do synchronous unblocked I/O. We may also need
+	 * sync_bdev() on completion, but it really shouldn't be needed.
+	 */
+	int access_last_sector = 0;
+
+	DMINFO("invalidate_vbmeta: acting on device %d:%d",
+	       MAJOR(vbmeta_devt), MINOR(vbmeta_devt));
+
+	/* First we open the device for reading. */
+	dev_mode = FMODE_READ | FMODE_EXCL;
+	bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
+				 invalidate_vbmeta);
+	if (IS_ERR(bdev)) {
+		DMERR("invalidate_vbmeta: could not open device for reading");
+		dev_mode = 0;
+		ret = -ENOENT;
+		goto failed_to_read;
+	}
+
+	bio = bio_alloc(GFP_NOIO, 1);
+	if (!bio) {
+		ret = -ENOMEM;
+		goto failed_bio_alloc;
+	}
+
+	page = alloc_page(GFP_NOIO);
+	if (!page) {
+		ret = -ENOMEM;
+		goto failed_to_alloc_page;
+	}
+
+	access_last_sector = 0;
+	ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ,
+				       access_last_sector, page);
+	if (ret) {
+		DMERR("invalidate_vbmeta: error reading");
+		goto failed_to_submit_read;
+	}
+
+	/* We have a page. Let's make sure it looks right. */
+	if (memcmp("AVB0", page_address(page), 4) == 0) {
+		/* Stamp it. */
+		memcpy(page_address(page), "AVE0", 4);
+		DMINFO("invalidate_vbmeta: found vbmeta partition");
+	} else {
+		/* Could be this is on a AVB footer, check. Also, since the
+		/* Could be this is an AVB footer; check. Also, since the
+		 * we're dealing with 512-byte sectors.
+		 */
+		size_t offset = (1<<SECTOR_SHIFT) - 64;
+
+		access_last_sector = 1;
+		ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ,
+					       access_last_sector, page);
+		if (ret) {
+			DMERR("invalidate_vbmeta: error reading");
+			goto failed_to_submit_read;
+		}
+		if (memcmp("AVBf", page_address(page) + offset, 4) != 0) {
+			DMERR("invalidate_vbmeta on non-vbmeta partition");
+			ret = -EINVAL;
+			goto invalid_header;
+		}
+		/* Stamp it. */
+		memcpy(page_address(page) + offset, "AVE0", 4);
+		DMINFO("invalidate_vbmeta: found vbmeta footer partition");
+	}
+
+	/* Now rewrite the changed page - the block dev was opened
+	 * for reading only, so reopen it here for writing.
+	 */
+	blkdev_put(bdev, dev_mode);
+	dev_mode = FMODE_WRITE | FMODE_EXCL;
+	bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
+				 invalidate_vbmeta);
+	if (IS_ERR(bdev)) {
+		DMERR("invalidate_vbmeta: could not open device for writing");
+		dev_mode = 0;
+		ret = -ENOENT;
+		goto failed_to_write;
+	}
+
+	/* We re-use the same bio to do the write after the read. Need to reset
+	 * it to initialize bio->bi_remaining.
+	 */
+	bio_reset(bio);
+
+	ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_WRITE,
+				       access_last_sector, page);
+	if (ret) {
+		DMERR("invalidate_vbmeta: error writing");
+		goto failed_to_submit_write;
+	}
+
+	DMERR("invalidate_vbmeta: completed.");
+	ret = 0;
+failed_to_submit_write:
+failed_to_write:
+invalid_header:
+	__free_page(page);
+failed_to_submit_read:
+	/* Technically, we'll leak a page with the pending bio, but
+	 * we're about to reboot anyway.
+	 */
+failed_to_alloc_page:
+	bio_put(bio);
+failed_bio_alloc:
+	if (dev_mode)
+		blkdev_put(bdev, dev_mode);
+failed_to_read:
+	return ret;
+}
+
+void dm_verity_avb_error_handler(void)
+{
+	dev_t dev;
+
+	DMINFO("AVB error handler called for %s", avb_vbmeta_device);
+
+	if (strcmp(avb_invalidate_on_error, "yes") != 0) {
+		DMINFO("Not configured to invalidate");
+		return;
+	}
+
+	if (avb_vbmeta_device[0] == '\0') {
+		DMERR("avb_vbmeta_device parameter not set");
+		goto fail_no_dev;
+	}
+
+	dev = name_to_dev_t(avb_vbmeta_device);
+	if (!dev) {
+		DMERR("No matching partition for device: %s",
+		      avb_vbmeta_device);
+		goto fail_no_dev;
+	}
+
+	invalidate_vbmeta(dev);
+
+fail_no_dev:
+	;
+}
+
+static int __init dm_verity_avb_init(void)
+{
+	DMINFO("AVB error handler initialized with vbmeta device: %s",
+	       avb_vbmeta_device);
+	return 0;
+}
+
+static void __exit dm_verity_avb_exit(void)
+{
+}
+
+module_init(dm_verity_avb_init);
+module_exit(dm_verity_avb_exit);
+
+MODULE_AUTHOR("David Zeuthen <zeuthen@google.com>");
+MODULE_DESCRIPTION("AVB-specific error handler for dm-verity");
+MODULE_LICENSE("GPL");
+
+/* Declare parameter with no module prefix */
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX	"androidboot.vbmeta."
+module_param_string(device, avb_vbmeta_device, sizeof(avb_vbmeta_device), 0);
+module_param_string(invalidate_on_error, avb_invalidate_on_error,
+		    sizeof(avb_invalidate_on_error), 0);
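Because MODULE_PARAM_PREFIX is redefined just above, these two parameters are read from the kernel command line as androidboot.vbmeta.device and androidboot.vbmeta.invalidate_on_error. Invalidation only runs when the latter is the literal string "yes", and the former is resolved with name_to_dev_t(), so it must be a device specification that helper understands (a PARTUUID= style value is an assumption here, not something this change mandates).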
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0a7a828..b03e808 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -235,8 +235,12 @@
 	if (v->mode == DM_VERITY_MODE_LOGGING)
 		return 0;
 
-	if (v->mode == DM_VERITY_MODE_RESTART)
+	if (v->mode == DM_VERITY_MODE_RESTART) {
+#ifdef CONFIG_DM_VERITY_AVB
+		dm_verity_avb_error_handler();
+#endif
 		kernel_restart("dm-verity device corrupted");
+	}
 
 	return 1;
 }
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 6d6d8df..d216fc7 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -137,4 +137,5 @@
 extern void verity_dtr(struct dm_target *ti);
 extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
 extern int verity_map(struct dm_target *ti, struct bio *bio);
+extern void dm_verity_avb_error_handler(void);
 #endif /* DM_VERITY_H */
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcc2b57..e870b09 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -302,15 +302,6 @@
 	while (cinfo->recovery_map) {
 		slot = fls64((u64)cinfo->recovery_map) - 1;
 
-		/* Clear suspend_area associated with the bitmap */
-		spin_lock_irq(&cinfo->suspend_lock);
-		list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
-			if (slot == s->slot) {
-				list_del(&s->list);
-				kfree(s);
-			}
-		spin_unlock_irq(&cinfo->suspend_lock);
-
 		snprintf(str, 64, "bitmap%04d", slot);
 		bm_lockres = lockres_init(mddev, str, NULL, 1);
 		if (!bm_lockres) {
@@ -329,6 +320,16 @@
 			pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
 			goto clear_bit;
 		}
+
+		/* Clear suspend_area associated with the bitmap */
+		spin_lock_irq(&cinfo->suspend_lock);
+		list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+			if (slot == s->slot) {
+				list_del(&s->list);
+				kfree(s);
+			}
+		spin_unlock_irq(&cinfo->suspend_lock);
+
 		if (hi > 0) {
 			if (lo < mddev->recovery_cp)
 				mddev->recovery_cp = lo;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 13b3424..bf2d2df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6192,6 +6192,9 @@
 	char b[BDEVNAME_SIZE];
 	struct md_rdev *rdev;
 
+	if (!mddev->pers)
+		return -ENODEV;
+
 	rdev = find_rdev(mddev, dev);
 	if (!rdev)
 		return -ENXIO;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9b982d4..0e52852 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3734,6 +3734,13 @@
 			    disk->rdev->saved_raid_disk < 0)
 				conf->fullsync = 1;
 		}
+
+		if (disk->replacement &&
+		    !test_bit(In_sync, &disk->replacement->flags) &&
+		    disk->replacement->saved_raid_disk < 0) {
+			conf->fullsync = 1;
+		}
+
 		disk->recovery_disabled = mddev->recovery_disabled - 1;
 	}
 
@@ -4374,11 +4381,12 @@
 		allow_barrier(conf);
 	}
 
+	raise_barrier(conf, 0);
 read_more:
 	/* Now schedule reads for blocks from sector_nr to last */
 	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
 	r10_bio->state = 0;
-	raise_barrier(conf, sectors_done != 0);
+	raise_barrier(conf, 1);
 	atomic_set(&r10_bio->remaining, 0);
 	r10_bio->mddev = mddev;
 	r10_bio->sector = sector_nr;
@@ -4485,6 +4493,8 @@
 	if (sector_nr <= last)
 		goto read_more;
 
+	lower_barrier(conf);
+
 	/* Now that we have done the whole section we can
 	 * update reshape_progress
 	 */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8de95a5..d15e29d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4207,6 +4207,12 @@
 			s->failed++;
 			if (rdev && !test_bit(Faulty, &rdev->flags))
 				do_recovery = 1;
+			else if (!rdev) {
+				rdev = rcu_dereference(
+				    conf->disks[i].replacement);
+				if (rdev && !test_bit(Faulty, &rdev->flags))
+					do_recovery = 1;
+			}
 		}
 	}
 	if (test_bit(STRIPE_SYNCING, &sh->state)) {
diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c
index bfe831c..b95a631 100644
--- a/drivers/media/common/siano/smsendian.c
+++ b/drivers/media/common/siano/smsendian.c
@@ -35,7 +35,7 @@
 	switch (msg->x_msg_header.msg_type) {
 	case MSG_SMS_DATA_DOWNLOAD_REQ:
 	{
-		msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]);
+		msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0]));
 		break;
 	}
 
@@ -44,7 +44,7 @@
 				sizeof(struct sms_msg_hdr))/4;
 
 		for (i = 0; i < msg_words; i++)
-			msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]);
+			msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
 
 		break;
 	}
@@ -64,7 +64,7 @@
 	{
 		struct sms_version_res *ver =
 			(struct sms_version_res *) msg;
-		ver->chip_model = le16_to_cpu(ver->chip_model);
+		ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model);
 		break;
 	}
 
@@ -81,7 +81,7 @@
 				sizeof(struct sms_msg_hdr))/4;
 
 		for (i = 0; i < msg_words; i++)
-			msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]);
+			msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
 
 		break;
 	}
@@ -95,9 +95,9 @@
 #ifdef __BIG_ENDIAN
 	struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg;
 
-	phdr->msg_type = le16_to_cpu(phdr->msg_type);
-	phdr->msg_length = le16_to_cpu(phdr->msg_length);
-	phdr->msg_flags = le16_to_cpu(phdr->msg_flags);
+	phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type);
+	phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length);
+	phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags);
 #endif /* __BIG_ENDIAN */
 }
 EXPORT_SYMBOL_GPL(smsendian_handle_message_header);
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index e06bcd4..800f386 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -898,7 +898,10 @@
 	helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
 
 	/* 0x81 - 0x94 */
-	data[0] = 0x18; /* xtal 24 MHz */
+	if (priv->xtal == SONY_HELENE_XTAL_16000)
+		data[0] = 0x10; /* xtal 16 MHz */
+	else
+		data[0] = 0x18; /* xtal 24 MHz */
 	data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
 	data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
 	data[3] = 0x80; /* REFOUT signal output 500mVpp */
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 44f8c7e..8ffa13f 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -991,7 +991,7 @@
 		if (rval)
 			goto out;
 
-		for (i = 0; i < 1000; i++) {
+		for (i = 1000; i > 0; i--) {
 			rval = smiapp_read(
 				sensor,
 				SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
@@ -1002,11 +1002,10 @@
 			if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
 				break;
 
-			if (--i == 0) {
-				rval = -ETIMEDOUT;
-				goto out;
-			}
-
+		}
+		if (!i) {
+			rval = -ETIMEDOUT;
+			goto out;
 		}
 
 		for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 7e68762..fa1cb24 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -834,7 +834,7 @@
 	 * set COM8
 	 */
 	if (priv->band_filter) {
-		ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+		ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
 		if (!ret)
 			ret = ov772x_mask_set(client, BDBASE,
 					      0xff, 256 - priv->band_filter);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index cc8de56..6f46c59 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -60,7 +60,7 @@
 
 static long media_device_get_info(struct media_device *dev, void *arg)
 {
-	struct media_device_info *info = (struct media_device_info *)arg;
+	struct media_device_info *info = arg;
 
 	memset(info, 0, sizeof(*info));
 
@@ -100,7 +100,7 @@
 
 static long media_device_enum_entities(struct media_device *mdev, void *arg)
 {
-	struct media_entity_desc *entd = (struct media_entity_desc *)arg;
+	struct media_entity_desc *entd = arg;
 	struct media_entity *ent;
 
 	ent = find_entity(mdev, entd->id);
@@ -153,7 +153,7 @@
 
 static long media_device_enum_links(struct media_device *mdev, void *arg)
 {
-	struct media_links_enum *links = (struct media_links_enum *)arg;
+	struct media_links_enum *links = arg;
 	struct media_entity *entity;
 
 	entity = find_entity(mdev, links->entity);
@@ -201,7 +201,7 @@
 
 static long media_device_setup_link(struct media_device *mdev, void *arg)
 {
-	struct media_link_desc *linkd = (struct media_link_desc *)arg;
+	struct media_link_desc *linkd = arg;
 	struct media_link *link = NULL;
 	struct media_entity *source;
 	struct media_entity *sink;
@@ -229,7 +229,7 @@
 
 static long media_device_get_topology(struct media_device *mdev, void *arg)
 {
-	struct media_v2_topology *topo = (struct media_v2_topology *)arg;
+	struct media_v2_topology *topo = arg;
 	struct media_entity *entity;
 	struct media_interface *intf;
 	struct media_pad *pad;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 269e078..93d5319 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -430,7 +430,8 @@
 			__func__, fw->size);
 
 		if (fw->size != fwlength) {
-			printk(KERN_ERR "xc5000: firmware incorrect size\n");
+			printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n",
+				fw->size, fwlength);
 			ret = -ENOMEM;
 			goto out;
 		}
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index c3fafa9..3a06c00 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -1190,6 +1190,14 @@
 			return err;
 	}
 
+	/* Initialize vc->dev and vc->ch for the error path */
+	for (ch = 0; ch < max_channels(dev); ch++) {
+		struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+		vc->dev = dev;
+		vc->ch = ch;
+	}
+
 	for (ch = 0; ch < max_channels(dev); ch++) {
 		struct tw686x_video_channel *vc = &dev->video_channels[ch];
 		struct video_device *vdev;
@@ -1198,9 +1206,6 @@
 		spin_lock_init(&vc->qlock);
 		INIT_LIST_HEAD(&vc->vidq_queued);
 
-		vc->dev = dev;
-		vc->ch = ch;
-
 		/* default settings */
 		err = tw686x_set_standard(vc, V4L2_STD_NTSC);
 		if (err)
@@ -1228,7 +1233,8 @@
 		vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 		vc->vidq.min_buffers_needed = 2;
 		vc->vidq.lock = &vc->vb_mutex;
-		vc->vidq.gfp_flags = GFP_DMA32;
+		vc->vidq.gfp_flags = dev->dma_mode != TW686X_DMA_MODE_MEMCPY ?
+				     GFP_DMA32 : 0;
 		vc->vidq.dev = &dev->pci_dev->dev;
 
 		err = vb2_queue_init(&vc->vidq);
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 400ce0c..e00fa03 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -384,12 +384,17 @@
 				struct v4l2_pix_format_mplane *pixm,
 				const struct fimc_fmt **fmt)
 {
-	*fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+	const struct fimc_fmt *__fmt;
+
+	__fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+	if (fmt)
+		*fmt = __fmt;
 
 	pixm->colorspace = V4L2_COLORSPACE_SRGB;
 	pixm->field = V4L2_FIELD_NONE;
-	pixm->num_planes = (*fmt)->memplanes;
-	pixm->pixelformat = (*fmt)->fourcc;
+	pixm->num_planes = __fmt->memplanes;
+	pixm->pixelformat = __fmt->fourcc;
 	/*
 	 * TODO: double check with the docmentation these width/height
 	 * constraints are correct.
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index ae8c6b3..7f0ed5a 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1417,7 +1417,7 @@
 				     sizeof(struct viu_reg), DRV_NAME)) {
 		dev_err(&op->dev, "Error while requesting mem region\n");
 		ret = -EBUSY;
-		goto err;
+		goto err_irq;
 	}
 
 	/* remap registers */
@@ -1425,7 +1425,7 @@
 	if (!viu_regs) {
 		dev_err(&op->dev, "Can't map register set\n");
 		ret = -ENOMEM;
-		goto err;
+		goto err_irq;
 	}
 
 	/* Prepare our private structure */
@@ -1433,7 +1433,7 @@
 	if (!viu_dev) {
 		dev_err(&op->dev, "Can't allocate private structure\n");
 		ret = -ENOMEM;
-		goto err;
+		goto err_irq;
 	}
 
 	viu_dev->vr = viu_regs;
@@ -1449,16 +1449,21 @@
 	ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
 	if (ret < 0) {
 		dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
-		goto err;
+		goto err_irq;
 	}
 
 	ad = i2c_get_adapter(0);
+	if (!ad) {
+		ret = -EFAULT;
+		dev_err(&op->dev, "couldn't get i2c adapter\n");
+		goto err_v4l2;
+	}
 
 	v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
 	if (viu_dev->hdl.error) {
 		ret = viu_dev->hdl.error;
 		dev_err(&op->dev, "couldn't register control\n");
-		goto err_vdev;
+		goto err_i2c;
 	}
 	/* This control handler will inherit the control(s) from the
 	   sub-device(s). */
@@ -1476,7 +1481,7 @@
 	vdev = video_device_alloc();
 	if (vdev == NULL) {
 		ret = -ENOMEM;
-		goto err_vdev;
+		goto err_hdl;
 	}
 
 	*vdev = viu_template;
@@ -1497,7 +1502,7 @@
 	ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
 	if (ret < 0) {
 		video_device_release(viu_dev->vdev);
-		goto err_vdev;
+		goto err_unlock;
 	}
 
 	/* enable VIU clock */
@@ -1505,12 +1510,12 @@
 	if (IS_ERR(clk)) {
 		dev_err(&op->dev, "failed to lookup the clock!\n");
 		ret = PTR_ERR(clk);
-		goto err_clk;
+		goto err_vdev;
 	}
 	ret = clk_prepare_enable(clk);
 	if (ret) {
 		dev_err(&op->dev, "failed to enable the clock!\n");
-		goto err_clk;
+		goto err_vdev;
 	}
 	viu_dev->clk = clk;
 
@@ -1521,7 +1526,7 @@
 	if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
 		dev_err(&op->dev, "Request VIU IRQ failed.\n");
 		ret = -ENODEV;
-		goto err_irq;
+		goto err_clk;
 	}
 
 	mutex_unlock(&viu_dev->lock);
@@ -1529,16 +1534,19 @@
 	dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
 	return ret;
 
-err_irq:
-	clk_disable_unprepare(viu_dev->clk);
 err_clk:
-	video_unregister_device(viu_dev->vdev);
+	clk_disable_unprepare(viu_dev->clk);
 err_vdev:
-	v4l2_ctrl_handler_free(&viu_dev->hdl);
+	video_unregister_device(viu_dev->vdev);
+err_unlock:
 	mutex_unlock(&viu_dev->lock);
+err_hdl:
+	v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
 	i2c_put_adapter(ad);
+err_v4l2:
 	v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
 	irq_dispose_mapping(viu_irq);
 	return ret;
 }
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
index 03f6e0c..ff8be35 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -195,11 +195,11 @@
 	struct cam_cdm_bl_request *data;
 };
 
-/* struct cam_cdm_hw_mem - CDM hw memory.struct */
+/* struct cam_cdm_hw_mem - CDM hw memory struct */
 struct cam_cdm_hw_mem {
 	int32_t handle;
 	uint32_t vaddr;
-	uint64_t kmdvaddr;
+	uintptr_t kmdvaddr;
 	size_t size;
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index f2796be..4ae2f0a 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -624,7 +624,8 @@
 }
 
 static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
-	struct device *dev, unsigned long iova, int flags, void *token)
+	struct device *dev, unsigned long iova, int flags, void *token,
+	uint32_t buf_info)
 {
 	struct cam_hw_info *cdm_hw = NULL;
 	struct cam_cdm *core = NULL;
@@ -910,7 +911,7 @@
 		CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
 		goto unlock_release_mem;
 	}
-	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
 		cam_hw_cdm_iommu_fault_handler, cdm_hw);
 
 	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
@@ -1034,7 +1035,7 @@
 	flush_workqueue(cdm_core->work_queue);
 	destroy_workqueue(cdm_core->work_queue);
 destroy_non_secure_hdl:
-	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
 		NULL, cdm_hw);
 	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
 		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
@@ -1106,8 +1107,8 @@
 
 	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
 		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
-	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
-		NULL, cdm_hw);
+	cam_smmu_unset_client_page_fault_handler(
+		cdm_core->iommu_hdl.non_secure, cdm_hw);
 
 	mutex_destroy(&cdm_hw->hw_mutex);
 	kfree(cdm_hw->soc_info.soc_private);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
index 2b00a87..6aa6e6d 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
@@ -105,10 +105,10 @@
 	union {
 		int32_t mem_handle;
 		uint32_t *hw_iova;
-		void *kernel_iova;
+		uintptr_t kernel_iova;
 	} bl_addr;
-	uint32_t  offset;
-	uint32_t  len;
+	uint32_t offset;
+	uint32_t len;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
index d76f344..9021eca 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -87,7 +87,7 @@
 
 	mutex_lock(&client->lock);
 	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
-		uint64_t vaddr_ptr = 0;
+		uintptr_t vaddr_ptr = 0;
 		size_t len = 0;
 
 		if ((!cdm_cmd->cmd[i].len) &&
@@ -106,8 +106,7 @@
 		} else if (req->data->type ==
 			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
 			rc = 0;
-			vaddr_ptr =
-				(uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
+			vaddr_ptr = cdm_cmd->cmd[i].bl_addr.kernel_iova;
 			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
 		} else {
 			CAM_ERR(CAM_CDM,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index da78527..4ed53f9 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -42,6 +42,7 @@
 	int rc = 0;
 	int32_t ctx_hdl = ctx->dev_hdl;
 
+	mutex_lock(&ctx->ctx_mutex);
 	if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev) {
 		rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
 			ctx, NULL);
@@ -54,6 +55,7 @@
 		if (rc < 0)
 			CAM_ERR(CAM_CORE, "Error while dev release %d", rc);
 	}
+	mutex_unlock(&ctx->ctx_mutex);
 
 	if (!rc)
 		rc = cam_destroy_device_hdl(ctx_hdl);
@@ -221,6 +223,27 @@
 	return rc;
 }
 
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+	uint32_t buf_info)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].pagefault_ops) {
+		rc = ctx->state_machine[ctx->state].pagefault_ops(ctx, iova,
+			buf_info);
+	} else {
+		CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+
+	return rc;
+}
+
 int cam_context_handle_acquire_dev(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
 {
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index ffceea2..420f9f6 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -57,23 +57,25 @@
  * @num_out_acked:         Number of out fence acked
  * @flushed:               Request is flushed
  * @ctx:                   The context to which this request belongs
+ * @pf_data:               Page fault debug data
  *
  */
 struct cam_ctx_request {
-	struct list_head              list;
-	uint32_t                      status;
-	uint64_t                      request_id;
+	struct list_head               list;
+	uint32_t                       status;
+	uint64_t                       request_id;
 	void                          *req_priv;
-	struct cam_hw_update_entry    hw_update_entries[CAM_CTX_CFG_MAX];
-	uint32_t                      num_hw_update_entries;
-	struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX];
-	uint32_t                      num_in_map_entries;
-	struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
-	uint32_t                      num_out_map_entries;
-	atomic_t                      num_in_acked;
-	uint32_t                      num_out_acked;
-	int                           flushed;
-	struct cam_context           *ctx;
+	struct cam_hw_update_entry     hw_update_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_hw_update_entries;
+	struct cam_hw_fence_map_entry  in_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_in_map_entries;
+	struct cam_hw_fence_map_entry  out_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_out_map_entries;
+	atomic_t                       num_in_acked;
+	uint32_t                       num_out_acked;
+	int                            flushed;
+	struct cam_context            *ctx;
+	struct cam_hw_mgr_dump_pf_data pf_data;
 };
 
 /**
@@ -135,12 +137,14 @@
  * @ioctl_ops:             Ioctl funciton table
  * @crm_ops:               CRM to context interface function table
  * @irq_ops:               Hardware event handle function
+ * @pagefault_ops:         Function to be called on page fault
  *
  */
 struct cam_ctx_ops {
 	struct cam_ctx_ioctl_ops     ioctl_ops;
 	struct cam_ctx_crm_ops       crm_ops;
 	cam_hw_event_cb_func         irq_ops;
+	cam_hw_pagefault_cb_func     pagefault_ops;
 };
 
 /**
@@ -292,6 +296,19 @@
 	struct cam_req_mgr_link_evt_data *process_evt);
 
 /**
+ * cam_context_dump_pf_info()
+ *
+ * @brief:        Handle dump active request command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @iova:         Page fault address
+ * @buf_info:     Information about closest memory handle
+ *
+ */
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+	uint32_t buf_info);
+
+/**
  * cam_context_handle_acquire_dev()
  *
  * @brief:        Handle acquire device command
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 6c2383e..8021f12 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -272,7 +272,7 @@
 	int rc = 0;
 	struct cam_ctx_request *req = NULL;
 	struct cam_hw_prepare_update_args cfg;
-	uint64_t packet_addr;
+	uintptr_t packet_addr;
 	struct cam_packet *packet;
 	size_t len = 0;
 	int32_t i = 0, j = 0;
@@ -315,8 +315,7 @@
 	/* for config dev, only memory handle is supported */
 	/* map packet from the memhandle */
 	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
-		(uint64_t *) &packet_addr,
-		&len);
+		&packet_addr, &len);
 	if (rc != 0) {
 		CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
 			ctx->dev_name, ctx->ctx_id);
@@ -324,7 +323,8 @@
 		goto free_req;
 	}
 
-	packet = (struct cam_packet *) (packet_addr + cmd->offset);
+	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+		(uint32_t)cmd->offset);
 
 	/* preprocess the configuration */
 	memset(&cfg, 0, sizeof(cfg));
@@ -337,6 +337,7 @@
 	cfg.out_map_entries = req->out_map_entries;
 	cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
 	cfg.in_map_entries = req->in_map_entries;
+	cfg.pf_data = &(req->pf_data);
 
 	rc = ctx->hw_mgr_intf->hw_prepare_update(
 		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -905,3 +906,38 @@
 end:
 	return rc;
 }
+
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+	bool *mem_found)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args cmd_args;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK ", ctx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (ctx->hw_mgr_intf->hw_cmd) {
+		cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO;
+		cmd_args.u.pf_args.pf_data.packet = packet;
+		cmd_args.u.pf_args.iova = iova;
+		cmd_args.u.pf_args.buf_info = buf_info;
+		cmd_args.u.pf_args.mem_found = mem_found;
+		ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+			&cmd_args);
+	}
+
+end:
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
index 9b95ead..43e6940 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,5 +31,8 @@
 int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
 int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 	struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+	bool *mem_found);
 
 #endif /* _CAM_CONTEXT_UTILS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index f7990b6..54b0f4d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -13,6 +13,9 @@
 #ifndef _CAM_HW_MGR_INTF_H_
 #define _CAM_HW_MGR_INTF_H_
 
+#include <linux/time.h>
+#include <linux/types.h>
+
 /*
  * This file declares Constants, Enums, Structures and APIs to be used as
  * Interface between HW Manager and Context.
@@ -29,6 +32,10 @@
 typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
 	void *evt_data);
 
+/* hardware page fault callback function type */
+typedef int (*cam_hw_pagefault_cb_func)(void *context, unsigned long iova,
+	uint32_t buf_info);
+
 /**
  * struct cam_hw_update_entry - Entry for hardware config
  *
@@ -44,7 +51,7 @@
 	uint32_t           offset;
 	uint32_t           len;
 	uint32_t           flags;
-	uint64_t           addr;
+	uintptr_t          addr;
 };
 
 /**
@@ -89,7 +96,7 @@
 	void                        *context_data;
 	cam_hw_event_cb_func         event_cb;
 	uint32_t                     num_acq;
-	uint64_t                     acquire_info;
+	uintptr_t                    acquire_info;
 	void                        *ctxt_to_hw_map;
 };
 
@@ -131,6 +138,16 @@
 	void              *args;
 };
 
+
+/**
+ * struct cam_hw_mgr_dump_pf_data - page fault debug data
+ *
+ * @packet:     Pointer to packet
+ */
+struct cam_hw_mgr_dump_pf_data {
+	void    *packet;
+};
+
 /**
  * struct cam_hw_prepare_update_args - Payload for prepare command
  *
@@ -146,6 +163,7 @@
  * @in_map_entries:        Actual input fence mapping list (returned)
  * @num_in_map_entries:    Number of acutal input fence mapping (returned)
  * @priv:                  Private pointer of hw update
+ * @pf_data:               Debug data for page fault
  *
  */
 struct cam_hw_prepare_update_args {
@@ -161,6 +179,7 @@
 	struct cam_hw_fence_map_entry  *in_map_entries;
 	uint32_t                        num_in_map_entries;
 	void                           *priv;
+	struct cam_hw_mgr_dump_pf_data *pf_data;
 };
 
 /**
@@ -207,6 +226,48 @@
 };
 
 /**
+ * struct cam_hw_dump_pf_args - Payload for dump pf info command
+ *
+ * @pf_data:               Debug data for page fault
+ * @iova:                  Page fault address
+ * @buf_info:              Info about memory buffer where page
+ *                               fault occurred
+ * @mem_found:             If fault memory found in current
+ *                               request
+ *
+ */
+struct cam_hw_dump_pf_args {
+	struct cam_hw_mgr_dump_pf_data  pf_data;
+	unsigned long                   iova;
+	uint32_t                        buf_info;
+	bool                           *mem_found;
+};
+
+/* enum cam_hw_mgr_command - Hardware manager command type */
+enum cam_hw_mgr_command {
+	CAM_HW_MGR_CMD_INTERNAL,
+	CAM_HW_MGR_CMD_DUMP_PF_INFO,
+};
+
+/**
+ * struct cam_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @cmd_type:              HW command type
+ * @internal_args:         Arguments for internal command
+ * @pf_args:               Arguments for Dump PF info command
+ *
+ */
+struct cam_hw_cmd_args {
+	void                               *ctxt_to_hw_map;
+	uint32_t                            cmd_type;
+	union {
+		void                       *internal_args;
+		struct cam_hw_dump_pf_args  pf_args;
+	} u;
+};
+
+/**
  * cam_hw_mgr_intf - HW manager interface
  *
  * @hw_mgr_priv:           HW manager object
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 3f24c6d..212a11b 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -286,20 +286,30 @@
 		return -EINVAL;
 	}
 
-	rc = cam_context_handle_release_dev(ctx, release);
-	if (rc)
-		CAM_ERR(CAM_CORE, "context release failed node %s", node->name);
+	if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) {
+		rc = cam_context_handle_release_dev(ctx, release);
+		if (rc)
+			CAM_ERR(CAM_CORE, "context release failed for node %s",
+				node->name);
+	} else {
+		CAM_WARN(CAM_CORE,
+			"node %s context id %u state %d invalid to release hdl",
+			node->name, ctx->ctx_id, ctx->state);
+		goto destroy_dev_hdl;
+	}
 
+	cam_context_putref(ctx);
+
+destroy_dev_hdl:
 	rc = cam_destroy_device_hdl(release->dev_handle);
 	if (rc)
-		CAM_ERR(CAM_CORE, "destroy device handle is failed node %s",
+		CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
 			node->name);
 
 	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
 		node->name, ctx->ctx_id,
 		atomic_read(&(ctx->refcount.refcount)));
 
-	cam_context_putref(ctx);
 	return rc;
 }
 
@@ -420,6 +430,9 @@
 
 	for (i = 0; i < node->ctx_size; i++) {
 		if (node->ctx_list[i].dev_hdl > 0) {
+			CAM_DBG(CAM_CORE,
+				"Node [%s] invoking shutdown on context [%d]",
+				node->name, i);
 			rc = cam_context_shutdown(&(node->ctx_list[i]));
 			if (rc)
 				continue;
@@ -491,7 +504,7 @@
 	case CAM_QUERY_CAP: {
 		struct cam_query_cap_cmd query;
 
-		if (copy_from_user(&query, (void __user *)cmd->handle,
+		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
 			sizeof(query))) {
 			rc = -EFAULT;
 			break;
@@ -504,7 +517,7 @@
 			break;
 		}
 
-		if (copy_to_user((void __user *)cmd->handle, &query,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
 			sizeof(query)))
 			rc = -EFAULT;
 
@@ -513,7 +526,7 @@
 	case CAM_ACQUIRE_DEV: {
 		struct cam_acquire_dev_cmd acquire;
 
-		if (copy_from_user(&acquire, (void __user *)cmd->handle,
+		if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle),
 			sizeof(acquire))) {
 			rc = -EFAULT;
 			break;
@@ -524,7 +537,7 @@
 				rc);
 			break;
 		}
-		if (copy_to_user((void __user *)cmd->handle, &acquire,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire,
 			sizeof(acquire)))
 			rc = -EFAULT;
 		break;
@@ -532,7 +545,7 @@
 	case CAM_START_DEV: {
 		struct cam_start_stop_dev_cmd start;
 
-		if (copy_from_user(&start, (void __user *)cmd->handle,
+		if (copy_from_user(&start, u64_to_user_ptr(cmd->handle),
 			sizeof(start)))
 			rc = -EFAULT;
 		else {
@@ -546,7 +559,7 @@
 	case CAM_STOP_DEV: {
 		struct cam_start_stop_dev_cmd stop;
 
-		if (copy_from_user(&stop, (void __user *)cmd->handle,
+		if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle),
 			sizeof(stop)))
 			rc = -EFAULT;
 		else {
@@ -560,7 +573,7 @@
 	case CAM_CONFIG_DEV: {
 		struct cam_config_dev_cmd config;
 
-		if (copy_from_user(&config, (void __user *)cmd->handle,
+		if (copy_from_user(&config, u64_to_user_ptr(cmd->handle),
 			sizeof(config)))
 			rc = -EFAULT;
 		else {
@@ -574,7 +587,7 @@
 	case CAM_RELEASE_DEV: {
 		struct cam_release_dev_cmd release;
 
-		if (copy_from_user(&release, (void __user *)cmd->handle,
+		if (copy_from_user(&release, u64_to_user_ptr(cmd->handle),
 			sizeof(release)))
 			rc = -EFAULT;
 		else {
@@ -588,7 +601,7 @@
 	case CAM_FLUSH_REQ: {
 		struct cam_flush_dev_cmd flush;
 
-		if (copy_from_user(&flush, (void __user *)cmd->handle,
+		if (copy_from_user(&flush, u64_to_user_ptr(cmd->handle),
 			sizeof(flush)))
 			rc = -EFAULT;
 		else {
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 053447e..948485f 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -933,7 +933,7 @@
 	}
 
 	if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
-		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
 			sizeof(struct cam_cpas_hw_cmd_start), arg_size);
 		return -EINVAL;
 	}
@@ -1065,7 +1065,7 @@
 	}
 
 	if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
-		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
 			sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
 		return -EINVAL;
 	}
@@ -1168,7 +1168,7 @@
 	}
 
 	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
-		CAM_ERR(CAM_CPAS, "INIT HW size mismatch %ld %d",
+		CAM_ERR(CAM_CPAS, "INIT HW size mismatch %zd %d",
 			sizeof(struct cam_cpas_hw_caps), arg_size);
 		return -EINVAL;
 	}
@@ -1325,7 +1325,7 @@
 	}
 
 	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
-		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
 			sizeof(struct cam_cpas_hw_caps), arg_size);
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index d51b152..eb8b156 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -23,7 +23,7 @@
 #define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
 #define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
 #define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000UL)
 
 #define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
 #define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index cdc8a3b..a9f1e4f 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -367,7 +367,7 @@
 	case CAM_QUERY_CAP: {
 		struct cam_cpas_query_cap query;
 
-		rc = copy_from_user(&query, (void __user *) cmd->handle,
+		rc = copy_from_user(&query, u64_to_user_ptr(cmd->handle),
 			sizeof(query));
 		if (rc) {
 			CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d",
@@ -381,7 +381,7 @@
 		if (rc)
 			break;
 
-		rc = copy_to_user((void __user *) cmd->handle, &query,
+		rc = copy_to_user(u64_to_user_ptr(cmd->handle), &query,
 			sizeof(query));
 		if (rc)
 			CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc);
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index 09388fe..7e78f45 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -417,7 +417,7 @@
 		uint32_t *get_raw_results = (uint32_t *)blob_data;
 
 		if (sizeof(uint32_t) != blob_size) {
-			CAM_ERR(CAM_FD, "Invalid blob size %lu %u",
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
 				sizeof(uint32_t), blob_size);
 			return -EINVAL;
 		}
@@ -430,7 +430,7 @@
 			(struct cam_fd_soc_clock_bw_request *)blob_data;
 
 		if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) {
-			CAM_ERR(CAM_FD, "Invalid blob size %lu %u",
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
 				sizeof(struct cam_fd_soc_clock_bw_request),
 				blob_size);
 			return -EINVAL;
@@ -537,7 +537,7 @@
 	uint32_t i, j, plane, num_out_buf, num_in_buf;
 	struct cam_buf_io_cfg *io_cfg;
 	dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
-	uint64_t cpu_addr[CAM_PACKET_MAX_PLANES];
+	uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
 	size_t size;
 	bool need_io_map, need_cpu_map;
 
@@ -583,7 +583,7 @@
 				rc = cam_mem_get_io_buf(
 					io_cfg[i].mem_handle[plane],
 					iommu_hdl, &io_addr[plane], &size);
-				if ((rc) || (io_addr[plane] >> 32)) {
+				if (rc) {
 					CAM_ERR(CAM_FD,
 						"Invalid io buf %d %d %d %d",
 						io_cfg[i].direction,
@@ -599,7 +599,8 @@
 				rc = cam_mem_get_cpu_buf(
 					io_cfg[i].mem_handle[plane],
 					&cpu_addr[plane], &size);
-				if (rc) {
+				if (rc || ((io_addr[plane] & 0xFFFFFFFF)
+					!= io_addr[plane])) {
 					CAM_ERR(CAM_FD,
 						"Invalid cpu buf %d %d %d %d",
 						io_cfg[i].direction,
@@ -1088,8 +1089,10 @@
 	struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv;
 	struct cam_query_cap_cmd *query = hw_get_caps_args;
 	struct cam_fd_query_cap_cmd query_fd;
+	void __user *caps_handle =
+		u64_to_user_ptr(query->caps_handle);
 
-	if (copy_from_user(&query_fd, (void __user *)query->caps_handle,
+	if (copy_from_user(&query_fd, caps_handle,
 		sizeof(struct cam_fd_query_cap_cmd))) {
 		CAM_ERR(CAM_FD, "Failed in copy from user, rc=%d", rc);
 		return -EFAULT;
@@ -1106,7 +1109,7 @@
 		query_fd.hw_caps.wrapper_version.major,
 		query_fd.hw_caps.wrapper_version.minor);
 
-	if (copy_to_user((void __user *)query->caps_handle, &query_fd,
+	if (copy_to_user(caps_handle, &query_fd,
 		sizeof(struct cam_fd_query_cap_cmd)))
 		rc = -EFAULT;
 
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
index 87dc694..fa648c7 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -651,7 +651,7 @@
 	}
 
 	if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
-		CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
 			sizeof(struct cam_fd_hw_init_args));
 		return -EINVAL;
 	}
@@ -735,7 +735,7 @@
 	}
 
 	if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
-		CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
 			sizeof(struct cam_fd_hw_deinit_args));
 		return -EINVAL;
 	}
@@ -859,7 +859,7 @@
 	}
 
 	if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
-		CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
 			sizeof(struct cam_fd_hw_cmd_start_args));
 		return -EINVAL;
 	}
@@ -1010,7 +1010,7 @@
 	}
 
 	if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
-		CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
 			sizeof(struct cam_fd_hw_reserve_args));
 		return -EINVAL;
 	}
@@ -1079,7 +1079,7 @@
 	}
 
 	if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
-		CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
 			sizeof(struct cam_fd_hw_release_args));
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
index aae7648..ef3b6c9 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -111,7 +111,7 @@
 	struct cam_buf_io_cfg *io_cfg;
 	uint32_t               num_buf;
 	uint64_t               io_addr[CAM_PACKET_MAX_PLANES];
-	uint64_t               cpu_addr[CAM_PACKET_MAX_PLANES];
+	uintptr_t              cpu_addr[CAM_PACKET_MAX_PLANES];
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 502c95d..522a602 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -25,9 +25,46 @@
 #include "cam_mem_mgr.h"
 #include "cam_trace.h"
 #include "cam_debug_util.h"
+#include "cam_packet_util.h"
 
 static const char icp_dev_name[] = "icp";
 
+static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	int rc = 0;
+	bool b_mem_found = false;
+
+	if (!ctx) {
+		CAM_ERR(CAM_ICP, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_ICP, "req_id : %lld", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &b_mem_found);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_ICP, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+	return rc;
+}
+
 static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
 {
@@ -156,6 +193,7 @@
 		},
 		.crm_ops = {},
 		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+		.pagefault_ops = cam_icp_context_dump_active_request,
 	},
 	/* Ready */
 	{
@@ -167,12 +205,14 @@
 		},
 		.crm_ops = {},
 		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+		.pagefault_ops = cam_icp_context_dump_active_request,
 	},
 	/* Activated */
 	{
 		.ioctl_ops = {},
 		.crm_ops = {},
 		.irq_ops = NULL,
+		.pagefault_ops = cam_icp_context_dump_active_request,
 	},
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 7df806b..2ea7738 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -35,6 +35,7 @@
 #include "cam_hw_mgr_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_debug_util.h"
+#include "cam_smmu_api.h"
 
 #define CAM_ICP_DEV_NAME        "cam-icp"
 
@@ -55,6 +56,25 @@
 	{}
 };
 
+static void cam_icp_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_ICP, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
 static int cam_icp_subdev_open(struct v4l2_subdev *sd,
 	struct v4l2_subdev_fh *fh)
 {
@@ -96,7 +116,7 @@
 
 	mutex_lock(&g_icp_dev.icp_lock);
 	if (g_icp_dev.open_cnt <= 0) {
-		CAM_ERR(CAM_ICP, "ICP subdev is already closed");
+		CAM_DBG(CAM_ICP, "ICP subdev is already closed");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -135,6 +155,7 @@
 	int rc = 0, i = 0;
 	struct cam_node *node;
 	struct cam_hw_mgr_intf *hw_mgr_intf;
+	int iommu_hdl = -1;
 
 	if (!pdev) {
 		CAM_ERR(CAM_ICP, "pdev is NULL");
@@ -158,7 +179,8 @@
 		goto hw_alloc_fail;
 	}
 
-	rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf);
+	rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf,
+		&iommu_hdl);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
 		goto hw_init_fail;
@@ -181,6 +203,9 @@
 		goto ctx_fail;
 	}
 
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_icp_dev_iommu_fault_handler, node);
+
 	g_icp_dev.open_cnt = 0;
 	mutex_init(&g_icp_dev.icp_lock);
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 3e636c6..3d0ee72 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -23,10 +23,10 @@
  * @reserved: reserved field
  */
 struct hfi_mem {
-	uint64_t len;
-	uint64_t kva;
-	uint32_t iova;
-	uint32_t reserved;
+	uint64_t  len;
+	uintptr_t kva;
+	uint32_t  iova;
+	uint32_t  reserved;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index 14a3e65..cbe6886 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -42,6 +42,9 @@
 
 #define HFI_MAX_POLL_TRY 5
 
+#define HFI_MAX_PC_POLL_TRY 50
+#define HFI_POLL_TRY_SLEEP 20
+
 static struct hfi_info *g_hfi;
 unsigned int g_icp_mmu_hdl;
 static DEFINE_MUTEX(hfi_cmd_q_mutex);
@@ -513,8 +516,8 @@
 	uint32_t val;
 	uint32_t try = 0;
 
-	while (try < HFI_MAX_POLL_TRY) {
-		data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+	while (try < HFI_MAX_PC_POLL_TRY) {
+		data = cam_io_r_mb(icp_base + HFI_REG_A5_CSR_A5_STATUS);
 		CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
 
 		if (data & ICP_CSR_A5_STATUS_WFI)
@@ -523,7 +526,7 @@
 		 * and Host can the proceed. No interrupt is expected from FW
 		 * at this time.
 		 */
-		msleep(100);
+		msleep_interruptible(HFI_POLL_TRY_SLEEP);
 		try++;
 	}
 
@@ -533,6 +536,11 @@
 
 	val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
 	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+
+	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+	cam_io_w_mb((uint32_t)INTR_DISABLE,
+		g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
 }
 
 void cam_hfi_enable_cpu(void __iomem *icp_base)
@@ -883,11 +891,6 @@
 	g_hfi->cmd_q_state = false;
 	g_hfi->msg_q_state = false;
 
-	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
-		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
-
-	cam_io_w_mb((uint32_t)INTR_DISABLE,
-		g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
 	kzfree(g_hfi);
 	g_hfi = NULL;
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 8f2c769..18bd6d8 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -167,7 +167,7 @@
 		if (prg_hdr->p_filesz != 0) {
 			src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
 			dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
-						prg_hdr->p_vaddr);
+				prg_hdr->p_vaddr);
 
 			memcpy_toio(dest, src, prg_hdr->p_filesz);
 		}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
index f4bc813..9b02167 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
@@ -69,7 +69,7 @@
 	const struct firmware *fw_elf;
 	void *fw;
 	uint32_t fw_buf;
-	uint64_t fw_kva_addr;
+	uintptr_t fw_kva_addr;
 	uint64_t fw_buf_len;
 	struct cam_icp_a5_query_cap query_cap;
 	struct cam_icp_a5_acquire_dev a5_acquire[8];
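Editor's note: struct hfi_mem.kva and fw_kva_addr switch from uint64_t to uintptr_t above, so the fields are pointer-sized on both 32- and 64-bit builds and can be logged with the %zx/%zX specifiers used elsewhere in this patch. The sketch below (standard C, with PRIxPTR in place of the kernel's %zx) shows why uintptr_t is the integer type that round-trips a pointer:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int firmware_word = 0x1234;
	uintptr_t kva = (uintptr_t)&firmware_word;	/* pointer-sized on every ABI */

	printf("sizeof(void *) = %zu, sizeof(uintptr_t) = %zu, sizeof(uint64_t) = %zu\n",
	       sizeof(void *), sizeof(uintptr_t), sizeof(uint64_t));
	printf("kva = 0x%" PRIxPTR ", value = 0x%x\n", kva, *(int *)kva);
	return 0;
}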
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index d016374..b969c92 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -199,8 +199,10 @@
 	cam_cpas_reg_read(core_info->cpas_handle,
 		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
 	if (pwr_ctrl & BPS_COLLAPSE_MASK) {
-		CAM_ERR(CAM_ICP, "BPS: pwr_ctrl(%x)", pwr_ctrl);
-		return -EINVAL;
+		CAM_DBG(CAM_ICP, "BPS: pwr_ctrl set(%x)", pwr_ctrl);
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0);
 	}
 
 	rc = cam_bps_transfer_gdsc_control(soc_info);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 1d5e483..3e1b33f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1455,7 +1455,8 @@
 
 	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
 	request_id = ioconfig_ack->user_data2;
-	ctx_data = (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+	ctx_data = (struct cam_icp_hw_ctx_data *)
+		U64_TO_PTR(ioconfig_ack->user_data1);
 	if (!ctx_data) {
 		CAM_ERR(CAM_ICP, "Invalid Context");
 		return -EINVAL;
@@ -1562,8 +1563,8 @@
 				ipe_config_ack->rc, ioconfig_ack->err_type);
 			return -EIO;
 		}
-		ctx_data =
-			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
 		if (!ctx_data) {
 			CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
 			return -EINVAL;
@@ -1577,8 +1578,8 @@
 				bps_config_ack->rc, ioconfig_ack->opcode);
 			return -EIO;
 		}
-		ctx_data =
-			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
 		if (!ctx_data) {
 			CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
 			return -EINVAL;
@@ -1601,7 +1602,9 @@
 		return -EINVAL;
 	}
 
-	ctx_data = (struct cam_icp_hw_ctx_data *)create_handle_ack->user_data1;
+	ctx_data =
+		(struct cam_icp_hw_ctx_data *)(uintptr_t)
+		create_handle_ack->user_data1;
 	if (!ctx_data) {
 		CAM_ERR(CAM_ICP, "Invalid ctx_data");
 		return -EINVAL;
@@ -1632,7 +1635,8 @@
 		return -EINVAL;
 	}
 
-	ctx_data = (struct cam_icp_hw_ctx_data *)ping_ack->user_data;
+	ctx_data = (struct cam_icp_hw_ctx_data *)
+		U64_TO_PTR(ping_ack->user_data);
 	if (!ctx_data) {
 		CAM_ERR(CAM_ICP, "Invalid ctx_data");
 		return -EINVAL;
@@ -1696,8 +1700,8 @@
 	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
 	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
 		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
-		ctx_data =
-			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
 		if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
 			complete(&ctx_data->wait_complete);
 		CAM_DBG(CAM_ICP, "received IPE/BPS/ ABORT: ctx_state =%d",
@@ -1706,8 +1710,8 @@
 	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
 	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
 		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
-		ctx_data =
-			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
 		if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
 			(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)) {
 			complete(&ctx_data->wait_complete);
@@ -2132,7 +2136,7 @@
 static int cam_icp_allocate_fw_mem(void)
 {
 	int rc;
-	uint64_t kvaddr;
+	uintptr_t kvaddr;
 	size_t len;
 	dma_addr_t iova;
 
@@ -2146,7 +2150,7 @@
 	icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
 	icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
 
-	CAM_DBG(CAM_ICP, "kva: %llX, iova: %llx, len: %zu",
+	CAM_DBG(CAM_ICP, "kva: %zX, iova: %llx, len: %zu",
 		kvaddr, iova, len);
 
 	return rc;
@@ -2486,7 +2490,7 @@
 	reinit_completion(&ctx_data->wait_complete);
 	abort_cmd->num_fw_handles = 1;
 	abort_cmd->fw_handles[0] = ctx_data->fw_handle;
-	abort_cmd->user_data1 = (uint64_t)ctx_data;
+	abort_cmd->user_data1 = PTR_TO_U64(ctx_data);
 	abort_cmd->user_data2 = (uint64_t)0x0;
 
 	rc = hfi_write_cmd(abort_cmd);
@@ -2537,7 +2541,7 @@
 	reinit_completion(&ctx_data->wait_complete);
 	destroy_cmd->num_fw_handles = 1;
 	destroy_cmd->fw_handles[0] = ctx_data->fw_handle;
-	destroy_cmd->user_data1 = (uint64_t)ctx_data;
+	destroy_cmd->user_data1 = PTR_TO_U64(ctx_data);
 	destroy_cmd->user_data2 = (uint64_t)0x0;
 	memcpy(destroy_cmd->payload.direct, &ctx_data->temp_payload,
 		sizeof(uint64_t));
@@ -3097,7 +3101,7 @@
 	ioconfig_cmd.num_fw_handles = 1;
 	ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
 	ioconfig_cmd.payload.indirect = io_buf_addr;
-	ioconfig_cmd.user_data1 = (uint64_t)ctx_data;
+	ioconfig_cmd.user_data1 = PTR_TO_U64(ctx_data);
 	ioconfig_cmd.user_data2 = (uint64_t)0x0;
 	task_data = (struct hfi_cmd_work_data *)task->payload;
 	task_data->data = (void *)&ioconfig_cmd;
@@ -3228,7 +3232,7 @@
 	hfi_cmd->num_fw_handles = 1;
 	hfi_cmd->fw_handles[0] = ctx_data->fw_handle;
 	hfi_cmd->payload.indirect = fw_cmd_buf_iova_addr;
-	hfi_cmd->user_data1 = (uint64_t)ctx_data;
+	hfi_cmd->user_data1 = PTR_TO_U64(ctx_data);
 	hfi_cmd->user_data2 = request_id;
 
 	CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
@@ -3277,7 +3281,7 @@
 	dma_addr_t addr;
 	size_t len;
 	struct cam_cmd_buf_desc *cmd_desc = NULL;
-	uint64_t cpu_addr = 0;
+	uintptr_t cpu_addr = 0;
 	struct ipe_frame_process_data *frame_process_data = NULL;
 	struct bps_frame_process_data *bps_frame_process_data = NULL;
 	struct frame_set *ipe_set = NULL;
@@ -3318,7 +3322,7 @@
 
 	if (ctx_data->icp_dev_acquire_info->dev_type !=
 		CAM_ICP_RES_TYPE_BPS) {
-		CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
 		frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
 		CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
 			frame_process_data->target_time,
@@ -3339,7 +3343,7 @@
 			}
 		}
 	} else {
-		CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
 		bps_frame_process_data =
 			(struct bps_frame_process_data *)cpu_addr;
 		CAM_DBG(CAM_ICP, "%u %u",
@@ -3388,10 +3392,11 @@
 			prepare_args->num_out_map_entries++;
 		}
 		CAM_DBG(CAM_REQ,
-			"ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u",
+			"ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u memh %x",
 			ctx_data->ctx_id, packet->header.request_id, i,
 			io_cfg_ptr[i].direction, io_cfg_ptr[i].fence,
-			io_cfg_ptr[i].resource_type);
+			io_cfg_ptr[i].resource_type,
+			io_cfg_ptr[i].mem_handle[0]);
 	}
 
 	if (prepare_args->num_in_map_entries > 1)
@@ -3439,7 +3444,7 @@
 	uint32_t index;
 	size_t io_buf_size;
 	int rc = 0;
-	uint64_t pResource;
+	uintptr_t pResource;
 
 	if (!blob_data || (blob_size == 0)) {
 		CAM_ERR(CAM_ICP, "Invalid blob info %pK %d", blob_data,
@@ -3545,7 +3550,7 @@
 	ioconfig_cmd->num_fw_handles = 1;
 	ioconfig_cmd->fw_handles[0] = ctx_data->fw_handle;
 	ioconfig_cmd->payload.indirect = io_config;
-	ioconfig_cmd->user_data1 = (uint64_t)ctx_data;
+	ioconfig_cmd->user_data1 = PTR_TO_U64(ctx_data);
 	ioconfig_cmd->user_data2 = request_id;
 
 	return 0;
@@ -3593,6 +3598,72 @@
 	return rc;
 }
 
+static void cam_icp_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	dma_addr_t iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_ICP,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_ICP, "port: %d f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_UTIL, "get src buf address fail");
+				continue;
+			}
+
+			CAM_INFO(CAM_ICP,
+				"pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+
+			iova_addr += io_cfg[i].offsets[j];
+
+		}
+	}
+}
+
 static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
 	void *prepare_hw_update_args)
 {
@@ -3635,6 +3706,8 @@
 		return rc;
 	}
 
+	prepare_args->pf_data->packet = packet;
+
 	CAM_DBG(CAM_REQ, "req id = %lld for ctx = %u",
 		packet->header.request_id, ctx_data->ctx_id);
 	/* Update Buffer Address from handles and patch information */
@@ -3671,7 +3744,7 @@
 		fw_cmd_buf_iova_addr);
 
 	prepare_args->num_hw_update_entries = 1;
-	prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
+	prepare_args->hw_update_entries[0].addr = (uintptr_t)hfi_cmd;
 	prepare_args->priv = &ctx_data->hfi_frame_process.frame_info[idx];
 
 	CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
@@ -3968,7 +4041,7 @@
 	create_handle.size = sizeof(struct hfi_cmd_create_handle);
 	create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
 	create_handle.handle_type = dev_type;
-	create_handle.user_data1 = (uint64_t)ctx_data;
+	create_handle.user_data1 = PTR_TO_U64(ctx_data);
 	reinit_completion(&ctx_data->wait_complete);
 	task_data = (struct hfi_cmd_work_data *)task->payload;
 	task_data->data = (void *)&create_handle;
@@ -4013,7 +4086,7 @@
 
 	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
 	ping_pkt.pkt_type = HFI_CMD_SYS_PING;
-	ping_pkt.user_data = (uint64_t)ctx_data;
+	ping_pkt.user_data = PTR_TO_U64(ctx_data);
 	init_completion(&ctx_data->wait_complete);
 	task_data = (struct hfi_cmd_work_data *)task->payload;
 	task_data->data = (void *)&ping_pkt;
@@ -4313,7 +4386,7 @@
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if (copy_from_user(&icp_hw_mgr.icp_caps,
-		(void __user *)query_cap->caps_handle,
+		u64_to_user_ptr(query_cap->caps_handle),
 		sizeof(struct cam_icp_query_cap_cmd))) {
 		CAM_ERR(CAM_ICP, "copy_from_user failed");
 		rc = -EFAULT;
@@ -4327,7 +4400,7 @@
 	icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
 	icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
 
-	if (copy_to_user((void __user *)query_cap->caps_handle,
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
 		&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
 		CAM_ERR(CAM_ICP, "copy_to_user failed");
 		rc = -EFAULT;
@@ -4551,7 +4624,35 @@
 	return rc;
 }
 
-int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+static int cam_icp_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_icp_hw_mgr  *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_icp_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid cmd");
+	}
+
+	return rc;
+}
+
+int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
 {
 	int i, rc = 0;
 	struct cam_hw_mgr_intf *hw_mgr_intf;
@@ -4574,6 +4675,7 @@
 	hw_mgr_intf->hw_open = cam_icp_mgr_hw_open_u;
 	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u;
 	hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
+	hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd;
 
 	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
@@ -4617,6 +4719,9 @@
 	if (rc)
 		goto icp_wq_create_failed;
 
+	if (iommu_hdl)
+		*iommu_hdl = icp_hw_mgr.iommu_hdl;
+
 	init_completion(&icp_hw_mgr.a5_complete);
 	return rc;
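Editor's note: the new CAM_HW_MGR_CMD_DUMP_PF_INFO path added above (cam_icp_mgr_print_io_bufs) walks every io_cfg plane in the faulting packet and flags any port whose mem_handle shares a file descriptor with the faulting buffer via GET_FD_FROM_HANDLE. A reduced sketch of that matching loop, treating the low bits of a handle as the fd purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PLANES 3

/* Illustrative assumption only: pretend the fd lives in the low 16 bits. */
static int fd_from_handle(uint32_t handle)
{
	return handle & 0xffff;
}

struct io_cfg {
	int      resource_type;
	uint32_t mem_handle[MAX_PLANES];	/* 0 terminates the plane list */
};

static bool find_faulting_buf(const struct io_cfg *cfg, int num_cfg,
			      uint32_t pf_buf_info)
{
	bool found = false;

	for (int i = 0; i < num_cfg; i++) {
		for (int j = 0; j < MAX_PLANES && cfg[i].mem_handle[j]; j++) {
			if (fd_from_handle(cfg[i].mem_handle[j]) !=
			    fd_from_handle(pf_buf_info))
				continue;
			printf("fault matches port %d handle 0x%x\n",
			       cfg[i].resource_type, cfg[i].mem_handle[j]);
			found = true;
		}
	}
	return found;
}

int main(void)
{
	struct io_cfg cfgs[] = {
		{ .resource_type = 1, .mem_handle = { 0x10001, 0 } },
		{ .resource_type = 2, .mem_handle = { 0x20002, 0x30002, 0 } },
	};

	printf("found = %d\n", find_faulting_buf(cfgs, 2, 0xAB0002));
	return 0;
}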
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index 771c4ed..7bb9b9ed 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -28,7 +28,7 @@
 #define CPAS_IPE1_BIT            0x2000
 
 int cam_icp_hw_mgr_init(struct device_node *of_node,
-	uint64_t *hw_mgr_hdl);
+	uint64_t *hw_mgr_hdl, int *iommu_hdl);
 
 /**
  * struct cam_icp_cpas_vote
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 620a4bd..142fcdc 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -195,9 +195,12 @@
 		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
 		true, &pwr_ctrl);
 	if (pwr_ctrl & IPE_COLLAPSE_MASK) {
-		CAM_ERR(CAM_ICP, "IPE: resume failed : %d", pwr_ctrl);
-		return -EINVAL;
+		CAM_DBG(CAM_ICP, "IPE pwr_ctrl set(%x)", pwr_ctrl);
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0);
 	}
+
 	rc = cam_ipe_transfer_gdsc_control(soc_info);
 	cam_cpas_reg_read(core_info->cpas_handle,
 		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 5760371..aaa172d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -23,8 +23,15 @@
 #include "cam_req_mgr_dev.h"
 #include "cam_trace.h"
 #include "cam_debug_util.h"
+#include "cam_packet_util.h"
+#include "cam_context_utils.h"
+#include "cam_common_util.h"
 
 static const char isp_dev_name[] = "isp";
+
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info);
+
 static void __cam_isp_ctx_update_state_monitor_array(
 	struct cam_isp_context *ctx_isp,
 	enum cam_isp_state_change_trigger trigger_type,
@@ -375,7 +382,7 @@
 			continue;
 		}
 
-		if (!bubble_state) {
+		if (!req_isp->bubble_detected) {
 			CAM_DBG(CAM_ISP,
 				"Sync with success: req %lld res 0x%x fd 0x%x",
 				req->request_id,
@@ -402,15 +409,14 @@
 		} else {
 			/*
 			 * Ignore the buffer done if bubble detect is on
-			 * In most case, active list should be empty when
-			 * bubble detects. But for safety, we just move the
-			 * current active request to the pending list here.
+			 * Increment the ack number here, and queue the
+			 * request back to pending list whenever all the
+			 * buffers are done.
 			 */
+			req_isp->num_acked++;
 			CAM_DBG(CAM_ISP,
 				"buf done with bubble state %d recovery %d",
 				bubble_state, req_isp->bubble_report);
-			list_del_init(&req->list);
-			list_add(&req->list, &ctx->pending_req_list);
 			continue;
 		}
 
@@ -431,10 +437,25 @@
 			req_isp->num_fence_map_out);
 		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
 	}
-	if (req_isp->num_acked == req_isp->num_fence_map_out) {
+
+	if (req_isp->num_acked != req_isp->num_fence_map_out)
+		return rc;
+
+	ctx_isp->active_req_cnt--;
+
+	if (req_isp->bubble_detected && req_isp->bubble_report) {
+		req_isp->num_acked = 0;
+		req_isp->bubble_detected = false;
+		list_del_init(&req->list);
+		list_add(&req->list, &ctx->pending_req_list);
+
+		CAM_DBG(CAM_REQ,
+			"Move active request %lld to pending list(cnt = %d) [bubble recovery]",
+			 req->request_id, ctx_isp->active_req_cnt);
+	} else {
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &ctx->free_req_list);
-		ctx_isp->active_req_cnt--;
+
 		CAM_DBG(CAM_REQ,
 			"Move active request %lld to free list(cnt = %d) [all fences done]",
 			 req->request_id, ctx_isp->active_req_cnt);
@@ -732,15 +753,13 @@
 	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
 		list);
 	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	req_isp->bubble_detected = true;
 
 	CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
 	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
 		ctx->ctx_crm_intf->notify_err) {
 		struct cam_req_mgr_error_notify notify;
 
-		list_del_init(&req->list);
-		list_add(&req->list, &ctx->pending_req_list);
-
 		notify.link_hdl = ctx->link_hdl;
 		notify.dev_hdl = ctx->dev_hdl;
 		notify.req_id = req->request_id;
@@ -749,18 +768,19 @@
 		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
 			ctx_isp->frame_id);
 	} else {
-		/*
-		 * Since can not bubble report, always move the request to
-		 * active list.
-		 */
-		list_del_init(&req->list);
-		list_add_tail(&req->list, &ctx->active_req_list);
-		ctx_isp->active_req_cnt++;
-		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
-			 req->request_id, ctx_isp->active_req_cnt);
 		req_isp->bubble_report = 0;
 	}
 
+	/*
+	 * Always move the request to the active list. Let the buf done
+	 * function handle the rest.
+	 */
+	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
+		req->request_id, ctx_isp->active_req_cnt);
+	ctx_isp->active_req_cnt++;
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+
 	if (req->request_id > ctx_isp->reported_req_id) {
 		request_id = req->request_id;
 		ctx_isp->reported_req_id = request_id;
@@ -882,13 +902,12 @@
 	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
 		list);
 	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
-	list_del_init(&req->list);
+	req_isp->bubble_detected = true;
 
 	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
 		ctx->ctx_crm_intf->notify_err) {
 		struct cam_req_mgr_error_notify notify;
 
-		list_add(&req->list, &ctx->pending_req_list);
 		notify.link_hdl = ctx->link_hdl;
 		notify.dev_hdl = ctx->dev_hdl;
 		notify.req_id = req->request_id;
@@ -898,17 +917,19 @@
 			"Notify CRM about Bubble req_id %llu frame %lld",
 			req->request_id, ctx_isp->frame_id);
 	} else {
-		/*
-		 * If we can not report bubble, then treat it as if no bubble
-		 * report. Just move the req to active list.
-		 */
-		list_add_tail(&req->list, &ctx->active_req_list);
-		ctx_isp->active_req_cnt++;
-		CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
-			 req->request_id, ctx_isp->active_req_cnt);
 		req_isp->bubble_report = 0;
 	}
 
+	/*
+	 * Always move the request to the active list. Let the buf done
+	 * function handle the rest.
+	 */
+	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+		req->request_id, ctx_isp->active_req_cnt);
+	ctx_isp->active_req_cnt++;
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+
 	if (!req_isp->bubble_report) {
 		if (req->request_id > ctx_isp->reported_req_id) {
 			request_id = req->request_id;
@@ -1411,6 +1432,7 @@
 		CAM_DBG(CAM_ISP, "try to flush active list");
 		rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
 		flush_req);
+		ctx_isp->active_req_cnt = 0;
 		spin_unlock_bh(&ctx->lock);
 
 		/* Start hw */
@@ -1609,12 +1631,12 @@
 	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
 		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
 
-	if (list_empty(&ctx->pending_req_list)) {
+	if (list_empty(&ctx->wait_req_list)) {
 		/*
 		 * If no pending req in epoch, this is an error case.
 		 * The recovery is to go back to sof state
 		 */
-		CAM_ERR(CAM_ISP, "No pending request");
+		CAM_ERR(CAM_ISP, "No wait request");
 		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
 
 		/* Send SOF event as empty frame*/
@@ -1624,9 +1646,10 @@
 		goto end;
 	}
 
-	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
 		list);
 	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	req_isp->bubble_detected = true;
 
 	CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
 	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
@@ -1641,18 +1664,19 @@
 		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
 			ctx_isp->frame_id);
 	} else {
-		/*
-		 * Since can not bubble report, always move the request to
-		 * active list.
-		 */
-		list_del_init(&req->list);
-		list_add_tail(&req->list, &ctx->active_req_list);
-		ctx_isp->active_req_cnt++;
-		CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
-			req->request_id, ctx_isp->active_req_cnt);
 		req_isp->bubble_report = 0;
 	}
 
+	/*
+	 * Always move the request to the active list. Let the buf done
+	 * function handle the rest.
+	 */
+	ctx_isp->active_req_cnt++;
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+			req->request_id, ctx_isp->active_req_cnt);
+
 	if (!req_isp->bubble_report) {
 		if (req->request_id > ctx_isp->reported_req_id) {
 			request_id = req->request_id;
@@ -2008,7 +2032,7 @@
 	int rc = 0, i;
 	struct cam_ctx_request           *req = NULL;
 	struct cam_isp_ctx_req           *req_isp;
-	uint64_t                          packet_addr;
+	uintptr_t                         packet_addr;
 	struct cam_packet                *packet;
 	size_t                            len = 0;
 	struct cam_hw_prepare_update_args cfg;
@@ -2038,16 +2062,16 @@
 	/* for config dev, only memory handle is supported */
 	/* map packet from the memhandle */
 	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
-		(uint64_t *) &packet_addr, &len);
+		&packet_addr, &len);
 	if (rc != 0) {
 		CAM_ERR(CAM_ISP, "Can not get packet address");
 		rc = -EINVAL;
 		goto free_req;
 	}
 
-	packet = (struct cam_packet *) (packet_addr + cmd->offset);
+	packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
 	CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
-	CAM_DBG(CAM_ISP, "packet address is 0x%llx", packet_addr);
+	CAM_DBG(CAM_ISP, "packet address is 0x%zx", packet_addr);
 	CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
 		len, cmd->offset);
 	CAM_DBG(CAM_ISP, "Packet request id %lld",
@@ -2066,6 +2090,7 @@
 	cfg.out_map_entries = req_isp->fence_map_out;
 	cfg.in_map_entries = req_isp->fence_map_in;
 	cfg.priv  = &req_isp->hw_update_data;
+	cfg.pf_data = &(req->pf_data);
 
 	CAM_DBG(CAM_ISP, "try to prepare config packet......");
 
@@ -2080,6 +2105,7 @@
 	req_isp->num_fence_map_out = cfg.num_out_map_entries;
 	req_isp->num_fence_map_in = cfg.num_in_map_entries;
 	req_isp->num_acked = 0;
+	req_isp->bubble_detected = false;
 
 	for (i = 0; i < req_isp->num_fence_map_out; i++) {
 		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
@@ -2164,7 +2190,8 @@
 	struct cam_hw_release_args       release;
 	struct cam_isp_context          *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
-	struct cam_isp_hw_cmd_args       hw_cmd_args;
+	struct cam_hw_cmd_args       hw_cmd_args;
+	struct cam_isp_hw_cmd_args   isp_hw_cmd_args;
 
 	if (!ctx->hw_mgr_intf) {
 		CAM_ERR(CAM_ISP, "HW interface is not ready");
@@ -2200,7 +2227,7 @@
 	CAM_DBG(CAM_ISP, "start copy %d resources from user",
 		 cmd->num_resources);
 
-	if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
+	if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
 		sizeof(*isp_res)*cmd->num_resources)) {
 		rc = -EFAULT;
 		goto free_res;
@@ -2209,7 +2236,7 @@
 	param.context_data = ctx;
 	param.event_cb = ctx->irq_cb_intf;
 	param.num_acq = cmd->num_resources;
-	param.acquire_info = (uint64_t) isp_res;
+	param.acquire_info = (uintptr_t) isp_res;
 
 	/* call HW manager to reserve the resource */
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
@@ -2221,7 +2248,9 @@
 
 	/* Query the context has rdi only resource */
 	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
-	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
 	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
 				&hw_cmd_args);
 	if (rc) {
@@ -2229,7 +2258,7 @@
 		goto free_hw;
 	}
 
-	if (hw_cmd_args.u.is_rdi_only_context) {
+	if (isp_hw_cmd_args.u.is_rdi_only_context) {
 		/*
 		 * this context has rdi only resource assign rdi only
 		 * state machine
@@ -2248,8 +2277,9 @@
 			cam_isp_ctx_activated_state_machine;
 	}
 
-	ctx_isp->rdi_only_context = hw_cmd_args.u.is_rdi_only_context;
+	ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context;
 	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
 
 	req_hdl_param.session_hdl = cmd->session_handle;
 	/* bridge is not ready for these flags. so false for now */
@@ -2276,7 +2306,7 @@
 	CAM_DBG(CAM_ISP,
 		"Acquire success on session_hdl 0x%x num_rsrces %d RDI only %d ctx %u",
 		cmd->session_handle, cmd->num_resources,
-		(hw_cmd_args.u.is_rdi_only_context ? 1 : 0), ctx->ctx_id);
+		(isp_hw_cmd_args.u.is_rdi_only_context ? 1 : 0), ctx->ctx_id);
 	kfree(isp_res);
 	return rc;
 
@@ -2567,12 +2597,15 @@
 static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
 {
 	int rc = 0;
-	struct cam_isp_hw_cmd_args   hw_cmd_args;
+	struct cam_hw_cmd_args       hw_cmd_args;
+	struct cam_isp_hw_cmd_args   isp_hw_cmd_args;
 	struct cam_isp_context      *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
 	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
-	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
 	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
 		&hw_cmd_args);
 
@@ -2582,12 +2615,15 @@
 static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
 {
 	int rc = 0;
-	struct cam_isp_hw_cmd_args   hw_cmd_args;
+	struct cam_hw_cmd_args       hw_cmd_args;
+	struct cam_isp_hw_cmd_args   isp_hw_cmd_args;
 	struct cam_isp_context      *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
 	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
-	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
 	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
 		&hw_cmd_args);
 
@@ -2598,13 +2634,16 @@
 	struct cam_context *ctx)
 {
 	int rc = 0;
-	struct cam_isp_hw_cmd_args   hw_cmd_args;
+	struct cam_hw_cmd_args       hw_cmd_args;
+	struct cam_isp_hw_cmd_args   isp_hw_cmd_args;
 	struct cam_isp_context      *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
 	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
-	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
-	hw_cmd_args.u.sof_irq_enable = 1;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
+	isp_hw_cmd_args.u.sof_irq_enable = 1;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
 
 	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
 		&hw_cmd_args);
@@ -2746,6 +2785,7 @@
 			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
 		},
 		.irq_ops = NULL,
+		.pagefault_ops = cam_isp_context_dump_active_request,
 	},
 	/* Ready */
 	{
@@ -2759,6 +2799,7 @@
 			.flush_req = __cam_isp_ctx_flush_req_in_ready,
 		},
 		.irq_ops = NULL,
+		.pagefault_ops = cam_isp_context_dump_active_request,
 	},
 	/* Activated */
 	{
@@ -2774,10 +2815,55 @@
 			.process_evt = __cam_isp_ctx_process_evt,
 		},
 		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
+		.pagefault_ops = cam_isp_context_dump_active_request,
 	},
 };
 
 
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request *req = NULL;
+	struct cam_ctx_request *req_temp = NULL;
+	struct cam_isp_ctx_req *req_isp  = NULL;
+	struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
+	struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
+	bool mem_found = false;
+	int rc = 0;
+
+	struct cam_isp_context *isp_ctx =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	if (!isp_ctx) {
+		CAM_ERR(CAM_ISP, "Invalid isp ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->active_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		hw_update_data = &req_isp->hw_update_data;
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_ISP, "req_id : %lld ", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &mem_found);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to dump pf info");
+
+		if (mem_found)
+			CAM_ERR(CAM_ISP, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+	return rc;
+}
+
 int cam_isp_context_init(struct cam_isp_context *ctx,
 	struct cam_context *ctx_base,
 	struct cam_req_mgr_kmd_ops *crm_node_intf,
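Editor's note on the bubble-handling rework above: a bubbled request now stays on the active list with bubble_detected set; the buf-done handler only counts acks, and once every fence is acked it either re-queues the request to the pending list (bubble with recovery) or frees it. A stripped-down model of that decision, with the list moves reduced to prints:

#include <stdbool.h>
#include <stdio.h>

struct isp_req {
	int  num_fence_map_out;
	int  num_acked;
	bool bubble_detected;
	bool bubble_report;
};

/* Called once per buf-done; mirrors the "ack, then decide at the end" flow. */
static void on_buf_done(struct isp_req *req)
{
	req->num_acked++;
	if (req->num_acked != req->num_fence_map_out)
		return;		/* more fences outstanding, keep it active */

	if (req->bubble_detected && req->bubble_report) {
		req->num_acked = 0;
		req->bubble_detected = false;
		printf("move request back to pending list (bubble recovery)\n");
	} else {
		printf("move request to free list (all fences done)\n");
	}
}

int main(void)
{
	struct isp_req req = {
		.num_fence_map_out = 2,
		.bubble_detected = true,
		.bubble_report = true,
	};

	on_buf_done(&req);	/* first fence: request stays active */
	on_buf_done(&req);	/* second fence: re-queued for recovery */
	return 0;
}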
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index 4592e42..6f89841 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -115,6 +115,7 @@
 	uint32_t                              num_acked;
 	int32_t                               bubble_report;
 	struct cam_isp_prepare_hw_update_data hw_update_data;
+	bool                                  bubble_detected;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
index a067915..c7e5d38 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -26,9 +26,29 @@
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_node.h"
 #include "cam_debug_util.h"
+#include "cam_smmu_api.h"
 
 static struct cam_isp_dev g_isp_dev;
 
+static void cam_isp_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_ISP, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
 static const struct of_device_id cam_isp_dt_match[] = {
 	{
 		.compatible = "qcom,cam-isp"
@@ -36,23 +56,47 @@
 	{}
 };
 
-static int cam_isp_subdev_close(struct v4l2_subdev *sd,
+static int cam_isp_subdev_open(struct v4l2_subdev *sd,
 	struct v4l2_subdev_fh *fh)
 {
-	struct cam_node *node = v4l2_get_subdevdata(sd);
-
-	if (!node) {
-		CAM_ERR(CAM_ISP, "Node ptr is NULL");
-		return -EINVAL;
-	}
-
-	cam_node_shutdown(node);
+	mutex_lock(&g_isp_dev.isp_mutex);
+	g_isp_dev.open_cnt++;
+	mutex_unlock(&g_isp_dev.isp_mutex);
 
 	return 0;
 }
 
+static int cam_isp_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_isp_dev.isp_mutex);
+	if (g_isp_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_ISP, "ISP subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	g_isp_dev.open_cnt--;
+	if (!node) {
+		CAM_ERR(CAM_ISP, "Node ptr is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (g_isp_dev.open_cnt == 0)
+		cam_node_shutdown(node);
+
+end:
+	mutex_unlock(&g_isp_dev.isp_mutex);
+	return rc;
+}
+
 static const struct v4l2_subdev_internal_ops cam_isp_subdev_internal_ops = {
 	.close = cam_isp_subdev_close,
+	.open = cam_isp_subdev_open,
 };
 
 static int cam_isp_dev_remove(struct platform_device *pdev)
@@ -82,6 +126,7 @@
 	int i;
 	struct cam_hw_mgr_intf         hw_mgr_intf;
 	struct cam_node               *node;
+	int iommu_hdl = -1;
 
 	g_isp_dev.sd.internal_ops = &cam_isp_subdev_internal_ops;
 	/* Initialize the v4l2 subdevice first. (create cam_node) */
@@ -94,7 +139,7 @@
 	node = (struct cam_node *) g_isp_dev.sd.token;
 
 	memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
-	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
+	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf, &iommu_hdl);
 	if (rc != 0) {
 		CAM_ERR(CAM_ISP, "Cannot initialize ISP HW manager!");
 		goto unregister;
@@ -119,6 +164,11 @@
 		goto unregister;
 	}
 
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_isp_dev_iommu_fault_handler, node);
+
+	mutex_init(&g_isp_dev.isp_mutex);
+
 	CAM_INFO(CAM_ISP, "Camera ISP probe complete");
 
 	return 0;
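Editor's note: the ISP subdev now mirrors the ICP one above — open() bumps open_cnt under isp_mutex and close() calls cam_node_shutdown() only when the count drops to zero. A pthread-based analogue of that last-close shutdown, sketch only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int open_cnt;

static void subdev_open(void)
{
	pthread_mutex_lock(&dev_lock);
	open_cnt++;
	pthread_mutex_unlock(&dev_lock);
}

static int subdev_close(void)
{
	int rc = 0;

	pthread_mutex_lock(&dev_lock);
	if (open_cnt <= 0)
		rc = -1;			/* already closed */
	else if (--open_cnt == 0)
		printf("last close: shutting the node down\n");
	pthread_mutex_unlock(&dev_lock);
	return rc;
}

int main(void)
{
	subdev_open();
	subdev_open();
	subdev_close();				/* still one user left */
	subdev_close();				/* triggers shutdown */
	return subdev_close() == -1 ? 0 : 1;	/* extra close is rejected */
}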
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
index 95463ca..a88ed55 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,12 +24,15 @@
  * @sd:                    Common camera subdevice node
  * @ctx:                   Isp base context storage
  * @ctx_isp:               Isp private context storage
- *
+ * @isp_mutex:             ISP dev mutex
+ * @open_cnt:              Open device count
  */
 struct cam_isp_dev {
 	struct cam_subdev          sd;
 	struct cam_context         ctx[CAM_CTX_MAX];
 	struct cam_isp_context     ctx_isp[CAM_CTX_MAX];
+	struct mutex               isp_mutex;
+	int32_t                    open_cnt;
 };
 
 #endif /* __CAM_ISP_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index aab323e..ae678ef 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -28,6 +28,8 @@
 #include "cam_packet_util.h"
 #include "cam_debug_util.h"
 #include "cam_cpas_api.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_common_util.h"
 
 #define CAM_IFE_HW_ENTRIES_MAX  20
 
@@ -94,7 +96,8 @@
 
 	CAM_DBG(CAM_ISP, "enter");
 
-	if (copy_from_user(&query_isp, (void __user *)query->caps_handle,
+	if (copy_from_user(&query_isp,
+		u64_to_user_ptr(query->caps_handle),
 		sizeof(struct cam_isp_query_cap_cmd))) {
 		rc = -EFAULT;
 		return rc;
@@ -113,8 +116,8 @@
 		query_isp.dev_caps[i].hw_version.reserved = 0;
 	}
 
-	if (copy_to_user((void __user *)query->caps_handle, &query_isp,
-		sizeof(struct cam_isp_query_cap_cmd)))
+	if (copy_to_user(u64_to_user_ptr(query->caps_handle),
+		&query_isp, sizeof(struct cam_isp_query_cap_cmd)))
 		rc = -EFAULT;
 
 	CAM_DBG(CAM_ISP, "exit rc :%d", rc);
@@ -762,6 +765,8 @@
 			if (!ife_src_res->hw_res[j])
 				continue;
 
+			hw_intf = ife_src_res->hw_res[j]->hw_intf;
+
 			if (j == CAM_ISP_HW_SPLIT_LEFT) {
 				vfe_acquire.vfe_out.split_id  =
 					CAM_ISP_HW_SPLIT_LEFT;
@@ -769,7 +774,7 @@
 					/*TBD */
 					vfe_acquire.vfe_out.is_master     = 1;
 					vfe_acquire.vfe_out.dual_slave_core =
-						1;
+						(hw_intf->hw_idx == 0) ? 1 : 0;
 				} else {
 					vfe_acquire.vfe_out.is_master   = 0;
 					vfe_acquire.vfe_out.dual_slave_core =
@@ -779,10 +784,10 @@
 				vfe_acquire.vfe_out.split_id  =
 					CAM_ISP_HW_SPLIT_RIGHT;
 				vfe_acquire.vfe_out.is_master       = 0;
-				vfe_acquire.vfe_out.dual_slave_core = 0;
+				vfe_acquire.vfe_out.dual_slave_core =
+					(hw_intf->hw_idx == 0) ? 1 : 0;
 			}
 
-			hw_intf = ife_src_res->hw_res[j]->hw_intf;
 			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
 				&vfe_acquire,
 				sizeof(struct cam_vfe_acquire_args));
@@ -1038,7 +1043,7 @@
 	}
 
 	/* Acquire Left if not already acquired */
-	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+	for (i = CAM_IFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) {
 		if (!ife_hw_mgr->csid_devices[i])
 			continue;
 
@@ -1054,7 +1059,7 @@
 		}
 	}
 
-	if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
+	if (i == -1 || !csid_acquire.node_res) {
 		CAM_ERR(CAM_ISP, "Can not acquire ife cid resource for path %d",
 			csid_path);
 		goto put_res;
@@ -1559,7 +1564,8 @@
 			goto free_res;
 		}
 
-		in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
+		in_port = memdup_user(
+			u64_to_user_ptr(isp_resource[i].res_hdl),
 			isp_resource[i].length);
 		if (!IS_ERR(in_port)) {
 			in_port_length = sizeof(struct cam_isp_in_port_info) +
@@ -2159,7 +2165,8 @@
 	struct cam_isp_stop_args          stop_isp;
 	struct cam_ife_hw_mgr_ctx        *ctx;
 	struct cam_ife_hw_mgr_res        *hw_mgr_res;
-	uint32_t                          i;
+	struct cam_isp_resource_node     *rsrc_node = NULL;
+	uint32_t                          i, camif_debug;
 
 	if (!hw_mgr_priv || !start_isp) {
 		CAM_ERR(CAM_ISP, "Invalid arguments");
@@ -2193,6 +2200,24 @@
 				sizeof(g_ife_hw_mgr.debug_cfg.csid_debug));
 	}
 
+	camif_debug = g_ife_hw_mgr.debug_cfg.camif_debug;
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			rsrc_node = hw_mgr_res->hw_res[i];
+			if (rsrc_node->process_cmd && (rsrc_node->res_id ==
+				CAM_ISP_HW_VFE_IN_CAMIF)) {
+				rc = hw_mgr_res->hw_res[i]->process_cmd(
+					hw_mgr_res->hw_res[i],
+					CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+					&camif_debug,
+					sizeof(camif_debug));
+			}
+		}
+	}
+
 	rc = cam_ife_hw_mgr_init_hw(ctx);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Init failed");
@@ -2804,45 +2829,128 @@
 	return rc;
 }
 
+static void cam_ife_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	dma_addr_t iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_ISP,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_ISP, "port: %d f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_ISP, "get src buf address fail");
+				continue;
+			}
+
+			CAM_INFO(CAM_ISP,
+				"pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+		}
+	}
+}
+
 static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
 {
 	int rc = 0;
-	struct cam_isp_hw_cmd_args  *hw_cmd_args  = cmd_args;
-	struct cam_ife_hw_mgr_ctx   *ctx;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_ife_hw_mgr  *hw_mgr = hw_mgr_priv;
+	struct cam_ife_hw_mgr_ctx *ctx = (struct cam_ife_hw_mgr_ctx *)
+		hw_cmd_args->ctxt_to_hw_map;
 
 	if (!hw_mgr_priv || !cmd_args) {
 		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
-	ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
 		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
 		return -EPERM;
 	}
 
 	switch (hw_cmd_args->cmd_type) {
-	case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
-		if (ctx->is_rdi_only_context)
-			hw_cmd_args->u.is_rdi_only_context = 1;
-		else
-			hw_cmd_args->u.is_rdi_only_context = 0;
+	case CAM_HW_MGR_CMD_INTERNAL: {
+		struct cam_isp_hw_cmd_args *isp_hw_cmd_args =
+			(struct cam_isp_hw_cmd_args *)hw_cmd_args->
+				u.internal_args;
 
+		switch (isp_hw_cmd_args->cmd_type) {
+		case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
+			if (ctx->is_rdi_only_context)
+				isp_hw_cmd_args->u.is_rdi_only_context = 1;
+			else
+				isp_hw_cmd_args->u.is_rdi_only_context = 0;
+			break;
+		case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
+			cam_ife_mgr_pause_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_RESUME_HW:
+			cam_ife_mgr_resume_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
+			cam_ife_mgr_sof_irq_debug(ctx,
+				isp_hw_cmd_args->u.sof_irq_enable);
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
+				hw_cmd_args->cmd_type);
+			rc = -EINVAL;
+			break;
+		}
 		break;
-	case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
-		cam_ife_mgr_pause_hw(ctx);
-		break;
-	case CAM_ISP_HW_MGR_CMD_RESUME_HW:
-		cam_ife_mgr_resume_hw(ctx);
-		break;
-	case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
-		cam_ife_mgr_sof_irq_debug(ctx, hw_cmd_args->u.sof_irq_enable);
+	}
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_ife_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
 		break;
 	default:
-		CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
-			hw_cmd_args->cmd_type);
-		rc = -EINVAL;
-		break;
+		CAM_ERR(CAM_ISP, "Invalid cmd");
 	}
 
 	return rc;
@@ -4038,8 +4146,8 @@
 	evt_payload = evt_payload_priv;
 	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
 
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %llx core index:0x%x",
-		(uint64_t)evt_payload, evt_payload->core_index);
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core index:0x%x",
+		evt_payload, evt_payload->core_index);
 	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
 	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
 	CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
@@ -4174,6 +4282,28 @@
 	cam_ife_get_csid_debug,
 	cam_ife_set_csid_debug, "%16llu");
 
+static int cam_ife_set_camif_debug(void *data, u64 val)
+{
+	g_ife_hw_mgr.debug_cfg.camif_debug = val;
+	CAM_DBG(CAM_ISP,
+		"Set camif enable_diag_sensor_status value: %lld", val);
+	return 0;
+}
+
+static int cam_ife_get_camif_debug(void *data, u64 *val)
+{
+	*val = g_ife_hw_mgr.debug_cfg.camif_debug;
+	CAM_DBG(CAM_ISP,
+		"Get camif enable_diag_sensor_status value: %lld",
+		g_ife_hw_mgr.debug_cfg.camif_debug);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug,
+	cam_ife_get_camif_debug,
+	cam_ife_set_camif_debug, "%16llu");
+
 static int cam_ife_hw_mgr_debug_register(void)
 {
 	g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife",
@@ -4199,6 +4329,14 @@
 		CAM_ERR(CAM_ISP, "failed to create enable_recovery");
 		goto err;
 	}
+
+	if (!debugfs_create_file("ife_camif_debug",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_ife_camif_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_ife_camif_debug");
+		goto err;
+	}
 	g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
 
 	return 0;
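
   The camif debug control above follows the usual debugfs attribute pattern:
   a get/set pair wrapped by DEFINE_SIMPLE_ATTRIBUTE and exposed through
   debugfs_create_file. A minimal module-sized sketch of the same pattern,
   assuming the NULL-return checks used by this kernel generation (names like
   "camera_ife_example" and camif_debug_fops are placeholders):

	#include <linux/module.h>
	#include <linux/debugfs.h>

	static u64 camif_debug_val;
	static struct dentry *dbg_dir;

	static int camif_debug_get(void *data, u64 *val)
	{
		*val = camif_debug_val;
		return 0;
	}

	static int camif_debug_set(void *data, u64 val)
	{
		camif_debug_val = val;
		return 0;
	}
	DEFINE_SIMPLE_ATTRIBUTE(camif_debug_fops, camif_debug_get,
				camif_debug_set, "%llu\n");

	static int __init dbg_init(void)
	{
		dbg_dir = debugfs_create_dir("camera_ife_example", NULL);
		if (!dbg_dir)
			return -ENOMEM;
		if (!debugfs_create_file("ife_camif_debug", 0644, dbg_dir,
					 NULL, &camif_debug_fops)) {
			debugfs_remove_recursive(dbg_dir);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit dbg_exit(void)
	{
		debugfs_remove_recursive(dbg_dir);
	}

	module_init(dbg_init);
	module_exit(dbg_exit);
	MODULE_LICENSE("GPL");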
@@ -4208,7 +4346,7 @@
 	return -ENOMEM;
 }
 
-int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
 {
 	int rc = -EFAULT;
 	int i, j;
@@ -4380,6 +4518,9 @@
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
 
+	if (iommu_hdl)
+		*iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
+
 	cam_ife_hw_mgr_debug_register();
 	CAM_DBG(CAM_ISP, "Exit");
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 0198f3d..9bfa34f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -81,15 +81,17 @@
 /**
  * struct cam_ife_hw_mgr_debug - contain the debug information
  *
- * @dentry:              Debugfs entry
- * @csid_debug:          csid debug information
- * @enable_recovery      enable recovery
+ * @dentry:                    Debugfs entry
+ * @csid_debug:                csid debug information
+ * @enable_recovery:           enable recovery
+ * @camif_debug:               enable sensor diagnostic status
  *
  */
 struct cam_ife_hw_mgr_debug {
 	struct dentry  *dentry;
 	uint64_t       csid_debug;
 	uint32_t       enable_recovery;
+	uint32_t       camif_debug;
 };
 
 /**
@@ -203,9 +205,10 @@
 *                      entry function for the IFE HW manager.
  *
  * @hw_mgr_intf:        IFE hardware manager object returned
+ * @iommu_hdl:          Iommu handle to be returned
  *
  */
-int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf);
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl);
 
 /**
  * cam_ife_mgr_do_tasklet_buf_done()
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
index 2f18895..8b9c555 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,7 @@
 
 
 int cam_isp_hw_mgr_init(struct device_node *of_node,
-	struct cam_hw_mgr_intf *hw_mgr)
+	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl)
 {
 	int rc = 0;
 	const char *compat_str = NULL;
@@ -25,7 +25,7 @@
 		(const char **)&compat_str);
 
 	if (strnstr(compat_str, "ife", strlen(compat_str)))
-		rc = cam_ife_hw_mgr_init(hw_mgr);
+		rc = cam_ife_hw_mgr_init(hw_mgr, iommu_hdl);
 	else {
 		CAM_ERR(CAM_ISP, "Invalid ISP hw type");
 		rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index eaa7325..f652256 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -457,6 +457,7 @@
 	num_out_buf = 0;
 	num_in_buf  = 0;
 	io_cfg_used_bytes = 0;
+	prepare->pf_data->packet = prepare->packet;
 
 	/* Max one hw entries required for each base */
 	if (prepare->num_hw_update_entries + 1 >=
@@ -595,13 +596,6 @@
 					return rc;
 				}
 
-				if (io_addr[plane_id] >> 32) {
-					CAM_ERR(CAM_ISP,
-						"Invalid mapped address");
-					rc = -EINVAL;
-					return rc;
-				}
-
 				/* need to update with offset */
 				io_addr[plane_id] +=
 						io_cfg[i].offsets[plane_id];
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index fd71c37..1586216 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -203,13 +203,11 @@
 /**
  * struct cam_isp_hw_cmd_args - Payload for hw manager command
  *
- * @ctxt_to_hw_map:        HW context from the acquire
  * @cmd_type               HW command type
  * @get_context            Get context type information
  */
 struct cam_isp_hw_cmd_args {
-	void                               *ctxt_to_hw_map;
-	uint32_t                            cmd_type;
+	uint32_t                              cmd_type;
 	union {
 		uint32_t                      is_rdi_only_context;
 		uint32_t                      sof_irq_enable;
@@ -225,9 +223,9 @@
  * @of_node:            Device node input
  * @hw_mgr:             Input/output structure for the ISP hardware manager
  *                          initialization
- *
+ * @iommu_hdl:          Iommu handle to be returned
  */
 int cam_isp_hw_mgr_init(struct device_node *of_node,
-	struct cam_hw_mgr_intf *hw_mgr);
+	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl);
 
 #endif /* __CAM_ISP_HW_MGR_INTF_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
index c68ddf7..f90356a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@
 	.csid_ipp_timestamp_perv1_eof_addr   = 0x2ac,
 	/* configurations */
 	.pix_store_en_shift_val              = 7,
+	.early_eof_en_shift_val              = 29,
 };
 
 static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = {
@@ -286,6 +287,8 @@
 	.crop_shift                                   = 16,
 	.ipp_irq_mask_all                             = 0x7FFF,
 	.rdi_irq_mask_all                             = 0x7FFF,
+	.measure_en_hbi_vbi_cnt_mask                  = 0xC,
+	.format_measure_en_val                        = 1,
 };
 
 struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index c3431ca..9ffd923 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -45,6 +45,9 @@
 /* Max number of sof irq's triggered in case of SOF freeze */
 #define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 6
 
+/* Max CSI Rx irq error count threshold value */
+#define CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT               100
+
 static int cam_ife_csid_is_ipp_format_supported(
 	uint32_t in_format)
 {
@@ -423,6 +426,7 @@
 	if (val != 0)
 		CAM_ERR(CAM_ISP, "CSID:%d IRQ value after reset rc = %d",
 			csid_hw->hw_intf->hw_idx, val);
+	csid_hw->error_irq_count = 0;
 
 	return rc;
 }
@@ -1046,6 +1050,7 @@
 			csid_hw->hw_intf->hw_idx);
 
 	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	csid_hw->error_irq_count = 0;
 	return rc;
 }
 
@@ -1390,8 +1395,12 @@
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
 
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->ipp_reg->csid_ipp_cfg1_addr);
+
 	/* select the post irq sub sample strobe for time stamp capture */
-	cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+	val |= CSID_TIMESTAMP_STB_POST_IRQ;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_cfg1_addr);
 
 	if (path_data->crop_enable) {
@@ -1410,6 +1419,16 @@
 			csid_reg->ipp_reg->csid_ipp_vcrop_addr);
 		CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
 			csid_hw->hw_intf->hw_idx, val);
+
+		/* Enable generating early eof strobe based on crop config */
+		if (!(csid_hw->csid_debug & CSID_DEBUG_DISABLE_EARLY_EOF)) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+			val |= (1 <<
+				csid_reg->ipp_reg->early_eof_en_shift_val);
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+		}
 	}
 
 	/* set frame drop pattern to 0 and period to 1 */
@@ -1438,9 +1457,23 @@
 	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
 	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+		val |= csid_reg->cmn_reg->format_measure_en_val;
+
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
 
+	/* Enable the HBI/VBI counter */
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_format_measure_cfg0_addr);
+		val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_format_measure_cfg0_addr);
+	}
+
 	/* configure the rx packet capture based on csid debug set */
 	val = 0;
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
@@ -1479,8 +1512,10 @@
 	struct cam_isp_resource_node    *res)
 {
 	int rc = 0;
+	uint32_t val = 0;
 	struct cam_ife_csid_reg_offset      *csid_reg;
 	struct cam_hw_soc_info              *soc_info;
+	struct cam_ife_csid_ipp_reg_offset  *ipp_reg;
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
@@ -1498,8 +1533,26 @@
 			csid_hw->hw_intf->hw_idx,
 			res->res_id);
 		rc = -EINVAL;
+		goto end;
 	}
 
+	ipp_reg = csid_reg->ipp_reg;
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			ipp_reg->csid_ipp_cfg0_addr);
+	if (val & csid_reg->cmn_reg->format_measure_en_val) {
+		val &= ~csid_reg->cmn_reg->format_measure_en_val;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			ipp_reg->csid_ipp_cfg0_addr);
+
+		/* Disable the HBI/VBI counter */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			ipp_reg->csid_ipp_format_measure_cfg0_addr);
+		val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			ipp_reg->csid_ipp_format_measure_cfg0_addr);
+	}
+
+end:
 	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	return rc;
 }
@@ -1644,6 +1697,7 @@
 	struct cam_ife_csid_reg_offset         *csid_reg;
 	struct cam_hw_soc_info                 *soc_info;
 	uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+	uint32_t format_measure_addr;
 
 	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
 	csid_reg = csid_hw->csid_info->csid_reg;
@@ -1737,9 +1791,24 @@
 		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
 	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
 
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+		val |= csid_reg->cmn_reg->format_measure_en_val;
+
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
 
+	format_measure_addr =
+		csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+	/* Enable the HBI/VBI counter */
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			format_measure_addr);
+		val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val,
+			soc_info->reg_map[0].mem_base + format_measure_addr);
+	}
+
 	/* configure the rx packet capture based on csid debug set */
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
 		val = ((1 <<
@@ -1775,7 +1844,7 @@
 	struct cam_isp_resource_node    *res)
 {
 	int rc = 0;
-	uint32_t id;
+	uint32_t id, val, format_measure_addr;
 	struct cam_ife_csid_reg_offset      *csid_reg;
 	struct cam_hw_soc_info              *soc_info;
 
@@ -1792,6 +1861,24 @@
 		return -EINVAL;
 	}
 
+	format_measure_addr =
+		csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+		val &= ~csid_reg->cmn_reg->format_measure_en_val;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+		/* Disable the HBI/VBI counter */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			format_measure_addr);
+		val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			format_measure_addr);
+	}
+
 	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	return rc;
 }
@@ -1894,6 +1981,55 @@
 	return rc;
 }
 
+static int cam_ife_csid_get_hbi_vbi(
+	struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node *res)
+{
+	uint32_t  hbi, vbi;
+	const struct cam_ife_csid_reg_offset     *csid_reg;
+	const struct cam_ife_csid_rdi_reg_offset *rdi_reg;
+	struct cam_hw_soc_info                   *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res_type:%d res id:%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state:%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_format_measure1_addr);
+		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_ipp_format_measure2_addr);
+	} else {
+		rdi_reg = csid_reg->rdi_reg[res->res_id];
+		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_format_measure1_addr);
+		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_format_measure2_addr);
+	}
+
+	CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u HBI: 0x%x", res->res_id,
+		hbi);
+	CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u VBI: 0x%x", res->res_id,
+		vbi);
+
+	return 0;
+}
+
 static int cam_ife_csid_get_time_stamp(
 		struct cam_ife_csid_hw   *csid_hw, void *cmd_args)
 {
@@ -2547,6 +2683,7 @@
 	int rc = 0;
 	struct cam_ife_csid_hw               *csid_hw;
 	struct cam_hw_info                   *csid_hw_info;
+	struct cam_isp_resource_node         *res = NULL;
 
 	if (!hw_priv || !cmd_args) {
 		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
@@ -2559,6 +2696,11 @@
 	switch (cmd_type) {
 	case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
 		rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
+		if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+			res = ((struct cam_csid_get_time_stamp_args *)
+				cmd_args)->node_res;
+			cam_ife_csid_get_hbi_vbi(csid_hw, res);
+		}
 		break;
 	case CAM_IFE_CSID_SET_CSID_DEBUG:
 		rc = cam_ife_csid_set_csid_debug(csid_hw, cmd_args);
@@ -2645,18 +2787,22 @@
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
 			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER  FLOW",
@@ -2677,6 +2823,7 @@
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_CRC) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_CRC",
 			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_ECC) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_ECC",
@@ -2689,10 +2836,12 @@
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_STREAM_UNDERFLOW",
 			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d UNBOUNDED_FRAME",
 			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
 	}
 
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
@@ -2858,6 +3007,26 @@
 		csid_hw->irq_debug_cnt = 0;
 	}
 
+	if (csid_hw->error_irq_count >
+		CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT) {
+		/* Mask line overflow, underflow, unbound interrupts */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+		val &=  ~(CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+			CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+			CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+			CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+			CSID_CSI2_RX_ERROR_CRC                 |
+			CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW    |
+			CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME);
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+		CAM_WARN(CAM_ISP, "Masked csi rx error interrupts");
+		csid_hw->error_irq_count = 0;
+	}
+
 	CAM_DBG(CAM_ISP, "IRQ Handling exit");
 	return IRQ_HANDLED;
 }
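
   The CSID RX error handling above counts error IRQs and, once the count
   crosses CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT, masks the noisy sources so a
   misbehaving sensor cannot flood the system. A small stand-alone sketch of
   that throttling idea (bit names, threshold and mask value are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define ERR_LANE0_OVERFLOW  (1u << 0)
	#define ERR_CRC             (1u << 1)
	#define ERR_UNBOUNDED_FRAME (1u << 2)
	#define MAX_ERROR_IRQ_COUNT 100

	struct rx_state {
		uint32_t irq_mask;    /* currently enabled IRQ sources */
		uint32_t error_count; /* errors seen since last reset */
	};

	static void handle_rx_irq(struct rx_state *s, uint32_t status)
	{
		if (status & ERR_LANE0_OVERFLOW)
			s->error_count++;
		if (status & ERR_CRC)
			s->error_count++;
		if (status & ERR_UNBOUNDED_FRAME)
			s->error_count++;

		/* once errors keep coming, stop listening to the noisy bits */
		if (s->error_count > MAX_ERROR_IRQ_COUNT) {
			s->irq_mask &= ~(ERR_LANE0_OVERFLOW | ERR_CRC |
					 ERR_UNBOUNDED_FRAME);
			s->error_count = 0;
			printf("masked rx error irqs, mask=0x%x\n",
			       s->irq_mask);
		}
	}

	int main(void)
	{
		struct rx_state s = { .irq_mask = 0x7, .error_count = 0 };
		int i;

		for (i = 0; i < 101; i++)
			handle_rx_irq(&s, ERR_CRC);
		return 0;
	}

   Resetting the counter on stream start/stop, as the patch does, gives the
   next session a clean slate.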
@@ -2975,6 +3144,7 @@
 	}
 
 	ife_csid_hw->csid_debug = 0;
+	ife_csid_hw->error_irq_count = 0;
 	return 0;
 err:
 	if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index ad993eb..c547f4c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -76,6 +76,8 @@
 #define CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE       BIT(4)
 #define CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE        BIT(5)
 #define CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE        BIT(6)
+#define CSID_DEBUG_ENABLE_HBI_VBI_INFO            BIT(7)
+#define CSID_DEBUG_DISABLE_EARLY_EOF              BIT(8)
 
 /* enum cam_csid_path_halt_mode select the path halt mode control */
 enum cam_csid_path_halt_mode {
@@ -135,6 +137,7 @@
 
 	/* configuration */
 	uint32_t  pix_store_en_shift_val;
+	uint32_t  early_eof_en_shift_val;
 };
 
 struct cam_ife_csid_rdi_reg_offset {
@@ -285,6 +288,8 @@
 	uint32_t crop_shift;
 	uint32_t ipp_irq_mask_all;
 	uint32_t rdi_irq_mask_all;
+	uint32_t measure_en_hbi_vbi_cnt_mask;
+	uint32_t format_measure_en_val;
 };
 
 /**
@@ -438,6 +443,8 @@
  * @sof_irq_triggered:        Flag is set on receiving event to enable sof irq
  *                            incase of SOF freeze.
  * @irq_debug_cnt:            Counter to track sof irq's when above flag is set.
+ * @error_irq_count:          Error IRQ count; if error IRQs keep coming,
+ *                            the CSID RX error interrupts are masked.
  *
  */
 struct cam_ife_csid_hw {
@@ -461,6 +468,7 @@
 	uint64_t                         clk_rate;
 	bool                             sof_irq_triggered;
 	uint32_t                         irq_debug_cnt;
+	uint32_t                         error_irq_count;
 };
 
 int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 28cfcc8..54aa4c2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -96,6 +96,7 @@
 	CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
 	CAM_ISP_HW_CMD_GET_REG_DUMP,
 	CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
+	CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
 	CAM_ISP_HW_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index c7d3aa2..d1284d9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -50,6 +50,8 @@
 	.raw_crop_width_cfg       = 0x00000CE4,
 	.raw_crop_height_cfg      = 0x00000CE8,
 	.reg_update_cmd           = 0x000004AC,
+	.vfe_diag_config          = 0x00000C48,
+	.vfe_diag_sensor_status   = 0x00000C4C,
 };
 
 static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
@@ -79,6 +81,7 @@
 	.eof_irq_mask                    = 0x00000002,
 	.error_irq_mask0                 = 0x0003FC00,
 	.error_irq_mask1                 = 0x0FFF7E80,
+	.enable_diagnostic_hw            = 0x1,
 };
 
 struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 0bb1374..54ec282 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -2438,7 +2438,7 @@
 	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
 		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
 			CAM_ERR(CAM_ISP,
-				"reg_val_pair %d exceeds the array limit %lu",
+				"reg_val_pair %d exceeds the array limit %zu",
 				j, MAX_REG_VAL_PAIR_SIZE);
 			return -ENOMEM;
 		}
@@ -2711,7 +2711,7 @@
 	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
 		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
 			CAM_ERR(CAM_ISP,
-				"reg_val_pair %d exceeds the array limit %lu",
+				"reg_val_pair %d exceeds the array limit %zu",
 				j, MAX_REG_VAL_PAIR_SIZE);
 			return -ENOMEM;
 		}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 73b4ee7..fc257ec 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -43,6 +43,7 @@
 	uint32_t                           last_line;
 	bool                               enable_sof_irq_debug;
 	uint32_t                           irq_debug_cnt;
+	uint32_t                           camif_debug;
 };
 
 static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern)
@@ -211,6 +212,8 @@
 	uint32_t                             epoch0_irq_mask;
 	uint32_t                             epoch1_irq_mask;
 	uint32_t                             computed_epoch_line_cfg;
+	uint32_t                             camera_hw_version = 0;
+	int                                  rc = 0;
 
 	if (!camif_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -250,16 +253,50 @@
 		rsrc_data->common_reg->module_ctrl[
 		CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd);
 
+	/* get the HW version */
+	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Couldn't find HW version. rc: %d", rc);
+		return rc;
+	}
+
 	/* epoch config */
-	epoch0_irq_mask = ((rsrc_data->last_line - rsrc_data->first_line) / 2) +
-		rsrc_data->first_line;
-	epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & 0xFFFF;
-	computed_epoch_line_cfg = (epoch0_irq_mask << 16) | epoch1_irq_mask;
-	cam_io_w_mb(computed_epoch_line_cfg,
-		rsrc_data->mem_base + rsrc_data->camif_reg->epoch_irq);
-	CAM_DBG(CAM_ISP, "first_line:%u last_line:%u epoch_line_cfg: 0x%x",
-		rsrc_data->first_line, rsrc_data->last_line,
-		computed_epoch_line_cfg);
+	switch (camera_hw_version) {
+	case CAM_CPAS_TITAN_175_V101:
+	case CAM_CPAS_TITAN_175_V100:
+		epoch0_irq_mask = ((rsrc_data->last_line -
+				rsrc_data->first_line) / 2) +
+				rsrc_data->first_line;
+		epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg &
+				0xFFFF;
+		computed_epoch_line_cfg = (epoch0_irq_mask << 16) |
+				epoch1_irq_mask;
+		cam_io_w_mb(computed_epoch_line_cfg,
+				rsrc_data->mem_base +
+				rsrc_data->camif_reg->epoch_irq);
+		CAM_DBG(CAM_ISP, "first_line: %u\n"
+				"last_line: %u\n"
+				"epoch_line_cfg: 0x%x",
+				rsrc_data->first_line,
+				rsrc_data->last_line,
+				computed_epoch_line_cfg);
+		break;
+	case CAM_CPAS_TITAN_170_V100:
+	case CAM_CPAS_TITAN_170_V110:
+	case CAM_CPAS_TITAN_170_V120:
+		cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+				rsrc_data->mem_base +
+				rsrc_data->camif_reg->epoch_irq);
+		break;
+	default:
+		cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+				rsrc_data->mem_base +
+				rsrc_data->camif_reg->epoch_irq);
+		CAM_WARN(CAM_ISP, "Unsupported hardware version: 0x%x",
+				camera_hw_version);
+		break;
+	}
 
 	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
@@ -273,6 +310,15 @@
 	rsrc_data->enable_sof_irq_debug = false;
 	rsrc_data->irq_debug_cnt = 0;
 
+	if (rsrc_data->camif_debug &
+		CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r_mb(rsrc_data->mem_base +
+			rsrc_data->camif_reg->vfe_diag_config);
+		val |= rsrc_data->reg_data->enable_diagnostic_hw;
+		cam_io_w_mb(val, rsrc_data->mem_base +
+			rsrc_data->camif_reg->vfe_diag_config);
+	}
+
 	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
 	return 0;
 }
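
   The camif start path above now queries the CPAS hardware version and
   programs the epoch IRQ line differently per Titan generation, falling back
   to the register default (with a warning) for unknown versions. A hedged C
   sketch of that version-keyed configuration (version codes and register
   values below are made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* illustrative version codes, not the real CPAS values */
	enum hw_version { TITAN_170 = 0x170, TITAN_175 = 0x175 };

	static uint32_t epoch_cfg_for(uint32_t version, uint32_t first_line,
				      uint32_t last_line, uint32_t default_cfg)
	{
		switch (version) {
		case TITAN_175:
			/* epoch0 at mid-frame, epoch1 from the default */
			return (((last_line - first_line) / 2 + first_line)
				<< 16) | (default_cfg & 0xFFFF);
		case TITAN_170:
			return default_cfg;
		default:
			fprintf(stderr,
				"unknown hw version 0x%x, using default\n",
				version);
			return default_cfg;
		}
	}

	int main(void)
	{
		printf("epoch cfg: 0x%x\n",
		       epoch_cfg_for(TITAN_175, 0, 1080, 0x00140014));
		return 0;
	}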
@@ -364,6 +410,14 @@
 	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
+	val = cam_io_r_mb(camif_priv->mem_base +
+			camif_priv->camif_reg->vfe_diag_config);
+	if (val & camif_priv->reg_data->enable_diagnostic_hw) {
+		val &= ~camif_priv->reg_data->enable_diagnostic_hw;
+		cam_io_w_mb(val, camif_priv->mem_base +
+			camif_priv->camif_reg->vfe_diag_config);
+	}
+
 	return rc;
 }
 
@@ -388,6 +442,7 @@
 	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
 {
 	int rc = -EINVAL;
+	struct cam_vfe_mux_camif_data *camif_priv = NULL;
 
 	if (!rsrc_node || !cmd_args) {
 		CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -405,6 +460,11 @@
 	case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
 		rc = cam_vfe_camif_sof_irq_debug(rsrc_node, cmd_args);
 		break;
+	case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
+		camif_priv =
+			(struct cam_vfe_mux_camif_data *)rsrc_node->res_priv;
+		camif_priv->camif_debug = *((uint32_t *)cmd_args);
+		break;
 	default:
 		CAM_ERR(CAM_ISP,
 			"unsupported process command:%d", cmd_type);
@@ -429,6 +489,7 @@
 	struct cam_vfe_top_irq_evt_payload   *payload;
 	uint32_t                              irq_status0;
 	uint32_t                              irq_status1;
+	uint32_t                              val;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -491,6 +552,14 @@
 		} else {
 			ret = CAM_ISP_HW_ERROR_NONE;
 		}
+
+		if (camif_priv->camif_debug &
+			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+			val = cam_io_r(camif_priv->mem_base +
+				camif_priv->camif_reg->vfe_diag_sensor_status);
+			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
+				val);
+		}
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index 4a73bd7..7a69589 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,11 @@
 #include "cam_isp_hw.h"
 #include "cam_vfe_top.h"
 
+/*
+ * Debug values for camif module
+ */
+#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
+
 struct cam_vfe_camif_ver2_reg {
 	uint32_t     camif_cmd;
 	uint32_t     camif_config;
@@ -27,6 +32,8 @@
 	uint32_t     raw_crop_width_cfg;
 	uint32_t     raw_crop_height_cfg;
 	uint32_t     reg_update_cmd;
+	uint32_t     vfe_diag_config;
+	uint32_t     vfe_diag_sensor_status;
 };
 
 struct cam_vfe_camif_reg_data {
@@ -63,6 +70,8 @@
 	uint32_t     eof_irq_mask;
 	uint32_t     error_irq_mask0;
 	uint32_t     error_irq_mask1;
+
+	uint32_t     enable_diagnostic_hw;
 };
 
 struct cam_vfe_camif_ver2_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index 02334a4..287d4a4 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -20,9 +20,49 @@
 #include "cam_jpeg_context.h"
 #include "cam_context_utils.h"
 #include "cam_debug_util.h"
+#include "cam_packet_util.h"
 
 static const char jpeg_dev_name[] = "jpeg";
 
+static int cam_jpeg_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	int rc = 0;
+	int closest_port;
+	bool b_mem_found = false;
+
+	if (!ctx) {
+		CAM_ERR(CAM_JPEG, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_JPEG, "iommu fault for jpeg ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		closest_port = -1;
+		CAM_INFO(CAM_JPEG, "req_id : %lld ", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &b_mem_found);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_JPEG, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+	return rc;
+}
+
 static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
 {
@@ -116,6 +156,7 @@
 		},
 		.crm_ops = { },
 		.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
+		.pagefault_ops = cam_jpeg_context_dump_active_request,
 	},
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
index 46cc08f..1489222 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
@@ -22,11 +22,31 @@
 #include "cam_jpeg_hw_mgr_intf.h"
 #include "cam_jpeg_dev.h"
 #include "cam_debug_util.h"
+#include "cam_smmu_api.h"
 
 #define CAM_JPEG_DEV_NAME "cam-jpeg"
 
 static struct cam_jpeg_dev g_jpeg_dev;
 
+static void cam_jpeg_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_JPEG, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
 static const struct of_device_id cam_jpeg_dt_match[] = {
 	{
 		.compatible = "qcom,cam-jpeg"
@@ -34,23 +54,50 @@
 	{ }
 };
 
-static int cam_jpeg_subdev_close(struct v4l2_subdev *sd,
+static int cam_jpeg_subdev_open(struct v4l2_subdev *sd,
 	struct v4l2_subdev_fh *fh)
 {
-	struct cam_node *node = v4l2_get_subdevdata(sd);
 
-	if (!node) {
-		CAM_ERR(CAM_JPEG, "Node ptr is NULL");
-		return -EINVAL;
-	}
-
-	cam_node_shutdown(node);
+	mutex_lock(&g_jpeg_dev.jpeg_mutex);
+	g_jpeg_dev.open_cnt++;
+	mutex_unlock(&g_jpeg_dev.jpeg_mutex);
 
 	return 0;
 }
 
+static int cam_jpeg_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_jpeg_dev.jpeg_mutex);
+	if (g_jpeg_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_JPEG, "JPEG subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	g_jpeg_dev.open_cnt--;
+
+	if (!node) {
+		CAM_ERR(CAM_JPEG, "Node ptr is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (g_jpeg_dev.open_cnt == 0)
+		cam_node_shutdown(node);
+
+end:
+	mutex_unlock(&g_jpeg_dev.jpeg_mutex);
+	return rc;
+}
+
 static const struct v4l2_subdev_internal_ops cam_jpeg_subdev_internal_ops = {
 	.close = cam_jpeg_subdev_close,
+	.open = cam_jpeg_subdev_open,
 };
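
   The JPEG subdev now keeps an open count under jpeg_mutex and only shuts the
   node down when the last user closes it, instead of on every close. A
   minimal pthread-based sketch of that refcounted close (struct and function
   names are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	struct subdev {
		pthread_mutex_t lock;
		int open_cnt;
	};

	static void subdev_open(struct subdev *sd)
	{
		pthread_mutex_lock(&sd->lock);
		sd->open_cnt++;
		pthread_mutex_unlock(&sd->lock);
	}

	static int subdev_close(struct subdev *sd)
	{
		int rc = 0;

		pthread_mutex_lock(&sd->lock);
		if (sd->open_cnt <= 0) {
			rc = -1;            /* already closed */
			goto out;
		}
		if (--sd->open_cnt == 0)
			printf("last close: shutting node down\n");
	out:
		pthread_mutex_unlock(&sd->lock);
		return rc;
	}

	int main(void)
	{
		struct subdev sd = { PTHREAD_MUTEX_INITIALIZER, 0 };

		subdev_open(&sd);
		subdev_open(&sd);
		subdev_close(&sd);
		subdev_close(&sd);   /* triggers the shutdown path */
		return 0;
	}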
 
 static int cam_jpeg_dev_remove(struct platform_device *pdev)
@@ -78,6 +125,7 @@
 	int i;
 	struct cam_hw_mgr_intf hw_mgr_intf;
 	struct cam_node *node;
+	int iommu_hdl = -1;
 
 	g_jpeg_dev.sd.internal_ops = &cam_jpeg_subdev_internal_ops;
 	rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME,
@@ -89,7 +137,7 @@
 	node = (struct cam_node *)g_jpeg_dev.sd.token;
 
 	rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node,
-		(uint64_t *)&hw_mgr_intf);
+		(uint64_t *)&hw_mgr_intf, &iommu_hdl);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc);
 		goto unregister;
@@ -114,6 +162,9 @@
 		goto ctx_init_fail;
 	}
 
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_jpeg_dev_iommu_fault_handler, node);
+
 	mutex_init(&g_jpeg_dev.jpeg_mutex);
 
 	CAM_INFO(CAM_JPEG, "Camera JPEG probe complete");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
index deab2d5..0d15ced 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
  * @ctx: JPEG base context storage
  * @ctx_jpeg: JPEG private context storage
  * @jpeg_mutex: Jpeg dev mutex
+ * @open_cnt: Open device count
  */
 struct cam_jpeg_dev {
 	struct cam_subdev sd;
@@ -33,5 +34,6 @@
 	struct cam_context ctx[CAM_CTX_MAX];
 	struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX];
 	struct mutex jpeg_mutex;
+	int32_t open_cnt;
 };
 #endif /* __CAM_JPEG_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index f0913b2..0f4f037 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -34,6 +34,7 @@
 #include "cam_mem_mgr.h"
 #include "cam_cdm_intf_api.h"
 #include "cam_debug_util.h"
+#include "cam_common_util.h"
 
 #define CAM_JPEG_HW_ENTRIES_MAX  20
 #define CAM_JPEG_CHBASE          0
@@ -55,8 +56,8 @@
 	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
 	struct cam_hw_done_event_data buf_data;
 	struct cam_jpeg_set_irq_cb irq_cb;
-	uint32_t dev_type = 0;
-	uint64_t kaddr;
+	uintptr_t dev_type = 0;
+	uintptr_t kaddr;
 	uint32_t *cmd_buf_kaddr;
 	size_t cmd_buf_len;
 	struct cam_jpeg_config_inout_param_info *p_params;
@@ -113,7 +114,7 @@
 		rc = hw_mgr->devices[dev_type][0]->hw_ops.deinit(
 			hw_mgr->devices[dev_type][0]->hw_priv, NULL, 0);
 		if (rc)
-			CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW", dev_type);
+			CAM_ERR(CAM_JPEG, "Failed to Deinit %lu HW", dev_type);
 	}
 
 	hw_mgr->device_in_use[dev_type][0] = false;
@@ -133,7 +134,7 @@
 		CAM_ERR(CAM_JPEG, "task_data is NULL");
 		return -EINVAL;
 	}
-	wq_task_data->data = (void *)(uint64_t)dev_type;
+	wq_task_data->data = (void *)dev_type;
 	wq_task_data->request_id = 0;
 	wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_jpeg_mgr_process_cmd;
@@ -146,7 +147,7 @@
 
 	rc = cam_mem_get_cpu_buf(
 		p_cfg_req->hw_cfg_args.hw_update_entries[CAM_JPEG_PARAM].handle,
-		(uint64_t *)&kaddr, &cmd_buf_len);
+		&kaddr, &cmd_buf_len);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d",
 			hw_mgr->iommu_hdl, rc);
@@ -172,7 +173,7 @@
 		p_cfg_req->hw_cfg_args.out_map_entries[i].resource_handle;
 	}
 	buf_data.request_id =
-		(uint64_t)p_cfg_req->hw_cfg_args.priv;
+		PTR_TO_U64(p_cfg_req->hw_cfg_args.priv);
 	ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
 
 	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
@@ -262,7 +263,7 @@
 	struct cam_cdm_bl_request *cdm_cmd;
 	uint32_t size;
 	uint32_t mem_cam_base;
-	uint64_t iova_addr;
+	uintptr_t iova_addr;
 	uint32_t *ch_base_iova_addr;
 	size_t ch_base_len;
 
@@ -314,7 +315,7 @@
 	struct cam_cdm_bl_request *cdm_cmd;
 	struct cam_hw_config_args *config_args = NULL;
 	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
-	uint64_t request_id = 0;
+	uintptr_t request_id = 0;
 	struct cam_jpeg_process_frame_work_data_t *task_data =
 		(struct cam_jpeg_process_frame_work_data_t *)data;
 	uint32_t dev_type;
@@ -357,9 +358,9 @@
 
 	config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
 	request_id = task_data->request_id;
-	if (request_id != (uint64_t)config_args->priv) {
-		CAM_DBG(CAM_JPEG, "not a recent req %lld %lld",
-			request_id, (uint64_t)config_args->priv);
+	if (request_id != (uintptr_t)config_args->priv) {
+		CAM_DBG(CAM_JPEG, "not a recent req %zd %zd",
+			request_id, (uintptr_t)config_args->priv);
 	}
 
 	if (!config_args->num_hw_update_entries) {
@@ -489,7 +490,8 @@
 			buf_data.resource_handle[i] =
 			hw_cfg_args->out_map_entries[i].resource_handle;
 		}
-		buf_data.request_id = (uint64_t)p_cfg_req->hw_cfg_args.priv;
+		buf_data.request_id =
+			(uintptr_t)p_cfg_req->hw_cfg_args.priv;
 		ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
 	}
 
@@ -509,7 +511,7 @@
 	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
 	struct cam_hw_config_args *config_args = config_hw_args;
 	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
-	uint64_t request_id = 0;
+	uintptr_t request_id = 0;
 	struct cam_hw_update_entry *hw_update_entries;
 	struct crm_workq_task *task;
 	struct cam_jpeg_process_frame_work_data_t *task_data;
@@ -549,11 +551,11 @@
 	p_cfg_req->hw_cfg_args = *config_args;
 	p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
 
-	request_id = (uint64_t)config_args->priv;
+	request_id = (uintptr_t)config_args->priv;
 	p_cfg_req->req_id = request_id;
 	hw_update_entries = config_args->hw_update_entries;
-	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %lld",
-		ctx_data, request_id, (uint64_t)config_args->priv);
+	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %zd %zd",
+		ctx_data, request_id, (uintptr_t)config_args->priv);
 	task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
 	if (!task) {
 		CAM_ERR(CAM_JPEG, "no empty task");
@@ -578,7 +580,7 @@
 	list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
-	task_data->data = (void *)(int64_t)p_cfg_req->dev_type;
+	task_data->data = (void *)(uintptr_t)p_cfg_req->dev_type;
 	task_data->request_id = request_id;
 	task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_jpeg_mgr_process_cmd;
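
   Several of the JPEG changes replace uint64_t/int64_t casts with uintptr_t
   wherever a request id or device type rides inside a void * cookie. A short
   sketch of why that round-trip goes through uintptr_t (the task_data struct
   below is a hypothetical stand-in for the work-queue payload):

	#include <stdint.h>
	#include <stdio.h>

	/* a void * "cookie" field, as used by the work-queue task data */
	struct task_data {
		void *data;
	};

	int main(void)
	{
		struct task_data task;
		uint32_t dev_type = 3;
		uintptr_t out;

		/* store the small integer in the pointer-sized cookie ... */
		task.data = (void *)(uintptr_t)dev_type;

		/* ... and recover it the same way; uintptr_t is sized to the
		 * pointer on both 32-bit and 64-bit builds, which a fixed
		 * 64-bit cast does not guarantee on 32-bit kernels. */
		out = (uintptr_t)task.data;
		printf("dev_type = %u\n", (unsigned int)out);
		return 0;
	}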
@@ -600,6 +602,69 @@
 	return rc;
 }
 
+static void cam_jpeg_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	dma_addr_t iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_JPEG,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_JPEG, "port: %d f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_JPEG, "get src buf address fail");
+				continue;
+			}
+
+			CAM_INFO(CAM_JPEG,
+				"pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+
+			iova_addr += io_cfg[i].offsets[j];
+		}
+	}
+}
 
 static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
 	void *prepare_hw_update_args)
@@ -675,6 +740,7 @@
 	CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %lu",
 		(void *)packet, (void *)io_cfg_ptr,
 		sizeof(struct cam_buf_io_cfg));
+	prepare_args->pf_data->packet = packet;
 
 	prepare_args->num_out_map_entries = 0;
 
@@ -721,7 +787,7 @@
 			(uint32_t)cmd_desc[i].offset;
 	}
 	prepare_args->num_hw_update_entries = j;
-	prepare_args->priv = (void *)packet->header.request_id;
+	prepare_args->priv = (void *)(uintptr_t)packet->header.request_id;
 
 	CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d",
 		prepare_args->in_map_entries[0].sync_id);
@@ -827,7 +893,7 @@
 	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
 	struct cam_jpeg_hw_cfg_req *cfg_req = NULL;
 	struct cam_jpeg_hw_cfg_req *req_temp = NULL;
-	int64_t request_id = 0;
+	long request_id = 0;
 	uint32_t dev_type;
 	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
 	bool b_req_found = false;
@@ -842,13 +908,13 @@
 	if (flush_args->num_req_pending)
 		return 0;
 
-	request_id = (int64_t)flush_args->flush_req_active[0];
+	request_id = (uintptr_t)flush_args->flush_req_active[0];
 
 	if (!flush_args->num_req_active)
 		return 0;
 
 	if (request_id <= 0) {
-		CAM_ERR(CAM_JPEG, "Invalid red id %lld", request_id);
+		CAM_ERR(CAM_JPEG, "Invalid req id %ld", request_id);
 		return -EINVAL;
 	}
 
@@ -885,7 +951,7 @@
 	}
 
 	if (!b_req_found) {
-		CAM_ERR(CAM_JPEG, "req not found %lld", request_id);
+		CAM_ERR(CAM_JPEG, "req not found %ld", request_id);
 		return -EINVAL;
 	}
 
@@ -1189,7 +1255,7 @@
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 
-	if (copy_to_user((void __user *)query_cap->caps_handle,
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
 		&g_jpeg_hw_mgr.jpeg_caps,
 		sizeof(struct cam_jpeg_query_cap_cmd))) {
 		CAM_ERR(CAM_JPEG, "copy_to_user failed");
@@ -1410,7 +1476,35 @@
 	return rc;
 }
 
-int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+static int cam_jpeg_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_jpeg_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	default:
+		CAM_ERR(CAM_JPEG, "Invalid cmd:0x%x", hw_cmd_args->cmd_type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
 {
 	int i, rc;
 	uint32_t num_dev;
@@ -1434,6 +1528,7 @@
 	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
 	hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
 	hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
+	hw_mgr_intf->hw_cmd = cam_jpeg_mgr_cmd;
 
 	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
 	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
@@ -1495,6 +1590,9 @@
 		goto cdm_iommu_failed;
 	}
 
+	if (iommu_hdl)
+		*iommu_hdl = g_jpeg_hw_mgr.iommu_hdl;
+
 	return rc;
 
 cdm_iommu_failed:
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
index 5e10167..82022ec 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -38,7 +38,7 @@
 struct cam_jpeg_process_frame_work_data_t {
 	uint32_t type;
 	void *data;
-	uint64_t request_id;
+	uintptr_t request_id;
 };
 
 /**
@@ -81,7 +81,7 @@
 	struct list_head list;
 	struct cam_hw_config_args hw_cfg_args;
 	uint32_t dev_type;
-	int64_t req_id;
+	uintptr_t req_id;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
index 5fb4e3ad..5705890 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,8 +17,7 @@
 #include <uapi/media/cam_defs.h>
 #include <linux/of.h>
 
-
 int cam_jpeg_hw_mgr_init(struct device_node *of_node,
-	uint64_t *hw_mgr_hdl);
+	uint64_t *hw_mgr_hdl, int *iommu_hdl);
 
 #endif /* CAM_JPEG_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
index 3d0266d..99a8fe1 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -22,10 +22,10 @@
 	struct cam_acquire_dev_cmd *cmd)
 {
 	int rc = 0;
-	uint64_t ctxt_to_hw_map = (uint64_t)ctx->ctxt_to_hw_map;
+	uintptr_t ctxt_to_hw_map = (uintptr_t)ctx->ctxt_to_hw_map;
 	struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
 
-	CAM_DBG(CAM_LRME, "Enter");
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
 
 	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
 	if (rc) {
@@ -46,7 +46,7 @@
 {
 	int rc = 0;
 
-	CAM_DBG(CAM_LRME, "Enter");
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
 
 	rc = cam_context_release_dev_to_hw(ctx, cmd);
 	if (rc) {
@@ -64,7 +64,7 @@
 {
 	int rc = 0;
 
-	CAM_DBG(CAM_LRME, "Enter");
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
 
 	rc = cam_context_start_dev_to_hw(ctx, cmd);
 	if (rc) {
@@ -82,7 +82,7 @@
 {
 	int rc;
 
-	CAM_DBG(CAM_LRME, "Enter");
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
 
 	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
 	if (rc) {
@@ -98,6 +98,8 @@
 {
 	int rc;
 
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
 	rc = cam_context_flush_dev_to_hw(ctx, cmd);
 	if (rc)
 		CAM_ERR(CAM_LRME, "Failed to flush device");
@@ -109,7 +111,7 @@
 {
 	int rc = 0;
 
-	CAM_DBG(CAM_LRME, "Enter");
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
 
 	rc = cam_context_stop_dev_to_hw(ctx);
 	if (rc) {
@@ -127,7 +129,7 @@
 {
 	int rc = 0;
 
-	CAM_DBG(CAM_LRME, "Enter");
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
 
 	rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
 	if (rc) {
@@ -182,6 +184,7 @@
 	/* Acquired */
 	{
 		.ioctl_ops = {
+			.config_dev = __cam_lrme_ctx_config_dev_in_activated,
 			.release_dev = __cam_lrme_ctx_release_dev_in_acquired,
 			.start_dev = __cam_lrme_ctx_start_dev_in_acquired,
 		},
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
index 4c705c1..dc1c8f4 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
@@ -19,7 +19,7 @@
 #include "cam_req_mgr_interface.h"
 #include "cam_sync_api.h"
 
-#define CAM_LRME_CTX_INDEX_SHIFT 32
+#define CAM_LRME_CTX_INDEX_SHIFT 16
 
 /**
  * struct cam_lrme_context
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
index a4ee104..6b1250a 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
@@ -81,6 +81,7 @@
 static int cam_lrme_dev_close(struct v4l2_subdev *sd,
 	struct v4l2_subdev_fh *fh)
 {
+	int rc = 0;
 	struct cam_lrme_dev *lrme_dev = g_lrme_dev;
 	struct cam_node *node = v4l2_get_subdevdata(sd);
 
@@ -90,18 +91,25 @@
 	}
 
 	mutex_lock(&lrme_dev->lock);
-	lrme_dev->open_cnt--;
-	mutex_unlock(&lrme_dev->lock);
+	if (lrme_dev->open_cnt <= 0) {
+		CAM_DBG(CAM_LRME, "LRME subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
 
+	lrme_dev->open_cnt--;
 	if (!node) {
 		CAM_ERR(CAM_LRME, "Node is NULL");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto end;
 	}
 
 	if (lrme_dev->open_cnt == 0)
 		cam_node_shutdown(node);
 
-	return 0;
+end:
+	mutex_unlock(&lrme_dev->lock);
+	return rc;
 }
 
 static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = {
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index fa8984c..eecba39 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -188,12 +188,6 @@
 
 			io_addr[plane] += io_cfg[i].offsets[plane];
 
-			if (io_addr[plane] >> 32) {
-				CAM_ERR(CAM_LRME, "Invalid io addr for %d %d",
-					plane, rc);
-				return -ENOMEM;
-			}
-
 			CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu",
 				io_cfg[i].direction, plane, io_addr[plane]);
 		}
@@ -571,12 +565,13 @@
 
 	if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) {
 		CAM_ERR(CAM_LRME,
-			"sizeof(struct cam_query_cap_cmd) = %lu, args->size = %d",
+			"sizeof(struct cam_query_cap_cmd) = %zu, args->size = %d",
 			sizeof(struct cam_query_cap_cmd), args->size);
 		return -EFAULT;
 	}
 
-	if (copy_to_user((void __user *)args->caps_handle, &(hw_mgr->lrme_caps),
+	if (copy_to_user(u64_to_user_ptr(args->caps_handle),
+		&(hw_mgr->lrme_caps),
 		sizeof(struct cam_lrme_query_cap_cmd))) {
 		CAM_ERR(CAM_LRME, "copy to user failed");
 		return -EFAULT;
@@ -591,7 +586,7 @@
 	struct cam_hw_acquire_args *args =
 		(struct cam_hw_acquire_args *)hw_acquire_args;
 	struct cam_lrme_acquire_args lrme_acquire_args;
-	uint64_t device_index;
+	uintptr_t device_index;
 
 	if (!hw_mgr_priv || !args) {
 		CAM_ERR(CAM_LRME,
@@ -612,7 +607,7 @@
 	CAM_DBG(CAM_LRME, "Get device id %llu", device_index);
 
 	if (device_index >= hw_mgr->device_count) {
-		CAM_ERR(CAM_LRME, "Get wrong device id %llu", device_index);
+		CAM_ERR(CAM_LRME, "Get wrong device id %lu", device_index);
 		return -EINVAL;
 	}
 
@@ -667,7 +662,7 @@
 	}
 
 	args = (struct cam_hw_flush_args *)hw_flush_args;
-	device_index = ((uint64_t)args->ctxt_to_hw_map & 0xF);
+	device_index = ((uintptr_t)args->ctxt_to_hw_map & 0xF);
 	if (device_index >= hw_mgr->device_count) {
 		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
 		return -EPERM;
@@ -765,6 +760,12 @@
 		return -EINVAL;
 	}
 
+	rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_DUMP_REGISTER,
+			&g_lrme_hw_mgr.debugfs_entry.dump_register,
+			sizeof(bool));
+
 	return rc;
 }
 
@@ -963,6 +964,35 @@
 	return rc;
 }
 
+static int cam_lrme_mgr_create_debugfs_entry(void)
+{
+	int rc = 0;
+
+	g_lrme_hw_mgr.debugfs_entry.dentry =
+		debugfs_create_dir("camera_lrme", NULL);
+	if (!g_lrme_hw_mgr.debugfs_entry.dentry) {
+		CAM_ERR(CAM_LRME, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_bool("dump_register",
+		0644,
+		g_lrme_hw_mgr.debugfs_entry.dentry,
+		&g_lrme_hw_mgr.debugfs_entry.dump_register)) {
+		CAM_ERR(CAM_LRME, "failed to create dump register entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	return rc;
+
+err:
+	debugfs_remove_recursive(g_lrme_hw_mgr.debugfs_entry.dentry);
+	g_lrme_hw_mgr.debugfs_entry.dentry = NULL;
+	return rc;
+}
+
 int cam_lrme_mgr_register_device(
 	struct cam_hw_intf *lrme_hw_intf,
 	struct cam_iommu_handle *device_iommu,
@@ -1113,6 +1143,8 @@
 
 	g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
 
+	cam_lrme_mgr_create_debugfs_entry();
+
 	CAM_DBG(CAM_LRME, "Hw mgr init done");
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
index f7ce4d2..87419cf 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,13 +30,13 @@
 #define CAM_LRME_WORKQ_NUM_TASK 10
 
 #define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \
-	((uint64_t)ctxt_to_hw_map & 0xF)
+	((uintptr_t)ctxt_to_hw_map & 0xF)
 
 #define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \
-	(((uint64_t)ctxt_to_hw_map & 0xF0) >> 4)
+	(((uintptr_t)ctxt_to_hw_map & 0xF0) >> 4)
 
 #define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \
-	((uint64_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
+	((uint64_t)(uintptr_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
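
   The LRME macros above pack the device index, priority and context index
   into a single pointer-sized ctxt_to_hw_map value, with the context index
   now shifted by 16 instead of 32 so it still fits when the cookie is a
   32-bit uintptr_t. A small encode/decode sketch under those assumptions
   (field widths and the encode helper are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define CTX_INDEX_SHIFT 16

	/* low nibble = device, next nibble = priority, high bits = ctx */
	static uintptr_t encode_map(uint32_t device, uint32_t prio,
				    uint32_t ctx)
	{
		return (device & 0xF) | ((prio & 0xF) << 4) |
		       ((uintptr_t)ctx << CTX_INDEX_SHIFT);
	}

	int main(void)
	{
		uintptr_t map = encode_map(2, 1, 7);

		printf("device=%lu prio=%lu ctx=%lu\n",
		       (unsigned long)(map & 0xF),
		       (unsigned long)((map & 0xF0) >> 4),
		       (unsigned long)(map >> CTX_INDEX_SHIFT));
		return 0;
	}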
 
 /**
  * enum cam_lrme_hw_mgr_ctx_priority
@@ -52,13 +52,24 @@
 /**
  * struct cam_lrme_mgr_work_data : HW Mgr work data
  *
- * hw_device : Pointer to the hw device
+ * @hw_device                    : Pointer to the hw device
  */
 struct cam_lrme_mgr_work_data {
 	struct cam_lrme_device *hw_device;
 };
 
 /**
+ * struct cam_lrme_debugfs_entry : debugfs entry struct
+ *
+ * @dentry                       : entry of debugfs
+ * @dump_register                : flag to dump registers
+ */
+struct cam_lrme_debugfs_entry {
+	struct dentry   *dentry;
+	bool             dump_register;
+};
+
+/**
  * struct cam_lrme_device     : LRME HW device
  *
  * @hw_caps                   : HW device's capabilities
@@ -98,6 +109,7 @@
  * @frame_req       : List of frame request to use
  * @lrme_caps       : LRME capabilities
  * @event_cb        : IRQ callback function
+ * @debugfs_entry   : debugfs entry for debug properties
  */
 struct cam_lrme_hw_mgr {
 	uint32_t                      device_count;
@@ -110,6 +122,7 @@
 	struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX];
 	struct cam_lrme_query_cap_cmd lrme_caps;
 	cam_hw_event_cb_func          event_cb;
+	struct cam_lrme_debugfs_entry debugfs_entry;
 };
 
 int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index 595bb81..a5f9ff1 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -14,6 +14,20 @@
 #include "cam_lrme_hw_soc.h"
 #include "cam_smmu_api.h"
 
+static void cam_lrme_dump_registers(void __iomem *base)
+{
+	/* dump the clc registers */
+	cam_io_dump(base, 0x60, (0xc0 - 0x60) / 0x4);
+	/* dump the fe and we registers */
+	cam_io_dump(base, 0x200, (0x29c - 0x200) / 0x4);
+	cam_io_dump(base, 0x2f0, (0x330 - 0x2f0) / 0x4);
+	cam_io_dump(base, 0x500, (0x5b4 - 0x500) / 0x4);
+	cam_io_dump(base, 0x700, (0x778 - 0x700) / 0x4);
+	cam_io_dump(base, 0x800, (0x878 - 0x800) / 0x4);
+	/* dump lrme sw registers, interrupts */
+	cam_io_dump(base, 0x900, (0x928 - 0x900) / 0x4);
+}
+
 static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
 	uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
 {
@@ -64,7 +78,8 @@
 		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
 			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
 			0x0);
-	else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY)
+	else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY ||
+			io_buf->io_cfg->format == CAM_FORMAT_PLAIN8)
 		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
 			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
 			0x1);
@@ -567,6 +582,8 @@
 			lrme_core->state);
 	}
 
+	cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
 	CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery");
 	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
 	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
@@ -610,6 +627,9 @@
 	lrme_core->req_proc = lrme_core->req_submit;
 	lrme_core->req_submit = NULL;
 
+	if (lrme_core->dump_flag)
+		cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
 	return 0;
 }
 
@@ -654,13 +674,13 @@
 		cam_io_w_mb(0xFFFF,
 			soc_info->reg_map[0].mem_base +
 			hw_info->titan_reg.top_irq_mask);
-		cam_io_w_mb(0xFFFF,
+		cam_io_w_mb(0xFFFFF,
 			soc_info->reg_map[0].mem_base +
 			hw_info->bus_wr_reg.common_reg.irq_mask_0);
-		cam_io_w_mb(0xFFFF,
+		cam_io_w_mb(0xFFFFF,
 			soc_info->reg_map[0].mem_base +
 			hw_info->bus_wr_reg.common_reg.irq_mask_1);
-		cam_io_w_mb(0xFFFF,
+		cam_io_w_mb(0xFFFFF,
 			soc_info->reg_map[0].mem_base +
 			hw_info->bus_rd_reg.common_reg.irq_mask);
 		break;
@@ -900,7 +920,7 @@
 
 	if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) {
 		CAM_ERR(CAM_LRME,
-			"size of args %lu, arg_size %d",
+			"size of args %zu, arg_size %d",
 			sizeof(struct cam_lrme_hw_submit_args), arg_size);
 		return -EINVAL;
 	}
@@ -952,6 +972,7 @@
 	}
 
 	lrme_core->req_submit = frame_req;
+
 	mutex_unlock(&lrme_hw->hw_mutex);
 	CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu",
 		frame_req->req_id);
@@ -1235,6 +1256,14 @@
 		break;
 	}
 
+	case CAM_LRME_HW_CMD_DUMP_REGISTER: {
+		struct cam_lrme_core *lrme_core =
+			(struct cam_lrme_core *)lrme_hw->core_info;
+		lrme_core->dump_flag = *(bool *)cmd_args;
+		CAM_DBG(CAM_LRME, "dump_flag %d", lrme_core->dump_flag);
+		break;
+	}
+
 	default:
 		break;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
index bf2f370..cf8e740 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -137,6 +137,7 @@
 	struct cam_lrme_frame_request    *req_submit;
 	struct cam_lrme_cdm_info         *hw_cdm_info;
 	uint32_t                          hw_idx;
+	bool                              dump_flag;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
index d16b174..26b5608 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -65,11 +65,13 @@
  * @CAM_LRME_HW_CMD_prepare_hw_update : Prepare HW update
  * @CAM_LRME_HW_CMD_REGISTER_CB       : register HW manager callback
  * @CAM_LRME_HW_CMD_SUBMIT            : Submit frame to HW
+ * @CAM_LRME_HW_CMD_DUMP_REGISTER     : dump register values
  */
 enum cam_lrme_hw_cmd_type {
 	CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
 	CAM_LRME_HW_CMD_REGISTER_CB,
 	CAM_LRME_HW_CMD_SUBMIT,
+	CAM_LRME_HW_CMD_DUMP_REGISTER,
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 0e77a4c..f2c243e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -25,11 +25,11 @@
 static struct cam_mem_table tbl;
 
 static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
-	uint64_t *vaddr,
+	uintptr_t *vaddr,
 	size_t *len)
 {
 	*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
-	if (IS_ERR_OR_NULL((void *)*vaddr)) {
+	if (IS_ERR_OR_NULL((void *)(uintptr_t)(*vaddr))) {
 		CAM_ERR(CAM_MEM, "kernel map fail");
 		return -ENOSPC;
 	}
@@ -183,12 +183,12 @@
 }
 EXPORT_SYMBOL(cam_mem_get_io_buf);
 
-int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len)
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
 {
 	int rc = 0;
 	int idx;
 	struct ion_handle *ion_hdl = NULL;
-	uint64_t kvaddr = 0;
+	uintptr_t kvaddr = 0;
 	size_t klen = 0;
 
 	if (!buf_handle || !vaddr_ptr || !len)
@@ -288,7 +288,7 @@
 
 		rc = msm_ion_do_cache_op(tbl.client,
 				tbl.bufq[idx].i_hdl,
-				(void *)tbl.bufq[idx].vaddr,
+				(void *)(uintptr_t)tbl.bufq[idx].vaddr,
 				tbl.bufq[idx].len,
 				ion_cache_ops);
 		if (rc)
@@ -926,7 +926,7 @@
 	int rc = 0;
 	uint32_t heap_id;
 	int32_t ion_flag = 0;
-	uint64_t kvaddr;
+	uintptr_t kvaddr;
 	dma_addr_t iova = 0;
 	size_t request_len = 0;
 	uint32_t mem_handle;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
index 83727d2..92c366d 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,7 +56,7 @@
 	size_t len;
 	uint32_t flags;
 	uint64_t vaddr;
-	uint64_t kmdvaddr;
+	uintptr_t kmdvaddr;
 	bool active;
 	bool is_imported;
 };
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index 14b1a67..64258e8 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -43,7 +43,7 @@
  * @region     : Region to which allocated memory belongs
  */
 struct cam_mem_mgr_memory_desc {
-	uint64_t kva;
+	uintptr_t kva;
 	uint32_t iova;
 	int32_t smmu_hdl;
 	uint32_t mem_handle;
@@ -92,7 +92,7 @@
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr,
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr,
 	size_t *len);
 
 static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 460b3df..4c4afc1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1325,9 +1325,9 @@
 		return NULL;
 	}
 
-	if (session->num_links >= MAX_LINKS_PER_SESSION) {
+	if (session->num_links >= MAXIMUM_LINKS_PER_SESSION) {
 		CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
-			session->num_links, MAX_LINKS_PER_SESSION);
+			session->num_links, MAXIMUM_LINKS_PER_SESSION);
 		return NULL;
 	}
 
@@ -1362,7 +1362,7 @@
 
 	mutex_lock(&session->lock);
 	/*  Loop through and find a free index */
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
 		if (!session->links[i]) {
 			CAM_DBG(CAM_CRM,
 				"Free link index %d found, num_links=%d",
@@ -1372,7 +1372,7 @@
 		}
 	}
 
-	if (i == MAX_LINKS_PER_SESSION) {
+	if (i == MAXIMUM_LINKS_PER_SESSION) {
 		CAM_ERR(CAM_CRM, "Free link index not found");
 		goto error;
 	}
@@ -1433,7 +1433,7 @@
 		return;
 	}
 
-	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
 		if (session->links[i] == link)
 			session->links[i] = NULL;
 	}
@@ -1445,7 +1445,7 @@
 		 * of only having 2 links in a given session
 		 */
 		session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
-		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
 			if (session->links[i])
 				session->links[i]->sync_link = NULL;
 		}
@@ -2387,7 +2387,7 @@
 			ses_info->session_hdl,
 			cam_session->num_links);
 
-		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
 			link = cam_session->links[i];
 
 			if (!link)
@@ -2628,7 +2628,8 @@
 	}
 
 	if ((sync_info->num_links < 0) ||
-		(sync_info->num_links > MAX_LINKS_PER_SESSION)) {
+		(sync_info->num_links >
+		MAX_LINKS_PER_SESSION)) {
 		CAM_ERR(CAM_CRM, "Invalid num links %d", sync_info->num_links);
 		return -EINVAL;
 	}
@@ -2777,6 +2778,13 @@
 		goto end;
 	}
 
+	if (control->num_links > MAX_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Invalid number of links %d",
+			control->num_links);
+		rc = -EINVAL;
+		goto end;
+	}
+
 	mutex_lock(&g_crm_core_dev->crm_lock);
 	for (i = 0; i < control->num_links; i++) {
 		link = (struct cam_req_mgr_core_link *)
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 68ec09b..8b86931 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -34,6 +34,8 @@
 
 #define SYNC_LINK_SOF_CNT_MAX_LMT 1
 
+#define MAXIMUM_LINKS_PER_SESSION  4
+
 /**
  * enum crm_workq_task_type
  * @codes: to identify which type of task is present
@@ -353,7 +355,7 @@
 struct cam_req_mgr_core_session {
 	int32_t                       session_hdl;
 	uint32_t                      num_links;
-	struct cam_req_mgr_core_link *links[MAX_LINKS_PER_SESSION];
+	struct cam_req_mgr_core_link *links[MAXIMUM_LINKS_PER_SESSION];
 	struct list_head              entry;
 	struct mutex                  lock;
 	int32_t                       force_err_recovery;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 0d21064..cb60ef4 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -25,6 +25,7 @@
 #include "cam_subdev.h"
 #include "cam_mem_mgr.h"
 #include "cam_debug_util.h"
+#include "cam_common_util.h"
 #include <linux/slub_def.h>
 
 #define CAM_REQ_MGR_EVENT_MAX 30
@@ -153,6 +154,10 @@
 
 static int cam_req_mgr_close(struct file *filep)
 {
+	struct v4l2_subdev *sd;
+	struct v4l2_fh *vfh = filep->private_data;
+	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+
 	mutex_lock(&g_dev.cam_lock);
 
 	if (g_dev.open_cnt <= 0) {
@@ -161,6 +166,17 @@
 	}
 
 	cam_req_mgr_handle_core_shutdown();
+
+	list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) {
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+			continue;
+		if (sd->internal_ops && sd->internal_ops->close) {
+			CAM_DBG(CAM_CRM, "Invoke subdev close for device %s",
+				sd->name);
+			sd->internal_ops->close(sd, subdev_fh);
+		}
+	}
+
 	g_dev.open_cnt--;
 	v4l2_fh_release(filep);
 
@@ -220,14 +236,15 @@
 			return -EINVAL;
 
 		if (copy_from_user(&ses_info,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
 
 		rc = cam_req_mgr_create_session(&ses_info);
 		if (!rc)
-			if (copy_to_user((void *)k_ioctl->handle,
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
 				&ses_info, k_ioctl->size))
 				rc = -EFAULT;
 		}
@@ -240,7 +257,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&ses_info,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
@@ -256,14 +273,15 @@
 			return -EINVAL;
 
 		if (copy_from_user(&link_info,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
 
 		rc = cam_req_mgr_link(&link_info);
 		if (!rc)
-			if (copy_to_user((void *)k_ioctl->handle,
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
 				&link_info, k_ioctl->size))
 				rc = -EFAULT;
 		}
@@ -276,7 +294,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&unlink_info,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
@@ -292,7 +310,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&sched_req,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
@@ -308,7 +326,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&flush_info,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
@@ -324,7 +342,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&sync_info,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
@@ -339,7 +357,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&cmd,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			rc = -EFAULT;
 			break;
@@ -347,7 +365,8 @@
 
 		rc = cam_mem_mgr_alloc_and_map(&cmd);
 		if (!rc)
-			if (copy_to_user((void *)k_ioctl->handle,
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
 				&cmd, k_ioctl->size)) {
 				rc = -EFAULT;
 				break;
@@ -361,7 +380,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&cmd,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			rc = -EFAULT;
 			break;
@@ -369,7 +388,8 @@
 
 		rc = cam_mem_mgr_map(&cmd);
 		if (!rc)
-			if (copy_to_user((void *)k_ioctl->handle,
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
 				&cmd, k_ioctl->size)) {
 				rc = -EFAULT;
 				break;
@@ -383,7 +403,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&cmd,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			rc = -EFAULT;
 			break;
@@ -399,7 +419,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&cmd,
-			(void *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			rc = -EFAULT;
 			break;
@@ -417,7 +437,7 @@
 			return -EINVAL;
 
 		if (copy_from_user(&cmd,
-			(void __user *)k_ioctl->handle,
+			u64_to_user_ptr(k_ioctl->handle),
 			k_ioctl->size)) {
 			rc = -EFAULT;
 			break;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 3798ef8..68b5569 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -228,7 +228,7 @@
 				crm_workq->task.num_task,
 				GFP_KERNEL);
 		if (!crm_workq->task.pool) {
-			CAM_WARN(CAM_CRM, "Insufficient memory %lu",
+			CAM_WARN(CAM_CRM, "Insufficient memory %zu",
 				sizeof(struct crm_workq_task) *
 				crm_workq->task.num_task);
 			kfree(crm_workq);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index ed0a26b..c14a74d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -16,6 +16,7 @@
 #include "cam_sensor_util.h"
 #include "cam_trace.h"
 #include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
 
 int32_t cam_actuator_construct_default_power_setting(
 	struct cam_sensor_power_ctrl_t *power_info)
@@ -141,7 +142,7 @@
 		CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info);
 		return -EINVAL;
 	}
-	rc = msm_camera_power_down(power_info, soc_info);
+	rc = cam_sensor_util_power_down(power_info, soc_info);
 	if (rc) {
 		CAM_ERR(CAM_ACTUATOR, "power down the core is failed:%d", rc);
 		return rc;
@@ -301,7 +302,7 @@
 	trace_cam_apply_req("Actuator", apply->request_id);
 
 	CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
-
+	mutex_lock(&(a_ctrl->actuator_mutex));
 	if ((apply->request_id ==
 		a_ctrl->i2c_data.per_frame[request_id].request_id) &&
 		(a_ctrl->i2c_data.per_frame[request_id].is_settings_valid)
@@ -312,7 +313,7 @@
 			CAM_ERR(CAM_ACTUATOR,
 				"Failed in applying the request: %lld\n",
 				apply->request_id);
-			return rc;
+			goto release_mutex;
 		}
 	}
 	del_req_id = (request_id +
@@ -327,12 +328,14 @@
 			CAM_ERR(CAM_ACTUATOR,
 				"Fail deleting the req: %d err: %d\n",
 				del_req_id, rc);
-			return rc;
+			goto release_mutex;
 		}
 	} else {
 		CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up");
 	}
 
+release_mutex:
+	mutex_unlock(&(a_ctrl->actuator_mutex));
 	return rc;
 }
 
@@ -352,6 +355,8 @@
 		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
 		return -EINVAL;
 	}
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
 	if (link->link_enable) {
 		a_ctrl->bridge_intf.link_hdl = link->link_hdl;
 		a_ctrl->bridge_intf.crm_cb = link->crm_cb;
@@ -359,6 +364,7 @@
 		a_ctrl->bridge_intf.link_hdl = -1;
 		a_ctrl->bridge_intf.crm_cb = NULL;
 	}
+	mutex_unlock(&(a_ctrl->actuator_mutex));
 
 	return 0;
 }
@@ -409,7 +415,7 @@
 	size_t   len_of_buff = 0;
 	uint32_t *offset = NULL;
 	uint32_t *cmd_buf = NULL;
-	uint64_t generic_ptr;
+	uintptr_t generic_ptr;
 	struct common_header      *cmm_hdr = NULL;
 	struct cam_control        *ioctl_ctrl = NULL;
 	struct cam_packet         *csl_packet = NULL;
@@ -431,11 +437,12 @@
 	power_info = &soc_private->power_info;
 
 	ioctl_ctrl = (struct cam_control *)arg;
-	if (copy_from_user(&config, (void __user *) ioctl_ctrl->handle,
+	if (copy_from_user(&config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
 		sizeof(config)))
 		return -EFAULT;
 	rc = cam_mem_get_cpu_buf(config.packet_handle,
-		(uint64_t *)&generic_ptr, &len_of_buff);
+		&generic_ptr, &len_of_buff);
 	if (rc < 0) {
 		CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d",
 			rc);
@@ -449,7 +456,8 @@
 		return -EINVAL;
 	}
 
-	csl_packet = (struct cam_packet *)(generic_ptr + config.offset);
+	csl_packet =
+		(struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
 	CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
 
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
@@ -464,7 +472,7 @@
 			if (!total_cmd_buf_in_bytes)
 				continue;
 			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
-					(uint64_t *)&generic_ptr, &len_of_buff);
+					&generic_ptr, &len_of_buff);
 			if (rc < 0) {
 				CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
 				return rc;
@@ -704,7 +712,7 @@
 			goto release_mutex;
 		}
 		rc = copy_from_user(&actuator_acq_dev,
-			(void __user *) cmd->handle,
+			u64_to_user_ptr(cmd->handle),
 			sizeof(actuator_acq_dev));
 		if (rc < 0) {
 			CAM_ERR(CAM_ACTUATOR, "Failed Copying from user\n");
@@ -725,7 +733,8 @@
 
 		CAM_DBG(CAM_ACTUATOR, "Device Handle: %d",
 			actuator_acq_dev.device_handle);
-		if (copy_to_user((void __user *) cmd->handle, &actuator_acq_dev,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&actuator_acq_dev,
 			sizeof(struct cam_sensor_acquire_dev))) {
 			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
 			rc = -EFAULT;
@@ -778,7 +787,8 @@
 		struct cam_actuator_query_cap actuator_cap = {0};
 
 		actuator_cap.slot_info = a_ctrl->soc_info.index;
-		if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&actuator_cap,
 			sizeof(struct cam_actuator_query_cap))) {
 			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
 			rc = -EFAULT;
@@ -891,7 +901,9 @@
 			continue;
 
 		if (i2c_set->is_settings_valid == 1) {
+			mutex_lock(&(a_ctrl->actuator_mutex));
 			rc = delete_request(i2c_set);
+			mutex_unlock(&(a_ctrl->actuator_mutex));
 			if (rc < 0)
 				CAM_ERR(CAM_ACTUATOR,
 					"delete request: %lld rc: %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 2e9aa6c..cb0bcc2 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -14,6 +14,7 @@
 #include "cam_csiphy_core.h"
 #include "cam_csiphy_dev.h"
 #include "cam_csiphy_soc.h"
+#include "cam_common_util.h"
 
 #include <soc/qcom/scm.h>
 #include <cam_mem_mgr.h>
@@ -21,19 +22,23 @@
 #define SCM_SVC_CAMERASS 0x18
 #define SECURE_SYSCALL_ID 0x6
 
+#define SECURE_SYSCALL_ID_2 0x7
+
 static int csiphy_dump;
 module_param(csiphy_dump, int, 0644);
 
-static int cam_csiphy_notify_secure_mode(int phy, bool protect)
+static int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
+	bool protect, int32_t offset)
 {
 	struct scm_desc desc = {0};
 
+	if (offset >= CSIPHY_MAX_INSTANCES)
+		return -EINVAL;
 	desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
 	desc.args[0] = protect;
-	desc.args[1] = phy;
+	desc.args[1] = csiphy_dev->csiphy_cpas_cp_reg_mask[offset];
 
-	CAM_DBG(CAM_CSIPHY, "phy : %d, protect : %d", phy, protect);
-	if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID),
+	if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID_2),
 		&desc)) {
 		CAM_ERR(CAM_CSIPHY, "scm call to hypervisor failed");
 		return -EINVAL;
@@ -42,6 +47,27 @@
 	return 0;
 }
 
+static int32_t cam_csiphy_get_instance_offset(
+	struct csiphy_device *csiphy_dev,
+	int32_t dev_handle)
+{
+	int32_t i;
+
+	if (csiphy_dev->acquire_count >
+		CSIPHY_MAX_INSTANCES) {
+		CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < csiphy_dev->acquire_count; i++) {
+		if (dev_handle ==
+			csiphy_dev->bridge_intf.device_hdl[i])
+			break;
+	}
+
+	return i;
+}
+
 void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
 	struct cam_csiphy_query_cap *csiphy_cap)
 {
@@ -75,11 +101,56 @@
 	}
 }
 
+static int32_t cam_csiphy_update_secure_info(
+	struct csiphy_device *csiphy_dev,
+	struct cam_csiphy_info  *cam_cmd_csiphy_info,
+	struct cam_config_dev_cmd *cfg_dev)
+{
+	uint32_t clock_lane, adj_lane_mask, temp;
+	int32_t offset;
+
+	if (csiphy_dev->acquire_count >=
+		CSIPHY_MAX_INSTANCES) {
+		CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+		return -EINVAL;
+	}
+
+	offset = cam_csiphy_get_instance_offset(csiphy_dev,
+		cfg_dev->dev_handle);
+	if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+		CAM_ERR(CAM_CSIPHY, "Invalid offset");
+		return -EINVAL;
+	}
+
+	if (cam_cmd_csiphy_info->combo_mode)
+		clock_lane =
+			csiphy_dev->ctrl_reg->csiphy_reg.csiphy_combo_clk_lane;
+	else
+		clock_lane =
+			csiphy_dev->ctrl_reg->csiphy_reg.csiphy_clock_lane;
+
+	adj_lane_mask = cam_cmd_csiphy_info->lane_mask & 0x1F &
+		~clock_lane;
+	temp = adj_lane_mask & (clock_lane - 1);
+	adj_lane_mask =
+		((adj_lane_mask & (~((clock_lane - 1)))) >> 1) | temp;
+
+	csiphy_dev->csiphy_info.secure_mode[offset] = 1;
+
+	csiphy_dev->csiphy_cpas_cp_reg_mask[offset] =
+		adj_lane_mask << (csiphy_dev->soc_info.index *
+		(CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES) +
+		(!cam_cmd_csiphy_info->csiphy_3phase) *
+		(CAM_CSIPHY_MAX_CPHY_LANES));
+
+	return 0;
+}
+
 int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
 	struct cam_config_dev_cmd *cfg_dev)
 {
 	int32_t                 rc = 0;
-	uint64_t                generic_ptr;
+	uintptr_t                generic_ptr;
 	struct cam_packet       *csl_packet = NULL;
 	struct cam_cmd_buf_desc *cmd_desc = NULL;
 	uint32_t                *cmd_buf = NULL;
@@ -92,7 +163,7 @@
 	}
 
 	rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
-		(uint64_t *)&generic_ptr, &len);
+		&generic_ptr, &len);
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
 		return rc;
@@ -105,14 +176,15 @@
 		return -EINVAL;
 	}
 
-	csl_packet = (struct cam_packet *)(generic_ptr + cfg_dev->offset);
+	csl_packet = (struct cam_packet *)
+		(generic_ptr + (uint32_t)cfg_dev->offset);
 
 	cmd_desc = (struct cam_cmd_buf_desc *)
 		((uint32_t *)&csl_packet->payload +
 		csl_packet->cmd_buf_offset / 4);
 
 	rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
-		(uint64_t *)&generic_ptr, &len);
+		&generic_ptr, &len);
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY,
 			"Failed to get cmd buf Mem address : %d", rc);
@@ -136,7 +208,10 @@
 		csiphy_dev->csiphy_info.settle_time =
 			cam_cmd_csiphy_info->settle_time;
 	csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
-	csiphy_dev->csiphy_info.secure_mode = cam_cmd_csiphy_info->secure_mode;
+
+	if (cam_cmd_csiphy_info->secure_mode == 1)
+		cam_csiphy_update_secure_info(csiphy_dev,
+			cam_cmd_csiphy_info, cfg_dev);
 
 	return rc;
 }
@@ -330,8 +405,10 @@
 				CAM_DBG(CAM_CSIPHY, "Do Nothing");
 			break;
 			}
-			usleep_range(reg_array[lane_pos][i].delay*1000,
-				reg_array[lane_pos][i].delay*1000 + 1000);
+			if (reg_array[lane_pos][i].delay > 0) {
+				usleep_range(reg_array[lane_pos][i].delay*1000,
+					reg_array[lane_pos][i].delay*1000 + 10);
+			}
 		}
 		lane_mask >>= 1;
 		lane_pos++;
@@ -345,6 +422,7 @@
 void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev)
 {
 	struct cam_hw_soc_info *soc_info;
+	int32_t i = 0;
 
 	if (csiphy_dev->csiphy_state == CAM_CSIPHY_INIT)
 		return;
@@ -352,13 +430,17 @@
 	if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
 		soc_info = &csiphy_dev->soc_info;
 
-		if (csiphy_dev->csiphy_info.secure_mode)
-			cam_csiphy_notify_secure_mode(
-				csiphy_dev->soc_info.index,
-				CAM_SECURE_MODE_NON_SECURE);
+		for (i = 0; i < csiphy_dev->acquire_count; i++) {
+			if (csiphy_dev->csiphy_info.secure_mode[i])
+				cam_csiphy_notify_secure_mode(
+					csiphy_dev,
+					CAM_SECURE_MODE_NON_SECURE, i);
 
-		csiphy_dev->csiphy_info.secure_mode =
-			CAM_SECURE_MODE_NON_SECURE;
+			csiphy_dev->csiphy_info.secure_mode[i] =
+				CAM_SECURE_MODE_NON_SECURE;
+
+			csiphy_dev->csiphy_cpas_cp_reg_mask[i] = 0;
+		}
 
 		cam_csiphy_reset(csiphy_dev);
 		cam_soc_util_disable_platform_resource(soc_info, true, true);
@@ -396,7 +478,7 @@
 	int32_t rc = 0;
 
 	if (copy_from_user(&cam_cmd_csiphy_info,
-		(void __user *)p_submit_cmd->packet_handle,
+		u64_to_user_ptr(p_submit_cmd->packet_handle),
 		sizeof(struct cam_csiphy_info))) {
 		CAM_ERR(CAM_CSIPHY, "failed to copy cam_csiphy_info\n");
 		rc = -EFAULT;
@@ -456,7 +538,7 @@
 		struct cam_create_dev_hdl bridge_params;
 
 		rc = copy_from_user(&csiphy_acq_dev,
-			(void __user *)cmd->handle,
+			u64_to_user_ptr(cmd->handle),
 			sizeof(csiphy_acq_dev));
 		if (rc < 0) {
 			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
@@ -466,7 +548,7 @@
 		csiphy_acq_params.combo_mode = 0;
 
 		if (copy_from_user(&csiphy_acq_params,
-			(void __user *)csiphy_acq_dev.info_handle,
+			u64_to_user_ptr(csiphy_acq_dev.info_handle),
 			sizeof(csiphy_acq_params))) {
 			CAM_ERR(CAM_CSIPHY,
 				"Failed copying from User");
@@ -522,7 +604,7 @@
 		bridge_intf->session_hdl[csiphy_acq_params.combo_mode] =
 			csiphy_acq_dev.session_handle;
 
-		if (copy_to_user((void __user *)cmd->handle,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
 				&csiphy_acq_dev,
 				sizeof(struct cam_sensor_acquire_dev))) {
 			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
@@ -540,7 +622,7 @@
 		struct cam_csiphy_query_cap csiphy_cap = {0};
 
 		cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
-		if (copy_to_user((void __user *)cmd->handle,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
 			&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
 			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
 			rc = -EINVAL;
@@ -549,6 +631,16 @@
 	}
 		break;
 	case CAM_STOP_DEV: {
+		int32_t offset, rc = 0;
+		struct cam_start_stop_dev_cmd config;
+
+		rc = copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+					sizeof(config));
+		if (rc < 0) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			goto release_mutex;
+		}
+
 		if ((csiphy_dev->csiphy_state != CAM_CSIPHY_START) ||
 			!csiphy_dev->start_dev_count) {
 			CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
@@ -556,20 +648,38 @@
 			goto release_mutex;
 		}
 
-		if (--csiphy_dev->start_dev_count) {
-			CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d",
-				csiphy_dev->start_dev_count);
+		offset = cam_csiphy_get_instance_offset(csiphy_dev,
+			config.dev_handle);
+		if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+			CAM_ERR(CAM_CSIPHY, "Invalid offset");
 			goto release_mutex;
 		}
 
-		if (csiphy_dev->csiphy_info.secure_mode)
-			cam_csiphy_notify_secure_mode(
-				csiphy_dev->soc_info.index,
-				CAM_SECURE_MODE_NON_SECURE);
+		if (--csiphy_dev->start_dev_count) {
+			CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d",
+				csiphy_dev->start_dev_count);
+			if (csiphy_dev->csiphy_info.secure_mode[offset])
+				cam_csiphy_notify_secure_mode(
+					csiphy_dev,
+					CAM_SECURE_MODE_NON_SECURE, offset);
 
-		csiphy_dev->csiphy_info.secure_mode =
+			csiphy_dev->csiphy_info.secure_mode[offset] =
+				CAM_SECURE_MODE_NON_SECURE;
+			csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0;
+
+			goto release_mutex;
+		}
+
+		if (csiphy_dev->csiphy_info.secure_mode[offset])
+			cam_csiphy_notify_secure_mode(
+				csiphy_dev,
+				CAM_SECURE_MODE_NON_SECURE, offset);
+
+		csiphy_dev->csiphy_info.secure_mode[offset] =
 			CAM_SECURE_MODE_NON_SECURE;
 
+		csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0x0;
+
 		rc = cam_csiphy_disable_hw(csiphy_dev);
 		if (rc < 0)
 			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
@@ -590,7 +700,8 @@
 			goto release_mutex;
 		}
 
-		if (copy_from_user(&release, (void __user *) cmd->handle,
+		if (copy_from_user(&release,
+			u64_to_user_ptr(cmd->handle),
 			sizeof(release))) {
 			rc = -EFAULT;
 			goto release_mutex;
@@ -628,7 +739,8 @@
 	case CAM_CONFIG_DEV: {
 		struct cam_config_dev_cmd config;
 
-		if (copy_from_user(&config, (void __user *)cmd->handle,
+		if (copy_from_user(&config,
+			u64_to_user_ptr(cmd->handle),
 					sizeof(config))) {
 			rc = -EFAULT;
 		} else {
@@ -643,12 +755,28 @@
 	case CAM_START_DEV: {
 		struct cam_ahb_vote ahb_vote;
 		struct cam_axi_vote axi_vote;
+		struct cam_start_stop_dev_cmd config;
+		int32_t offset;
+
+		rc = copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+			sizeof(config));
+		if (rc < 0) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			goto release_mutex;
+		}
 
 		if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
 			csiphy_dev->start_dev_count++;
 			goto release_mutex;
 		}
 
+		offset = cam_csiphy_get_instance_offset(csiphy_dev,
+			config.dev_handle);
+		if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+			CAM_ERR(CAM_CSIPHY, "Invalid offset");
+			goto release_mutex;
+		}
+
 		ahb_vote.type = CAM_VOTE_ABSOLUTE;
 		ahb_vote.vote.level = CAM_SVS_VOTE;
 		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
@@ -661,12 +789,12 @@
 			goto release_mutex;
 		}
 
-		if (csiphy_dev->csiphy_info.secure_mode) {
+		if (csiphy_dev->csiphy_info.secure_mode[offset] == 1) {
 			rc = cam_csiphy_notify_secure_mode(
-				csiphy_dev->soc_info.index,
-				CAM_SECURE_MODE_SECURE);
+				csiphy_dev,
+				CAM_SECURE_MODE_SECURE, offset);
 			if (rc < 0)
-				csiphy_dev->csiphy_info.secure_mode =
+				csiphy_dev->csiphy_info.secure_mode[offset] =
 					CAM_SECURE_MODE_NON_SECURE;
 		}
 
@@ -694,7 +822,7 @@
 		struct cam_config_dev_cmd submit_cmd;
 
 		if (copy_from_user(&submit_cmd,
-			(void __user *)cmd->handle,
+			u64_to_user_ptr(cmd->handle),
 			sizeof(struct cam_config_dev_cmd))) {
 			CAM_ERR(CAM_CSIPHY, "failed copy config ext\n");
 			rc = -EFAULT;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 9c85af3..ac96255 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -58,6 +58,11 @@
 #define CSIPHY_SETTLE_CNT_HIGHER_BYTE    3
 #define CSIPHY_DNP_PARAMS                4
 
+#define CSIPHY_MAX_INSTANCES     2
+
+#define CAM_CSIPHY_MAX_DPHY_LANES    4
+#define CAM_CSIPHY_MAX_CPHY_LANES    3
+
 #define ENABLE_IRQ false
 
 #undef CDBG
@@ -101,6 +106,12 @@
 	uint32_t csiphy_reset_array_size;
 	uint32_t csiphy_2ph_config_array_size;
 	uint32_t csiphy_3ph_config_array_size;
+	uint32_t csiphy_cpas_cp_bits_per_phy;
+	uint32_t csiphy_cpas_cp_is_interleaved;
+	uint32_t csiphy_cpas_cp_2ph_offset;
+	uint32_t csiphy_cpas_cp_3ph_offset;
+	uint32_t csiphy_clock_lane;
+	uint32_t csiphy_combo_clk_lane;
 };
 
 /**
@@ -111,9 +122,9 @@
  * @crm_cb: Callback API pointers
  */
 struct intf_params {
-	int32_t device_hdl[2];
-	int32_t session_hdl[2];
-	int32_t link_hdl[2];
+	int32_t device_hdl[CSIPHY_MAX_INSTANCES];
+	int32_t session_hdl[CSIPHY_MAX_INSTANCES];
+	int32_t link_hdl[CSIPHY_MAX_INSTANCES];
 	struct cam_req_mgr_kmd_ops ops;
 	struct cam_req_mgr_crm_cb *crm_cb;
 };
@@ -175,7 +186,7 @@
 	uint8_t     csiphy_3phase;
 	uint8_t     combo_mode;
 	uint8_t     lane_cnt;
-	uint8_t     secure_mode;
+	uint8_t     secure_mode[CSIPHY_MAX_INSTANCES];
 	uint64_t    settle_time;
 	uint64_t    settle_time_combo_sensor;
 	uint64_t    data_rate;
@@ -231,6 +242,7 @@
 	struct cam_hw_soc_info   soc_info;
 	uint32_t cpas_handle;
 	uint32_t config_count;
+	uint64_t csiphy_cpas_cp_reg_mask[CSIPHY_MAX_INSTANCES];
 };
 
 #endif /* _CAM_CSIPHY_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
index 3245093..82cff27 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -23,6 +23,8 @@
 	.csiphy_reset_array_size = 5,
 	.csiphy_2ph_config_array_size = 14,
 	.csiphy_3ph_config_array_size = 19,
+	.csiphy_clock_lane = 0x1,
+	.csiphy_combo_clk_lane = 0x10,
 };
 
 struct csiphy_reg_t csiphy_common_reg_1_0[] = {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index c8730ca..92bace4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -17,6 +17,7 @@
 #include "cam_eeprom_core.h"
 #include "cam_eeprom_soc.h"
 #include "cam_debug_util.h"
+#include "cam_common_util.h"
 
 /**
  * cam_eeprom_read_memory() - read map data into buffer
@@ -31,7 +32,8 @@
 {
 	int                                rc = 0;
 	int                                j;
-	struct cam_sensor_i2c_reg_setting  i2c_reg_settings;
+	struct cam_sensor_i2c_reg_setting  i2c_reg_settings = {
+						NULL, 0, 0, 0, 0};
 	struct cam_sensor_i2c_reg_array    i2c_reg_array;
 	struct cam_eeprom_memory_map_t    *emap = block->map;
 	struct cam_eeprom_soc_private     *eb_info;
@@ -221,7 +223,7 @@
 		CAM_ERR(CAM_EEPROM, "failed: power_info %pK", power_info);
 		return -EINVAL;
 	}
-	rc = msm_camera_power_down(power_info, soc_info);
+	rc = cam_sensor_util_power_down(power_info, soc_info);
 	if (rc) {
 		CAM_ERR(CAM_EEPROM, "power down the core is failed:%d", rc);
 		return rc;
@@ -341,7 +343,8 @@
 		CAM_ERR(CAM_EEPROM, "Device is already acquired");
 		return -EFAULT;
 	}
-	if (copy_from_user(&eeprom_acq_dev, (void __user *) cmd->handle,
+	if (copy_from_user(&eeprom_acq_dev,
+		u64_to_user_ptr(cmd->handle),
 		sizeof(eeprom_acq_dev))) {
 		CAM_ERR(CAM_EEPROM,
 			"EEPROM:ACQUIRE_DEV: copy from user failed");
@@ -360,8 +363,8 @@
 	e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle;
 
 	CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle);
-	if (copy_to_user((void __user *) cmd->handle, &eeprom_acq_dev,
-		sizeof(struct cam_sensor_acquire_dev))) {
+	if (copy_to_user(u64_to_user_ptr(cmd->handle),
+		&eeprom_acq_dev, sizeof(struct cam_sensor_acquire_dev))) {
 		CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed");
 		return -EFAULT;
 	}
@@ -530,7 +533,7 @@
 	struct cam_cmd_buf_desc        *cmd_desc = NULL;
 	uint32_t                       *offset = NULL;
 	uint32_t                       *cmd_buf = NULL;
-	uint64_t                        generic_pkt_addr;
+	uintptr_t                        generic_pkt_addr;
 	size_t                          pkt_len = 0;
 	uint32_t                        total_cmd_buf_in_bytes = 0;
 	uint32_t                        processed_cmd_buf_in_bytes = 0;
@@ -564,7 +567,7 @@
 		if (!total_cmd_buf_in_bytes)
 			continue;
 		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
-			(uint64_t *)&generic_pkt_addr, &pkt_len);
+			&generic_pkt_addr, &pkt_len);
 		if (rc) {
 			CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
 			return rc;
@@ -641,7 +644,7 @@
 	struct cam_buf_io_cfg *io_cfg;
 	uint32_t              i = 0;
 	int                   rc = 0;
-	uint64_t              buf_addr;
+	uintptr_t              buf_addr;
 	size_t                buf_size;
 	uint8_t               *read_buffer;
 
@@ -656,7 +659,7 @@
 		CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction);
 		if (io_cfg->direction == CAM_BUF_OUTPUT) {
 			rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
-				(uint64_t *)&buf_addr, &buf_size);
+				&buf_addr, &buf_size);
 			CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
 				(void *)buf_addr, buf_size);
 
@@ -699,7 +702,7 @@
 	int32_t                         rc = 0;
 	struct cam_control             *ioctl_ctrl = NULL;
 	struct cam_config_dev_cmd       dev_config;
-	uint64_t                        generic_pkt_addr;
+	uintptr_t                        generic_pkt_addr;
 	size_t                          pkt_len;
 	struct cam_packet              *csl_packet = NULL;
 	struct cam_eeprom_soc_private  *soc_private =
@@ -708,11 +711,12 @@
 
 	ioctl_ctrl = (struct cam_control *)arg;
 
-	if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
+	if (copy_from_user(&dev_config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
 		sizeof(dev_config)))
 		return -EFAULT;
 	rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
-		(uint64_t *)&generic_pkt_addr, &pkt_len);
+		&generic_pkt_addr, &pkt_len);
 	if (rc) {
 		CAM_ERR(CAM_EEPROM,
 			"error in converting command Handle Error: %d", rc);
@@ -727,7 +731,7 @@
 	}
 
 	csl_packet = (struct cam_packet *)
-		(generic_pkt_addr + dev_config.offset);
+		(generic_pkt_addr + (uint32_t)dev_config.offset);
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
 	case CAM_EEPROM_PACKET_OPCODE_INIT:
 		if (e_ctrl->userspace_probe == false) {
@@ -880,7 +884,7 @@
 		else
 			eeprom_cap.eeprom_kernel_probe = false;
 
-		if (copy_to_user((void __user *) cmd->handle,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
 			&eeprom_cap,
 			sizeof(struct cam_eeprom_query_cap_t))) {
 			CAM_ERR(CAM_EEPROM, "Failed Copy to User");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index cc34a70..6d8820a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -261,9 +261,10 @@
 	for (i = 0; i < soc_info->num_clk; i++)
 		devm_clk_put(soc_info->dev, soc_info->clk[i]);
 
-	if (soc_private)
-		kfree(soc_private);
-
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	kfree(soc_private);
+	kfree(e_ctrl->io_master_info.cci_client);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
 	kfree(e_ctrl);
 
 	return 0;
@@ -394,6 +395,8 @@
 		kfree(soc_private->power_info.gpio_num_info);
 		kfree(soc_private);
 	}
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
 	kfree(e_ctrl);
 
 	return 0;
@@ -489,8 +492,11 @@
 	for (i = 0; i < soc_info->num_clk; i++)
 		devm_clk_put(soc_info->dev, soc_info->clk[i]);
 
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
 	kfree(soc_info->soc_private);
 	kfree(e_ctrl->io_master_info.cci_client);
+	platform_set_drvdata(pdev, NULL);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
 	kfree(e_ctrl);
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
index c7889a5..4d1cbdc 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -1,10 +1,11 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index e997168..ff385ca 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -15,8 +15,9 @@
 #include "cam_sensor_cmn_header.h"
 #include "cam_flash_core.h"
 #include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
 
-int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+static int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
 	bool regulator_enable)
 {
 	int rc = 0;
@@ -55,7 +56,7 @@
 	return rc;
 }
 
-static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+static int cam_flash_pmic_flush_nrt(struct cam_flash_ctrl *fctrl)
 {
 	int j = 0;
 	struct cam_flash_frame_setting *nrt_settings;
@@ -86,20 +87,187 @@
 	return 0;
 }
 
-int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+static int cam_flash_i2c_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int rc = 0;
+
+	if (fctrl->i2c_data.init_settings.is_settings_valid == true) {
+		rc = delete_request(&fctrl->i2c_data.init_settings);
+		if (rc) {
+			CAM_WARN(CAM_FLASH,
+				"Failed to delete Init i2c_setting: %d",
+				rc);
+			return rc;
+		}
+	}
+	if (fctrl->i2c_data.config_settings.is_settings_valid == true) {
+		rc = delete_request(&fctrl->i2c_data.config_settings);
+		if (rc) {
+			CAM_WARN(CAM_FLASH,
+				"Failed to delete NRT i2c_setting: %d",
+				rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_flash_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+	power_info->power_setting[0].seq_val = CAM_V_CUSTOM1;
+	power_info->power_setting[0].config_val = 0;
+	power_info->power_setting[0].delay = 2;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+	power_info->power_down_setting[0].seq_val = CAM_V_CUSTOM1;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+
+	if (!(fctrl->switch_trigger)) {
+		CAM_ERR(CAM_FLASH, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (regulator_enable) {
+		rc = cam_flash_prepare(fctrl, true);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Enable Regulator Failed rc = %d", rc);
+			return rc;
+		}
+	}
+
+	if (!regulator_enable) {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
+			(fctrl->is_regulator_enabled == true)) {
+			rc = cam_flash_prepare(fctrl, false);
+			if (rc)
+				CAM_ERR(CAM_FLASH,
+					"Disable Regulator Failed rc: %d", rc);
+		}
+	}
+
+	return rc;
+}
+
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &fctrl->soc_info;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&fctrl->power_info;
+
+	if (!power_info || !soc_info) {
+		CAM_ERR(CAM_FLASH, "Power Info is NULL");
+		return -EINVAL;
+	}
+	power_info->dev = soc_info->dev;
+
+	if (regulator_enable && (fctrl->is_regulator_enabled == false)) {
+		if ((power_info->power_setting == NULL) &&
+			(power_info->power_down_setting == NULL)) {
+			CAM_INFO(CAM_FLASH,
+				"Using default power settings");
+			rc = cam_flash_construct_default_power_setting(
+					power_info);
+			if (rc < 0) {
+				CAM_ERR(CAM_FLASH,
+				"Construct default pwr setting failed rc: %d",
+				rc);
+				return rc;
+			}
+		}
+
+		rc = cam_sensor_core_power_up(power_info, soc_info);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "power up the core failed: %d",
+				rc);
+			goto free_pwr_settings;
+		}
+
+		rc = camera_io_init(&(fctrl->io_master_info));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "cci_init failed: rc: %d", rc);
+			cam_sensor_util_power_down(power_info, soc_info);
+			goto free_pwr_settings;
+		}
+		fctrl->is_regulator_enabled = true;
+	} else if ((!regulator_enable) &&
+		(fctrl->is_regulator_enabled == true)) {
+		rc = cam_sensor_util_power_down(power_info, soc_info);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "power down the core failed: %d",
+				rc);
+			return rc;
+		}
+		camera_io_release(&(fctrl->io_master_info));
+		fctrl->is_regulator_enabled = false;
+		goto free_pwr_settings;
+	}
+	return rc;
+
+free_pwr_settings:
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_setting_size = 0;
+	power_info->power_down_setting_size = 0;
+
+	return rc;
+}
+
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id)
 {
 	int rc = 0;
 	int i = 0, j = 0;
-	struct cam_flash_ctrl *fctrl = NULL;
 	int frame_offset = 0;
 
-	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
 	if (!fctrl) {
 		CAM_ERR(CAM_FLASH, "Device data is NULL");
 		return -EINVAL;
 	}
 
-	if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+	if (type == FLUSH_ALL) {
+		cam_flash_off(fctrl);
 	/* flush all requests*/
 		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
 			fctrl->per_frame[i].cmn_attr.request_id = 0;
@@ -109,19 +277,105 @@
 				fctrl->per_frame[i].led_current_ma[j] = 0;
 		}
 
-		rc = cam_flash_flush_nrt(fctrl);
-		if (rc)
-			CAM_ERR(CAM_FLASH, "NonRealTime flush error");
-	} else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		cam_flash_pmic_flush_nrt(fctrl);
+	} else if ((type == FLUSH_REQ) && (req_id != 0)) {
 	/* flush request with req_id*/
-		frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
 		fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
 		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
 			false;
 		fctrl->per_frame[frame_offset].cmn_attr.count = 0;
 		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
 			fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+	} else if ((type == FLUSH_REQ) && (req_id == 0)) {
+		/* Handles the NonRealTime use case */
+		cam_flash_pmic_flush_nrt(fctrl);
+	} else {
+		CAM_ERR(CAM_FLASH, "Invalid arguments");
+		return -EINVAL;
 	}
+
+	return rc;
+}
+
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id)
+{
+	int rc = 0;
+	int i = 0;
+	uint32_t cancel_req_id_found = 0;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+	if ((type == FLUSH_REQ) && (req_id == 0)) {
+		/* This path is taken only when the NonRealTime
+		 * settings need to be cleaned.
+		 */
+		cam_flash_i2c_flush_nrt(fctrl);
+	} else {
+		/* All other use cases are handled here */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			i2c_set = &(fctrl->i2c_data.per_frame[i]);
+
+			if ((type == FLUSH_REQ) &&
+				(i2c_set->request_id != req_id))
+				continue;
+
+			if (i2c_set->is_settings_valid == 1) {
+				rc = delete_request(i2c_set);
+				if (rc < 0)
+					CAM_ERR(CAM_FLASH,
+						"delete request: %lld rc: %d",
+						i2c_set->request_id, rc);
+
+				if (type == FLUSH_REQ) {
+					cancel_req_id_found = 1;
+					break;
+				}
+			}
+		}
+	}
+
+	if ((type == FLUSH_REQ) && (req_id != 0) &&
+			(!cancel_req_id_found))
+		CAM_DBG(CAM_FLASH,
+			"Flush request id:%lld not found in the pending list",
+			req_id);
+
+	return rc;
+}
+
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&fctrl->flash_mutex);
+	if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		rc = fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "FLUSH_TYPE_ALL failed rc: %d", rc);
+			goto end;
+		}
+	} else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		rc = fctrl->func_tbl.flush_req(fctrl,
+				FLUSH_REQ, flush->req_id);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "FLUSH_REQ failed rc: %d", rc);
+			goto end;
+		}
+	}
+end:
+	mutex_unlock(&fctrl->flash_mutex);
 	return rc;
 }
 
@@ -254,26 +508,51 @@
 	return rc;
 }
 
-static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id)
+static int cam_flash_i2c_delete_req(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	int i = 0, rc = 0;
+	uint64_t top = 0, del_req_id = 0;
+
+	if (req_id != 0) {
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			if ((req_id >=
+				fctrl->i2c_data.per_frame[i].request_id) &&
+				(top <
+				fctrl->i2c_data.per_frame[i].request_id) &&
+				(fctrl->i2c_data.per_frame[i].is_settings_valid
+					== 1)) {
+				del_req_id = top;
+				top = fctrl->i2c_data.per_frame[i].request_id;
+			}
+		}
+
+		if (top < req_id) {
+			if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+				(((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+				del_req_id = req_id;
+		}
+
+		if (!del_req_id)
+			return rc;
+
+		CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
+			top, del_req_id);
+	}
+	fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+	return 0;
+}
+
+static int cam_flash_pmic_delete_req(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
 {
 	int i = 0;
 	struct cam_flash_frame_setting *flash_data = NULL;
 	uint64_t top = 0, del_req_id = 0;
 
-	if (req_id == 0) {
-		flash_data = &fctrl->nrt_info;
-		if ((fctrl->nrt_info.cmn_attr.cmd_type ==
-			CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
-			(fctrl->nrt_info.cmn_attr.cmd_type ==
-			CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
-			flash_data->cmn_attr.is_settings_valid = false;
-			for (i = 0; i < flash_data->cmn_attr.count; i++)
-				flash_data->led_current_ma[i] = 0;
-		} else {
-			fctrl->flash_init_setting.cmn_attr.is_settings_valid
-				= false;
-		}
-	} else {
+	if (req_id != 0) {
 		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
 			flash_data = &fctrl->per_frame[i];
 			if (req_id >= flash_data->cmn_attr.request_id &&
@@ -305,28 +584,100 @@
 
 		CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
 			top, del_req_id);
+	}
 
-		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
-			flash_data = &fctrl->per_frame[i];
-			if ((del_req_id ==
-				flash_data->cmn_attr.request_id) &&
-				(flash_data->cmn_attr.
-					is_settings_valid == 1)) {
-				CAM_DBG(CAM_FLASH, "Deleting request[%d] %llu",
-					i, flash_data->cmn_attr.request_id);
-				flash_data->cmn_attr.request_id = 0;
-				flash_data->cmn_attr.is_settings_valid = false;
-				flash_data->opcode = 0;
-				for (i = 0; i < flash_data->cmn_attr.count; i++)
-					flash_data->led_current_ma[i] = 0;
+	fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+	return 0;
+}
+
+static int32_t cam_flash_slaveInfo_pkt_parser(struct cam_flash_ctrl *fctrl,
+	uint32_t *cmd_buf)
+{
+	int32_t rc = 0;
+	struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+
+	if (fctrl->io_master_info.master_type == CCI_MASTER) {
+		fctrl->io_master_info.cci_client->cci_i2c_master =
+			fctrl->cci_i2c_master;
+		fctrl->io_master_info.cci_client->i2c_freq_mode =
+			i2c_info->i2c_freq_mode;
+		fctrl->io_master_info.cci_client->sid =
+			i2c_info->slave_addr >> 1;
+		CAM_DBG(CAM_FLASH, "Slave addr: 0x%x Freq Mode: %d",
+			i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+	} else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+		fctrl->io_master_info.client->addr = i2c_info->slave_addr;
+		CAM_DBG(CAM_FLASH, "Slave addr: 0x%x", i2c_info->slave_addr);
+	} else {
+		CAM_ERR(CAM_FLASH, "Invalid Master type: %d",
+			fctrl->io_master_info.master_type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	struct i2c_settings_list *i2c_list;
+	struct i2c_settings_array *i2c_set = NULL;
+	int frame_offset = 0, rc = 0;
+
+	if (req_id == 0) {
+		/* NonRealTime Init settings */
+		if (fctrl->i2c_data.init_settings.is_settings_valid == true) {
+			list_for_each_entry(i2c_list,
+				&(fctrl->i2c_data.init_settings.list_head),
+				list) {
+				rc = cam_sensor_util_i2c_apply_setting
+					(&(fctrl->io_master_info), i2c_list);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed to apply init settings: %d",
+					rc);
+					return rc;
+				}
+			}
+		}
+		/* NonRealTime (Widget/RER/INIT_FIRE settings) */
+		if (fctrl->i2c_data.config_settings.is_settings_valid == true) {
+			list_for_each_entry(i2c_list,
+				&(fctrl->i2c_data.config_settings.list_head),
+				list) {
+				rc = cam_sensor_util_i2c_apply_setting
+					(&(fctrl->io_master_info), i2c_list);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed to apply NRT settings: %d", rc);
+					return rc;
+				}
+			}
+		}
+	} else {
+		/* RealTime */
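+		/* Per-frame settings are stored in a ring indexed by req_id */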
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+		i2c_set = &fctrl->i2c_data.per_frame[frame_offset];
+		if ((i2c_set->is_settings_valid == true) &&
+			(i2c_set->request_id == req_id)) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = cam_sensor_util_i2c_apply_setting(
+					&(fctrl->io_master_info), i2c_list);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed to apply settings: %d", rc);
+					return rc;
+				}
 			}
 		}
 	}
 
-	return 0;
+	cam_flash_i2c_delete_req(fctrl, req_id);
+	return rc;
 }
 
-int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl,
 	uint64_t req_id)
 {
 	int rc = 0, i = 0;
@@ -344,12 +695,12 @@
 
 			if (flash_data->opcode ==
 				CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
-				if (fctrl->flash_state !=
-					CAM_FLASH_STATE_CONFIG) {
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_START) {
 					CAM_WARN(CAM_FLASH,
-					"Cannot apply Start Dev:Prev state: %d",
+					"Wrong state: Prev state: %d",
 					fctrl->flash_state);
-					return rc;
+					return -EINVAL;
 				}
 				rc = cam_flash_prepare(fctrl, true);
 				if (rc) {
@@ -360,8 +711,27 @@
 				rc = cam_flash_high(fctrl, flash_data);
 				if (rc)
 					CAM_ERR(CAM_FLASH,
-						"FLASH ON failed : %d",
-						rc);
+						"FLASH ON failed : %d", rc);
+			}
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_START) {
+					CAM_WARN(CAM_FLASH,
+					"Wrong state: Prev state: %d",
+					fctrl->flash_state);
+					return -EINVAL;
+				}
+				rc = cam_flash_prepare(fctrl, true);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Enable Regulator Failed rc = %d", rc);
+					return rc;
+				}
+				rc = cam_flash_low(fctrl, flash_data);
+				if (rc)
+					CAM_ERR(CAM_FLASH,
+						"TORCH ON failed : %d", rc);
 			}
 			if (flash_data->opcode ==
 				CAMERA_SENSOR_FLASH_OP_OFF) {
@@ -409,7 +779,6 @@
 		} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
 			CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
 			flash_data = &fctrl->nrt_info;
-
 			if (fctrl->flash_state != CAM_FLASH_STATE_START) {
 				rc = cam_flash_off(fctrl);
 				if (rc) {
@@ -442,8 +811,7 @@
 				rc = cam_flash_off(fctrl);
 				if (rc) {
 					CAM_ERR(CAM_FLASH,
-						"Flash off failed: %d",
-						rc);
+						"Flash off failed: %d", rc);
 					continue;
 				}
 				fctrl->flash_state = CAM_FLASH_STATE_START;
@@ -505,15 +873,325 @@
 	}
 
 nrt_del_req:
-	delete_req(fctrl, req_id);
+	cam_flash_pmic_delete_req(fctrl, req_id);
 apply_setting_err:
 	return rc;
 }
 
-int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
 {
 	int rc = 0, i = 0;
-	uint64_t generic_ptr;
+	uintptr_t generic_ptr;
+	uint32_t total_cmd_buf_in_bytes = 0;
+	uint32_t processed_cmd_buf_in_bytes = 0;
+	uint16_t cmd_length_in_bytes = 0;
+	uint32_t *cmd_buf = NULL;
+	uint32_t *offset = NULL;
+	uint32_t frm_offset = 0;
+	size_t len_of_buffer;
+	struct cam_flash_init *flash_init = NULL;
+	struct common_header  *cmn_hdr = NULL;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_config_dev_cmd config;
+	struct cam_req_mgr_add_request add_req;
+	struct i2c_data_settings *i2c_data = NULL;
+	struct i2c_settings_array *i2c_reg_settings = NULL;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+		return -EINVAL;
+	}
+	/* getting CSL Packet */
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (copy_from_user((&config), u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(config))) {
+		CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+		return -EFAULT;
+	}
+
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		&generic_ptr, &len_of_buffer);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+		return rc;
+	}
+
+	if (config.offset > len_of_buffer) {
+		CAM_ERR(CAM_FLASH,
+			"offset is out of bounds: offset: %lld len: %zu",
+			config.offset, len_of_buffer);
+		return -EINVAL;
+	}
+
+	/* Add offset to the flash csl header */
+	csl_packet = (struct cam_packet *)(uintptr_t)(generic_ptr +
+			config.offset);
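+	/* Lower 24 bits of the op_code carry the flash packet opcode */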
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_FLASH_PACKET_OPCODE_INIT: {
+		/* INIT packet*/
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+		/* Loop through multiple command buffers */
+		for (i = 1; i < csl_packet->num_cmd_buf; i++) {
+			total_cmd_buf_in_bytes = cmd_desc[i].length;
+			processed_cmd_buf_in_bytes = 0;
+			if (!total_cmd_buf_in_bytes)
+				continue;
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				&generic_ptr, &len_of_buffer);
+			if (rc < 0) {
+				CAM_ERR(CAM_FLASH, "Failed to get cpu buf");
+				return rc;
+			}
+			cmd_buf = (uint32_t *)generic_ptr;
+			if (!cmd_buf) {
+				CAM_ERR(CAM_FLASH, "invalid cmd buf");
+				return -EINVAL;
+			}
+			cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+			cmn_hdr = (struct common_header *)cmd_buf;
+
+			/* Loop through cmd formats in one cmd buffer */
+			CAM_DBG(CAM_FLASH,
+				"command Type: %d,Processed: %d,Total: %d",
+				cmn_hdr->cmd_type, processed_cmd_buf_in_bytes,
+				total_cmd_buf_in_bytes);
+			switch (cmn_hdr->cmd_type) {
+			case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
+				flash_init = (struct cam_flash_init *)cmd_buf;
+				fctrl->flash_type = flash_init->flash_type;
+				cmd_length_in_bytes =
+					sizeof(struct cam_flash_init);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				rc = cam_flash_slaveInfo_pkt_parser(
+					fctrl, cmd_buf);
+				if (rc < 0) {
+					CAM_ERR(CAM_FLASH,
+					"Failed parsing slave info: rc: %d",
+					rc);
+					return rc;
+				}
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				CAM_DBG(CAM_FLASH,
+					"Received power settings");
+				cmd_length_in_bytes =
+					total_cmd_buf_in_bytes;
+				rc = cam_sensor_update_power_settings(
+					cmd_buf,
+					total_cmd_buf_in_bytes,
+					&fctrl->power_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed to update power settings");
+					return rc;
+				}
+				break;
+			default:
+				CAM_DBG(CAM_FLASH,
+					"Received initSettings");
+				i2c_data = &(fctrl->i2c_data);
+				i2c_reg_settings =
+					&fctrl->i2c_data.init_settings;
+
+				i2c_reg_settings->request_id = 0;
+				i2c_reg_settings->is_settings_valid = 1;
+				rc = cam_sensor_i2c_command_parser(
+					&fctrl->io_master_info,
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_FLASH,
+					"pkt parsing failed: %d", rc);
+					return rc;
+				}
+				cmd_length_in_bytes =
+					cmd_desc[i].length;
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+
+				break;
+			}
+		}
+		power_info = &fctrl->power_info;
+		if (!power_info) {
+			CAM_ERR(CAM_FLASH, "Power_info is NULL");
+			return -EINVAL;
+		}
+
+		/* Parse and fill vreg params for power up settings */
+		rc = msm_camera_fill_vreg_params(&fctrl->soc_info,
+			power_info->power_setting,
+			power_info->power_setting_size);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"failed to fill vreg params for power up rc:%d",
+				rc);
+			return rc;
+		}
+
+		/* Parse and fill vreg params for power down settings*/
+		rc = msm_camera_fill_vreg_params(
+			&fctrl->soc_info,
+			power_info->power_down_setting,
+			power_info->power_down_setting_size);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"failed to fill vreg params power down rc:%d",
+				rc);
+			return rc;
+		}
+
+		rc = fctrl->func_tbl.power_ops(fctrl, true);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Enable Regulator Failed rc = %d", rc);
+			return rc;
+		}
+
+		rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
+			return rc;
+		}
+
+		fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		frm_offset = csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY;
+		/* Per-frame i2c settings for this request */
+		i2c_reg_settings =
+			&fctrl->i2c_data.per_frame[frm_offset];
+		if (i2c_reg_settings->is_settings_valid == true) {
+			i2c_reg_settings->request_id = 0;
+			i2c_reg_settings->is_settings_valid = false;
+			goto update_req_mgr;
+		}
+		i2c_reg_settings->is_settings_valid = true;
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&fctrl->io_master_info,
+			i2c_reg_settings, cmd_desc, 1);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+			"Failed in parsing i2c packets");
+			return rc;
+		}
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+
+		/* Non-realtime i2c config settings */
+		i2c_reg_settings = &fctrl->i2c_data.config_settings;
+		if (i2c_reg_settings->is_settings_valid == true) {
+			i2c_reg_settings->request_id = 0;
+			i2c_reg_settings->is_settings_valid = false;
+
+			rc = delete_request(i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+				"Failed in deleting the request: %d", rc);
+				return rc;
+			}
+		}
+		i2c_reg_settings->is_settings_valid = true;
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&fctrl->io_master_info,
+			i2c_reg_settings, cmd_desc, 1);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+			"Failed in parsing i2c NRT packets");
+			return rc;
+		}
+		rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+		if (rc)
+			CAM_ERR(CAM_FLASH,
+			"Apply setting failed: %d", rc);
+		return rc;
+	}
+	case CAM_PKT_NOP_OPCODE: {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+			CAM_WARN(CAM_FLASH,
+				"Received NOP packet without linking");
+			frm_offset = csl_packet->header.request_id %
+				MAX_PER_FRAME_ARRAY;
+			fctrl->i2c_data.per_frame[frm_offset].is_settings_valid
+				= false;
+			return 0;
+		}
+
+		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu",
+			csl_packet->header.request_id);
+		goto update_req_mgr;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+			(csl_packet->header.op_code & 0xFFFFFF));
+		return -EINVAL;
+	}
+update_req_mgr:
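+	/* Notify the request manager only for SET_OPS and NOP packets */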
+	if (((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_PKT_NOP_OPCODE) ||
+		((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+		add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+		add_req.req_id = csl_packet->header.request_id;
+		add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+
+		if ((csl_packet->header.op_code & 0xFFFFFF) ==
+			CAM_FLASH_PACKET_OPCODE_SET_OPS)
+			add_req.skip_before_applying = 1;
+		else
+			add_req.skip_before_applying = 0;
+
+		if (fctrl->bridge_intf.crm_cb &&
+			fctrl->bridge_intf.crm_cb->add_req)
+			fctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+	}
+	return rc;
+}
+
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+	int rc = 0, i = 0;
+	uintptr_t generic_ptr;
 	uint32_t *cmd_buf =  NULL;
 	uint32_t *offset = NULL;
 	uint32_t frm_offset = 0;
@@ -537,7 +1215,8 @@
 	/* getting CSL Packet */
 	ioctl_ctrl = (struct cam_control *)arg;
 
-	if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle,
+	if (copy_from_user((&config),
+		u64_to_user_ptr(ioctl_ctrl->handle),
 		sizeof(config))) {
 		CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
 		rc = -EFAULT;
@@ -545,7 +1224,7 @@
 	}
 
 	rc = cam_mem_get_cpu_buf(config.packet_handle,
-		(uint64_t *)&generic_ptr, &len_of_buffer);
+		&generic_ptr, &len_of_buffer);
 	if (rc) {
 		CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
 		return rc;
@@ -559,57 +1238,69 @@
 	}
 
 	/* Add offset to the flash csl header */
-	csl_packet = (struct cam_packet *)(generic_ptr + config.offset);
+	csl_packet =
+		(struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
 
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
 	case CAM_FLASH_PACKET_OPCODE_INIT: {
 		/* INIT packet*/
 		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
 			csl_packet->cmd_buf_offset);
-		fctrl->flash_init_setting.cmn_attr.request_id = 0;
-		fctrl->flash_init_setting.cmn_attr.is_settings_valid = true;
 		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
 		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
-			(uint64_t *)&generic_ptr, &len_of_buffer);
+			&generic_ptr, &len_of_buffer);
 		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
 			cmd_desc->offset);
 		cam_flash_info = (struct cam_flash_init *)cmd_buf;
 
 		switch (cam_flash_info->cmd_type) {
-		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: {
+			CAM_DBG(CAM_FLASH, "INIT_INFO CMD CALLED");
+			fctrl->flash_init_setting.cmn_attr.request_id = 0;
+			fctrl->flash_init_setting.cmn_attr.is_settings_valid =
+				true;
 			fctrl->flash_type = cam_flash_info->flash_type;
 			fctrl->is_regulator_enabled = false;
 			fctrl->nrt_info.cmn_attr.cmd_type =
 				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO;
+
+			rc = fctrl->func_tbl.power_ops(fctrl, true);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+					"Enable Regulator Failed rc = %d", rc);
+				return rc;
+			}
+
 			fctrl->flash_state =
 				CAM_FLASH_STATE_CONFIG;
 			break;
-		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE:
-			CAM_DBG(CAM_FLASH, "Widget Flash Operation");
-				flash_operation_info =
-					(struct cam_flash_set_on_off *) cmd_buf;
-				fctrl->nrt_info.cmn_attr.count =
-					flash_operation_info->count;
-				fctrl->nrt_info.cmn_attr.request_id = 0;
-				fctrl->nrt_info.opcode =
-					flash_operation_info->opcode;
-				fctrl->nrt_info.cmn_attr.cmd_type =
-					CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE;
-				for (i = 0;
-					i < flash_operation_info->count; i++)
-					fctrl->nrt_info.led_current_ma[i] =
-					flash_operation_info->led_current_ma[i];
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: {
+			CAM_DBG(CAM_FLASH, "INIT_FIRE Operation");
 
-				mutex_lock(&fctrl->flash_wq_mutex);
-				rc = cam_flash_apply_setting(fctrl, 0);
-				if (rc)
-					CAM_ERR(CAM_FLASH,
-						"Apply setting failed: %d",
-						rc);
-				mutex_unlock(&fctrl->flash_wq_mutex);
-				fctrl->flash_state =
-					CAM_FLASH_STATE_CONFIG;
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			fctrl->nrt_info.cmn_attr.count =
+				flash_operation_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.opcode =
+				flash_operation_info->opcode;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE;
+			for (i = 0;
+				i < flash_operation_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+				flash_operation_info->led_current_ma[i];
+
+			rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH,
+					"Apply setting failed: %d",
+					rc);
+
+			fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
 			break;
+		}
 		default:
 			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
 				cam_flash_info->cmd_type);
@@ -635,7 +1326,7 @@
 		flash_data->cmn_attr.is_settings_valid = true;
 		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
 		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
-			(uint64_t *)&generic_ptr, &len_of_buffer);
+			&generic_ptr, &len_of_buffer);
 		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
 			cmd_desc->offset);
 
@@ -647,7 +1338,7 @@
 		switch (cmn_hdr->cmd_type) {
 		case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
 			CAM_DBG(CAM_FLASH,
-				"CAMERA_FLASH_CMD_TYPE_OPS case called");
+				"CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE cmd called");
 			if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
 				(fctrl->flash_state ==
 					CAM_FLASH_STATE_ACQUIRE)) {
@@ -671,8 +1362,8 @@
 			for (i = 0; i < flash_operation_info->count; i++)
 				flash_data->led_current_ma[i]
 				= flash_operation_info->led_current_ma[i];
-			}
-			break;
+		}
+		break;
 		default:
 			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
 				cmn_hdr->cmd_type);
@@ -686,7 +1377,7 @@
 		fctrl->nrt_info.cmn_attr.is_settings_valid = true;
 		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
 		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
-			(uint64_t *)&generic_ptr, &len_of_buffer);
+			&generic_ptr, &len_of_buffer);
 		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
 			cmd_desc->offset);
 		cmn_hdr = (struct common_header *)cmd_buf;
@@ -708,12 +1399,10 @@
 				fctrl->nrt_info.led_current_ma[i] =
 					flash_operation_info->led_current_ma[i];
 
-			mutex_lock(&fctrl->flash_wq_mutex);
-			rc = cam_flash_apply_setting(fctrl, 0);
+			rc = fctrl->func_tbl.apply_setting(fctrl, 0);
 			if (rc)
 				CAM_ERR(CAM_FLASH, "Apply setting failed: %d",
 					rc);
-			mutex_unlock(&fctrl->flash_wq_mutex);
 			return rc;
 		}
 		case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: {
@@ -754,12 +1443,10 @@
 					flash_rer_info->led_current_ma[i];
 
 
-			mutex_lock(&fctrl->flash_wq_mutex);
-			rc = cam_flash_apply_setting(fctrl, 0);
+			rc = fctrl->func_tbl.apply_setting(fctrl, 0);
 			if (rc)
 				CAM_ERR(CAM_FLASH, "apply_setting failed: %d",
 					rc);
-			mutex_unlock(&fctrl->flash_wq_mutex);
 			return rc;
 		}
 		default:
@@ -767,7 +1454,6 @@
 				cmn_hdr->cmd_type);
 			return -EINVAL;
 		}
-
 		break;
 	}
 	case CAM_PKT_NOP_OPCODE: {
@@ -785,7 +1471,7 @@
 		fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid = false;
 		fctrl->per_frame[frm_offset].cmn_attr.request_id = 0;
 		fctrl->per_frame[frm_offset].opcode = CAM_PKT_NOP_OPCODE;
-		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu",
 			csl_packet->header.request_id);
 		goto update_req_mgr;
 	}
@@ -839,7 +1525,7 @@
 		CAM_ERR(CAM_FLASH, " Device data is NULL");
 		return -EINVAL;
 	}
-
+	mutex_lock(&fctrl->flash_mutex);
 	if (link->link_enable) {
 		fctrl->bridge_intf.link_hdl = link->link_hdl;
 		fctrl->bridge_intf.crm_cb = link->crm_cb;
@@ -847,43 +1533,11 @@
 		fctrl->bridge_intf.link_hdl = -1;
 		fctrl->bridge_intf.crm_cb = NULL;
 	}
+	mutex_unlock(&fctrl->flash_mutex);
 
 	return 0;
 }
 
-
-int cam_flash_stop_dev(struct cam_flash_ctrl *fctrl)
-{
-	int rc = 0, i, j;
-
-	cam_flash_off(fctrl);
-
-	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
-		fctrl->per_frame[i].cmn_attr.request_id = 0;
-		fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
-		fctrl->per_frame[i].cmn_attr.count = 0;
-		for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
-			fctrl->per_frame[i].led_current_ma[j] = 0;
-	}
-
-	rc = cam_flash_flush_nrt(fctrl);
-	if (rc) {
-		CAM_ERR(CAM_FLASH,
-			"NonRealTime Dev flush failed rc: %d", rc);
-		return rc;
-	}
-
-	if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
-		(fctrl->is_regulator_enabled == true)) {
-		rc = cam_flash_prepare(fctrl, false);
-		if (rc)
-			CAM_ERR(CAM_FLASH, "Disable Regulator Failed rc: %d",
-				rc);
-	}
-
-	return rc;
-}
-
 int cam_flash_release_dev(struct cam_flash_ctrl *fctrl)
 {
 	int rc = 0;
@@ -911,9 +1565,13 @@
 
 	if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
 		(fctrl->flash_state == CAM_FLASH_STATE_START)) {
-		rc = cam_flash_stop_dev(fctrl);
+		mutex_lock(&(fctrl->flash_mutex));
+		fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+		mutex_unlock(&(fctrl->flash_mutex));
+		rc = fctrl->func_tbl.power_ops(fctrl, false);
 		if (rc)
-			CAM_ERR(CAM_FLASH, "Stop Failed rc: %d", rc);
+			CAM_ERR(CAM_FLASH, "Power Down Failed rc: %d",
+				rc);
 	}
 
 	rc = cam_flash_release_dev(fctrl);
@@ -937,12 +1595,12 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&fctrl->flash_wq_mutex);
-	rc = cam_flash_apply_setting(fctrl, apply->request_id);
+	mutex_lock(&fctrl->flash_mutex);
+	rc = fctrl->func_tbl.apply_setting(fctrl, apply->request_id);
 	if (rc)
 		CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d",
 			rc);
-	mutex_unlock(&fctrl->flash_wq_mutex);
+	mutex_unlock(&fctrl->flash_mutex);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
index f73409a..1bd3b31 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,20 +16,12 @@
 #include <linux/leds-qpnp-flash.h>
 #include <media/cam_sensor.h>
 #include "cam_flash_dev.h"
-#include "cam_sync_api.h"
-#include "cam_mem_mgr_api.h"
 
-int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg);
 int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info);
 int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
-int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
 int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply);
 int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data);
 int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush);
-int cam_flash_off(struct cam_flash_ctrl *fctrl);
-int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
-	bool regulator_enable);
-void cam_flash_shutdown(struct cam_flash_ctrl *flash_ctrl);
-int cam_flash_stop_dev(struct cam_flash_ctrl *flash_ctrl);
-int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
+
 #endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index d9b5f64..4a6307d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -15,6 +15,7 @@
 #include "cam_flash_dev.h"
 #include "cam_flash_soc.h"
 #include "cam_flash_core.h"
+#include "cam_common_util.h"
 
 static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
 		void *arg, struct cam_flash_private_soc *soc_private)
@@ -57,7 +58,8 @@
 			goto release_mutex;
 		}
 
-		rc = copy_from_user(&flash_acq_dev, (void __user *)cmd->handle,
+		rc = copy_from_user(&flash_acq_dev,
+			u64_to_user_ptr(cmd->handle),
 			sizeof(flash_acq_dev));
 		if (rc) {
 			CAM_ERR(CAM_FLASH, "Failed Copying from User");
@@ -77,7 +79,8 @@
 		fctrl->bridge_intf.session_hdl =
 			flash_acq_dev.session_handle;
 
-		rc = copy_to_user((void __user *) cmd->handle, &flash_acq_dev,
+		rc = copy_to_user(u64_to_user_ptr(cmd->handle),
+			&flash_acq_dev,
 			sizeof(struct cam_sensor_acquire_dev));
 		if (rc) {
 			CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d",
@@ -93,7 +96,7 @@
 		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
 			(fctrl->flash_state == CAM_FLASH_STATE_START)) {
 			CAM_WARN(CAM_FLASH,
-				"Cannot apply Release dev: Prev state:%d",
+				"Wrong state for Release dev: Prev state:%d",
 				fctrl->flash_state);
 		}
 
@@ -106,11 +109,18 @@
 			rc = -EINVAL;
 			goto release_mutex;
 		}
-		rc = cam_flash_release_dev(fctrl);
-		if (rc)
-			CAM_ERR(CAM_FLASH,
-				"Failed in destroying the device Handle rc= %d",
-				rc);
+
+		if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_START))
+			fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+
+		if (cam_flash_release_dev(fctrl))
+			CAM_WARN(CAM_FLASH,
+				"Failed in destroying the device Handle");
+
+		if (fctrl->func_tbl.power_ops(fctrl, false))
+			CAM_WARN(CAM_FLASH, "Power Down Failed");
+
 		fctrl->flash_state = CAM_FLASH_STATE_INIT;
 		break;
 	}
@@ -130,8 +140,8 @@
 			flash_cap.max_current_torch[i] =
 				soc_private->torch_max_current[i];
 
-		if (copy_to_user((void __user *) cmd->handle, &flash_cap,
-			sizeof(struct cam_flash_query_cap_info))) {
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&flash_cap, sizeof(struct cam_flash_query_cap_info))) {
 			CAM_ERR(CAM_FLASH, "Failed Copy to User");
 			rc = -EFAULT;
 			goto release_mutex;
@@ -149,17 +159,6 @@
 			goto release_mutex;
 		}
 
-		rc = cam_flash_prepare(fctrl, true);
-		if (rc) {
-			CAM_ERR(CAM_FLASH,
-				"Enable Regulator Failed rc = %d", rc);
-			goto release_mutex;
-		}
-		rc = cam_flash_apply_setting(fctrl, 0);
-		if (rc) {
-			CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
-			goto release_mutex;
-		}
 		fctrl->flash_state = CAM_FLASH_STATE_START;
 		break;
 	}
@@ -173,18 +172,13 @@
 			goto release_mutex;
 		}
 
-		rc = cam_flash_stop_dev(fctrl);
-		if (rc) {
-			CAM_ERR(CAM_FLASH, "Stop Dev Failed rc = %d",
-				rc);
-			goto release_mutex;
-		}
+		fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
 		fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
 		break;
 	}
 	case CAM_CONFIG_DEV: {
 		CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV");
-		rc = cam_flash_parser(fctrl, arg);
+		rc = fctrl->func_tbl.parser(fctrl, arg);
 		if (rc) {
 			CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc);
 			goto release_mutex;
@@ -201,6 +195,35 @@
 	return rc;
 }
 
+static int32_t cam_flash_init_default_params(struct cam_flash_ctrl *fctrl)
+{
+	/* Validate input parameters */
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "failed: invalid params fctrl %pK",
+			fctrl);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FLASH,
+		"master_type: %d", fctrl->io_master_info.master_type);
+	/* Initialize cci_client */
+	if (fctrl->io_master_info.master_type == CCI_MASTER) {
+		fctrl->io_master_info.cci_client = kzalloc(sizeof(
+			struct cam_sensor_cci_client), GFP_KERNEL);
+		if (!(fctrl->io_master_info.cci_client))
+			return -ENOMEM;
+	} else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+		if (!(fctrl->io_master_info.client))
+			return -EINVAL;
+	} else {
+		CAM_ERR(CAM_FLASH,
+			"Invalid master type: not supported");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static const struct of_device_id cam_flash_dt_match[] = {
 	{.compatible = "qcom,camera-flash", .data = NULL},
 	{}
@@ -291,20 +314,36 @@
 	return 0;
 }
 
+static int32_t cam_flash_i2c_driver_remove(struct i2c_client *client)
+{
+	int32_t rc = 0;
+	struct cam_flash_ctrl *fctrl = i2c_get_clientdata(client);
+	/* Handle I2C Devices */
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Flash device is NULL");
+		return -EINVAL;
+	}
+	/* Free allocated memory */
+	kfree(fctrl->i2c_data.per_frame);
+	fctrl->i2c_data.per_frame = NULL;
+	kfree(fctrl);
+	return rc;
+}
+
 static int cam_flash_subdev_close(struct v4l2_subdev *sd,
 	struct v4l2_subdev_fh *fh)
 {
-	struct cam_flash_ctrl *flash_ctrl =
+	struct cam_flash_ctrl *fctrl =
 		v4l2_get_subdevdata(sd);
 
-	if (!flash_ctrl) {
+	if (!fctrl) {
 		CAM_ERR(CAM_FLASH, "Flash ctrl ptr is NULL");
 		return -EINVAL;
 	}
 
-	mutex_lock(&flash_ctrl->flash_mutex);
-	cam_flash_shutdown(flash_ctrl);
-	mutex_unlock(&flash_ctrl->flash_mutex);
+	mutex_lock(&fctrl->flash_mutex);
+	cam_flash_shutdown(fctrl);
+	mutex_unlock(&fctrl->flash_mutex);
 
 	return 0;
 }
@@ -324,10 +363,30 @@
 	.close = cam_flash_subdev_close,
 };
 
+static int cam_flash_init_subdev(struct cam_flash_ctrl *fctrl)
+{
+	int rc = 0;
+
+	fctrl->v4l2_dev_str.internal_ops =
+		&cam_flash_internal_ops;
+	fctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
+	fctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
+	fctrl->v4l2_dev_str.sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	fctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
+	fctrl->v4l2_dev_str.token = fctrl;
+
+	rc = cam_register_subdev(&(fctrl->v4l2_dev_str));
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
+
+	return rc;
+}
+
 static int32_t cam_flash_platform_probe(struct platform_device *pdev)
 {
-	int32_t rc = 0;
-	struct cam_flash_ctrl *flash_ctrl = NULL;
+	int32_t rc = 0, i = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
 
 	CAM_DBG(CAM_FLASH, "Enter");
 	if (!pdev->dev.of_node) {
@@ -335,53 +394,181 @@
 		return -EINVAL;
 	}
 
-	flash_ctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
-	if (!flash_ctrl)
+	fctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
+	if (!fctrl)
 		return -ENOMEM;
 
-	flash_ctrl->pdev = pdev;
-	flash_ctrl->soc_info.pdev = pdev;
-	flash_ctrl->soc_info.dev = &pdev->dev;
-	flash_ctrl->soc_info.dev_name = pdev->name;
+	fctrl->pdev = pdev;
+	fctrl->soc_info.pdev = pdev;
+	fctrl->soc_info.dev = &pdev->dev;
+	fctrl->soc_info.dev_name = pdev->name;
 
-	rc = cam_flash_get_dt_data(flash_ctrl, &flash_ctrl->soc_info);
+	platform_set_drvdata(pdev, fctrl);
+
+	rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
 	if (rc) {
 		CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc);
-		kfree(flash_ctrl);
+		kfree(fctrl);
 		return -EINVAL;
 	}
 
-	flash_ctrl->v4l2_dev_str.internal_ops =
-		&cam_flash_internal_ops;
-	flash_ctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
-	flash_ctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
-	flash_ctrl->v4l2_dev_str.sd_flags =
-		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-	flash_ctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
-	flash_ctrl->v4l2_dev_str.token = flash_ctrl;
+	if (of_find_property(pdev->dev.of_node, "cci-master", NULL)) {
+		/* Get CCI master */
+		rc = of_property_read_u32(pdev->dev.of_node, "cci-master",
+			&fctrl->cci_i2c_master);
+		CAM_DBG(CAM_FLASH, "cci-master %d, rc %d",
+			fctrl->cci_i2c_master, rc);
+		if (rc < 0) {
+			/* Set default master 0 */
+			fctrl->cci_i2c_master = MASTER_0;
+			rc = 0;
+		}
 
-	rc = cam_register_subdev(&(flash_ctrl->v4l2_dev_str));
-	if (rc) {
-		CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
-		goto free_resource;
+		fctrl->io_master_info.master_type = CCI_MASTER;
+		rc = cam_flash_init_default_params(fctrl);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"failed: cam_flash_init_default_params rc %d",
+				rc);
+			return rc;
+		}
+
+		fctrl->i2c_data.per_frame =
+			kcalloc(MAX_PER_FRAME_ARRAY,
+				sizeof(struct i2c_settings_array), GFP_KERNEL);
+		if (fctrl->i2c_data.per_frame == NULL) {
+			CAM_ERR(CAM_FLASH, "No Memory");
+			rc = -ENOMEM;
+			goto free_cci_resource;
+		}
+
+		INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+		INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+			INIT_LIST_HEAD(
+				&(fctrl->i2c_data.per_frame[i].list_head));
+
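+		/* Flash behind CCI: dispatch through the I2C op handlers */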
+		fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+		fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+		fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+		fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+	} else {
+		/* PMIC Flash */
+		fctrl->func_tbl.parser = cam_flash_pmic_pkt_parser;
+		fctrl->func_tbl.apply_setting = cam_flash_pmic_apply_setting;
+		fctrl->func_tbl.power_ops = cam_flash_pmic_power_ops;
+		fctrl->func_tbl.flush_req = cam_flash_pmic_flush_request;
 	}
-	flash_ctrl->bridge_intf.device_hdl = -1;
-	flash_ctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
-	flash_ctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
-	flash_ctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
-	flash_ctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
 
-	platform_set_drvdata(pdev, flash_ctrl);
-	v4l2_set_subdevdata(&flash_ctrl->v4l2_dev_str.sd, flash_ctrl);
+	rc = cam_flash_init_subdev(fctrl);
+	if (rc) {
+		if (fctrl->io_master_info.cci_client != NULL)
+			goto free_cci_resource;
+		else
+			goto free_resource;
+	}
 
-	mutex_init(&(flash_ctrl->flash_mutex));
-	mutex_init(&(flash_ctrl->flash_wq_mutex));
+	fctrl->bridge_intf.device_hdl = -1;
+	fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+	fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+	fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+	fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
 
-	flash_ctrl->flash_state = CAM_FLASH_STATE_INIT;
+	mutex_init(&(fctrl->flash_mutex));
+
+	fctrl->flash_state = CAM_FLASH_STATE_INIT;
 	CAM_DBG(CAM_FLASH, "Probe success");
 	return rc;
+
+free_cci_resource:
+	kfree(fctrl->io_master_info.cci_client);
+	fctrl->io_master_info.cci_client = NULL;
 free_resource:
-	kfree(flash_ctrl);
+	kfree(fctrl->i2c_data.per_frame);
+	kfree(fctrl->soc_info.soc_private);
+	cam_soc_util_release_platform_resource(&fctrl->soc_info);
+	fctrl->i2c_data.per_frame = NULL;
+	fctrl->soc_info.soc_private = NULL;
+	kfree(fctrl);
+	fctrl = NULL;
+	return rc;
+}
+
+static int32_t cam_flash_i2c_driver_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int32_t rc = 0, i = 0;
+	struct cam_flash_ctrl *fctrl;
+
+	if (client == NULL || id == NULL) {
+		CAM_ERR(CAM_FLASH, "Invalid Args client: %pK id: %pK",
+			client, id);
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_FLASH, "%s :: i2c_check_functionality failed",
+			 client->name);
+		return -EFAULT;
+	}
+
+	/* Create sensor control structure */
+	fctrl = kzalloc(sizeof(*fctrl), GFP_KERNEL);
+	if (!fctrl)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, fctrl);
+
+	fctrl->io_master_info.client = client;
+	fctrl->soc_info.dev = &client->dev;
+	fctrl->soc_info.dev_name = client->name;
+	fctrl->io_master_info.master_type = I2C_MASTER;
+
+	rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "failed: cam_flash_get_dt_data rc %d", rc);
+		goto free_ctrl;
+	}
+
+	rc = cam_flash_init_subdev(fctrl);
+	if (rc)
+		goto free_ctrl;
+
+	fctrl->i2c_data.per_frame =
+		kcalloc(MAX_PER_FRAME_ARRAY,
+			sizeof(struct i2c_settings_array), GFP_KERNEL);
+	if (fctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto unreg_subdev;
+	}
+
+	INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+	INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(fctrl->i2c_data.per_frame[i].list_head));
+
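+	/* I2C flash: dispatch through the I2C op handlers */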
+	fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+	fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+	fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+	fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+
+	fctrl->bridge_intf.device_hdl = -1;
+	fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+	fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+	fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+	fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+
+	mutex_init(&(fctrl->flash_mutex));
+	fctrl->flash_state = CAM_FLASH_STATE_INIT;
+
+	return rc;
+
+unreg_subdev:
+	cam_unregister_subdev(&(fctrl->v4l2_dev_str));
+free_ctrl:
+	kfree(fctrl);
+	fctrl = NULL;
 	return rc;
 }
 
@@ -398,20 +585,40 @@
 	},
 };
 
-static int __init cam_flash_init_module(void)
+static const struct i2c_device_id i2c_id[] = {
+	{FLASH_DRIVER_I2C, (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_flash_i2c_driver = {
+	.id_table = i2c_id,
+	.probe  = cam_flash_i2c_driver_probe,
+	.remove = cam_flash_i2c_driver_remove,
+	.driver = {
+		.name = FLASH_DRIVER_I2C,
+	},
+};
+
+static int32_t __init cam_flash_init_module(void)
 {
 	int32_t rc = 0;
 
 	rc = platform_driver_register(&cam_flash_platform_driver);
-	if (rc)
-		CAM_ERR(CAM_FLASH, "platform probe for flash failed");
+	if (rc == 0) {
+		CAM_DBG(CAM_FLASH, "platform probe success");
+		return 0;
+	}
 
+	rc = i2c_add_driver(&cam_flash_i2c_driver);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "i2c_add_driver failed rc: %d", rc);
 	return rc;
 }
 
 static void __exit cam_flash_exit_module(void)
 {
 	platform_driver_unregister(&cam_flash_platform_driver);
+	i2c_del_driver(&cam_flash_i2c_driver);
 }
 
 module_init(cam_flash_init_module);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
index 4adc1b2..cb54239 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -33,15 +33,21 @@
 #include "cam_sensor_cmn_header.h"
 #include "cam_soc_util.h"
 #include "cam_debug_util.h"
+#include "cam_sensor_io.h"
+#include "cam_flash_core.h"
 
 #define CAMX_FLASH_DEV_NAME "cam-flash-dev"
 
 #define CAM_FLASH_PIPELINE_DELAY 1
 
+#define FLASH_DRIVER_I2C "i2c_flash"
+
 #define CAM_FLASH_PACKET_OPCODE_INIT                 0
 #define CAM_FLASH_PACKET_OPCODE_SET_OPS              1
 #define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2
 
+struct cam_flash_ctrl;
+
 enum cam_flash_switch_trigger_ops {
 	LED_SWITCH_OFF = 0,
 	LED_SWITCH_ON,
@@ -54,6 +60,12 @@
 	CAM_FLASH_STATE_START,
 };
 
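+/* Flush either all pending requests or a single request id */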
+enum cam_flash_flush_type {
+	FLUSH_ALL = 0,
+	FLUSH_REQ,
+	FLUSH_MAX,
+};
+
 /**
  * struct cam_flash_intf_params
  * @device_hdl   : Device Handle
@@ -136,6 +148,14 @@
 	uint32_t     torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
 };
 
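+/* Per-hardware (PMIC vs I2C) operation table filled in at probe time */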
+struct cam_flash_func_tbl {
+	int (*parser)(struct cam_flash_ctrl *fctrl, void *arg);
+	int (*apply_setting)(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+	int (*power_ops)(struct cam_flash_ctrl *fctrl, bool regulator_enable);
+	int (*flush_req)(struct cam_flash_ctrl *fctrl,
+		enum cam_flash_flush_type type, uint64_t req_id);
+};
+
 /**
  *  struct cam_flash_ctrl
  * @soc_info            : Soc related information
@@ -150,32 +170,57 @@
  * @flash_num_sources   : Number of flash sources
  * @torch_num_source    : Number of torch sources
  * @flash_mutex         : Mutex for flash operations
- * @flash_wq_mutex      : Mutex for flash apply setting
- * @flash_state         : Current flash state (LOW/OFF/ON/INIT)
+  * @flash_state         : Current flash state (LOW/OFF/ON/INIT)
  * @flash_type          : Flash types (PMIC/I2C/GPIO)
  * @is_regulator_enable : Regulator disable/enable notifier
+ * @func_tbl            : Function table for different HW
+ *	                      (e.g. i2c/pmic/gpio)
  * @flash_trigger       : Flash trigger ptr
  * @torch_trigger       : Torch trigger ptr
+ * @cci_i2c_master      : I2C structure
+ * @io_master_info      : Information about the communication master
+ * @i2c_data            : I2C register settings
  */
 struct cam_flash_ctrl {
-	struct cam_hw_soc_info          soc_info;
-	struct platform_device         *pdev;
-	struct cam_flash_frame_setting  per_frame[MAX_PER_FRAME_ARRAY];
-	struct cam_flash_frame_setting  nrt_info;
-	struct device_node             *of_node;
-	struct cam_subdev               v4l2_dev_str;
-	struct cam_flash_intf_params    bridge_intf;
-	struct cam_flash_init_packet    flash_init_setting;
-	struct led_trigger             *switch_trigger;
-	uint32_t                        flash_num_sources;
-	uint32_t                        torch_num_sources;
-	struct mutex                    flash_mutex;
-	struct mutex                    flash_wq_mutex;
-	enum   cam_flash_state          flash_state;
-	uint8_t                         flash_type;
-	bool                            is_regulator_enabled;
+	struct cam_hw_soc_info              soc_info;
+	struct platform_device             *pdev;
+	struct cam_sensor_power_ctrl_t      power_info;
+	struct cam_flash_frame_setting      per_frame[MAX_PER_FRAME_ARRAY];
+	struct cam_flash_frame_setting      nrt_info;
+	struct device_node                 *of_node;
+	struct cam_subdev                   v4l2_dev_str;
+	struct cam_flash_intf_params        bridge_intf;
+	struct cam_flash_init_packet        flash_init_setting;
+	struct led_trigger                 *switch_trigger;
+	uint32_t                            flash_num_sources;
+	uint32_t                            torch_num_sources;
+	struct mutex                        flash_mutex;
+	enum   cam_flash_state              flash_state;
+	uint8_t                             flash_type;
+	bool                                is_regulator_enabled;
+	struct cam_flash_func_tbl           func_tbl;
 	struct led_trigger           *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
 	struct led_trigger           *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+/* I2C related setting */
+	enum   cci_i2c_master_t             cci_i2c_master;
+	struct camera_io_master             io_master_info;
+	struct i2c_data_settings            i2c_data;
 };
 
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_off(struct cam_flash_ctrl *fctrl);
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable);
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable);
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id);
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id);
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl);
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
+
 #endif /*_CAM_FLASH_DEV_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
index a195762..22a124d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -193,32 +193,31 @@
 		return -EINVAL;
 	}
 
-	of_node = fctrl->pdev->dev.of_node;
-
-	rc = cam_soc_util_get_dt_properties(soc_info);
-	if (rc < 0) {
-		CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
-		return rc;
-	}
-
 	soc_info->soc_private =
 		kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL);
 	if (!soc_info->soc_private) {
 		rc = -ENOMEM;
 		goto release_soc_res;
 	}
+	of_node = fctrl->pdev->dev.of_node;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
+		goto free_soc_private;
+	}
 
 	rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private);
-	if (rc < 0) {
+	if (rc) {
 		CAM_ERR(CAM_FLASH,
 			"cam_flash_get_pmic_source_info failed rc %d", rc);
 		goto free_soc_private;
 	}
-
 	return rc;
 
 free_soc_private:
 	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
 release_soc_res:
 	cam_soc_util_release_platform_resource(soc_info);
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 0570655..850b315 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -18,6 +18,7 @@
 #include "cam_sensor_util.h"
 #include "cam_debug_util.h"
 #include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
 
 int32_t cam_ois_construct_default_power_setting(
 	struct cam_sensor_power_ctrl_t *power_info)
@@ -79,7 +80,7 @@
 		CAM_ERR(CAM_OIS, "Device is already acquired");
 		return -EFAULT;
 	}
-	if (copy_from_user(&ois_acq_dev, (void __user *) cmd->handle,
+	if (copy_from_user(&ois_acq_dev, u64_to_user_ptr(cmd->handle),
 		sizeof(ois_acq_dev)))
 		return -EFAULT;
 
@@ -95,7 +96,7 @@
 	o_ctrl->bridge_intf.session_hdl = ois_acq_dev.session_handle;
 
 	CAM_DBG(CAM_OIS, "Device Handle: %d", ois_acq_dev.device_handle);
-	if (copy_to_user((void __user *) cmd->handle, &ois_acq_dev,
+	if (copy_to_user(u64_to_user_ptr(cmd->handle), &ois_acq_dev,
 		sizeof(struct cam_sensor_acquire_dev))) {
 		CAM_ERR(CAM_OIS, "ACQUIRE_DEV: copy to user failed");
 		return -EFAULT;
@@ -193,7 +194,7 @@
 		return -EINVAL;
 	}
 
-	rc = msm_camera_power_down(power_info, soc_info);
+	rc = cam_sensor_util_power_down(power_info, soc_info);
 	if (rc) {
 		CAM_ERR(CAM_OIS, "power down the core is failed:%d", rc);
 		return rc;
@@ -425,12 +426,12 @@
 	int32_t                         i = 0;
 	uint32_t                        total_cmd_buf_in_bytes = 0;
 	struct common_header           *cmm_hdr = NULL;
-	uint64_t                        generic_ptr;
+	uintptr_t                       generic_ptr;
 	struct cam_control             *ioctl_ctrl = NULL;
 	struct cam_config_dev_cmd       dev_config;
 	struct i2c_settings_array      *i2c_reg_settings = NULL;
 	struct cam_cmd_buf_desc        *cmd_desc = NULL;
-	uint64_t                        generic_pkt_addr;
+	uintptr_t                       generic_pkt_addr;
 	size_t                          pkt_len;
 	struct cam_packet              *csl_packet = NULL;
 	size_t                          len_of_buff = 0;
@@ -440,11 +441,12 @@
 	struct cam_sensor_power_ctrl_t  *power_info = &soc_private->power_info;
 
 	ioctl_ctrl = (struct cam_control *)arg;
-	if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
+	if (copy_from_user(&dev_config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
 		sizeof(dev_config)))
 		return -EFAULT;
 	rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
-		(uint64_t *)&generic_pkt_addr, &pkt_len);
+		&generic_pkt_addr, &pkt_len);
 	if (rc) {
 		CAM_ERR(CAM_OIS,
 			"error in converting command Handle Error: %d", rc);
@@ -459,7 +461,7 @@
 	}
 
 	csl_packet = (struct cam_packet *)
-		(generic_pkt_addr + dev_config.offset);
+		(generic_pkt_addr + (uint32_t)dev_config.offset);
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
 	case CAM_OIS_PACKET_OPCODE_INIT:
 		offset = (uint32_t *)&csl_packet->payload;
@@ -473,7 +475,7 @@
 				continue;
 
 			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
-				(uint64_t *)&generic_ptr, &len_of_buff);
+				&generic_ptr, &len_of_buff);
 			if (rc < 0) {
 				CAM_ERR(CAM_OIS, "Failed to get cpu buf");
 				return rc;
@@ -716,7 +718,7 @@
 	case CAM_QUERY_CAP:
 		ois_cap.slot_info = o_ctrl->soc_info.index;
 
-		if (copy_to_user((void __user *) cmd->handle,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
 			&ois_cap,
 			sizeof(struct cam_ois_query_cap_t))) {
 			CAM_ERR(CAM_OIS, "Failed Copy to User");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index a2431be..c88e969 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -16,6 +16,7 @@
 #include "cam_sensor_util.h"
 #include "cam_soc_util.h"
 #include "cam_trace.h"
+#include "cam_common_util.h"
 
 static void cam_sensor_update_req_mgr(
 	struct cam_sensor_ctrl_t *s_ctrl,
@@ -62,30 +63,12 @@
 	}
 }
 
-static void cam_sensor_release_resource(
+static void cam_sensor_release_per_frame_resource(
 	struct cam_sensor_ctrl_t *s_ctrl)
 {
 	struct i2c_settings_array *i2c_set = NULL;
 	int i, rc;
 
-	i2c_set = &(s_ctrl->i2c_data.init_settings);
-	if (i2c_set->is_settings_valid == 1) {
-		i2c_set->is_settings_valid = -1;
-		rc = delete_request(i2c_set);
-		if (rc < 0)
-			CAM_ERR(CAM_SENSOR,
-				"failed while deleting Init settings");
-	}
-
-	i2c_set = &(s_ctrl->i2c_data.config_settings);
-	if (i2c_set->is_settings_valid == 1) {
-		i2c_set->is_settings_valid = -1;
-		rc = delete_request(i2c_set);
-		if (rc < 0)
-			CAM_ERR(CAM_SENSOR,
-				"failed while deleting Res settings");
-	}
-
 	if (s_ctrl->i2c_data.per_frame != NULL) {
 		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
 			i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
@@ -105,7 +88,7 @@
 	void *arg)
 {
 	int32_t rc = 0;
-	uint64_t generic_ptr;
+	uintptr_t generic_ptr;
 	struct cam_control *ioctl_ctrl = NULL;
 	struct cam_packet *csl_packet = NULL;
 	struct cam_cmd_buf_desc *cmd_desc = NULL;
@@ -122,13 +105,14 @@
 		return -EINVAL;
 	}
 
-	if (copy_from_user(&config, (void __user *) ioctl_ctrl->handle,
+	if (copy_from_user(&config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
 		sizeof(config)))
 		return -EFAULT;
 
 	rc = cam_mem_get_cpu_buf(
 		config.packet_handle,
-		(uint64_t *)&generic_ptr,
+		&generic_ptr,
 		&len_of_buff);
 	if (rc < 0) {
 		CAM_ERR(CAM_SENSOR, "Failed in getting the buffer: %d", rc);
@@ -136,7 +120,7 @@
 	}
 
 	csl_packet = (struct cam_packet *)(generic_ptr +
-		config.offset);
+		(uint32_t)config.offset);
 	if (config.offset > len_of_buff) {
 		CAM_ERR(CAM_SENSOR,
 			"offset is out of bounds: off: %lld len: %zu",
@@ -403,15 +387,16 @@
 int32_t cam_handle_mem_ptr(uint64_t handle, struct cam_sensor_ctrl_t *s_ctrl)
 {
 	int rc = 0, i;
-	void *packet = NULL, *cmd_buf1 = NULL;
 	uint32_t *cmd_buf;
 	void *ptr;
 	size_t len;
 	struct cam_packet *pkt;
 	struct cam_cmd_buf_desc *cmd_desc;
+	uintptr_t cmd_buf1 = 0;
+	uintptr_t packet = 0;
 
 	rc = cam_mem_get_cpu_buf(handle,
-		(uint64_t *)&packet, &len);
+		&packet, &len);
 	if (rc < 0) {
 		CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer");
 		return -EINVAL;
@@ -432,7 +417,7 @@
 		if (!(cmd_desc[i].length))
 			continue;
 		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
-			(uint64_t *)&cmd_buf1, &len);
+			&cmd_buf1, &len);
 		if (rc < 0) {
 			CAM_ERR(CAM_SENSOR,
 				"Failed to parse the command Buffer Header");
@@ -503,10 +488,9 @@
 		(s_ctrl->is_probe_succeed == 0))
 		return;
 
-	cam_sensor_release_resource(s_ctrl);
 	cam_sensor_release_stream_rsc(s_ctrl);
-	if (s_ctrl->sensor_state >= CAM_SENSOR_ACQUIRE)
-		cam_sensor_power_down(s_ctrl);
+	cam_sensor_release_per_frame_resource(s_ctrl);
+	cam_sensor_power_down(s_ctrl);
 
 	rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
 	if (rc < 0)
@@ -676,7 +660,8 @@
 			goto release_mutex;
 		}
 		rc = copy_from_user(&sensor_acq_dev,
-			(void __user *) cmd->handle, sizeof(sensor_acq_dev));
+			u64_to_user_ptr(cmd->handle),
+			sizeof(sensor_acq_dev));
 		if (rc < 0) {
 			CAM_ERR(CAM_SENSOR, "Failed Copying from user");
 			goto release_mutex;
@@ -695,7 +680,8 @@
 
 		CAM_DBG(CAM_SENSOR, "Device Handle: %d",
 			sensor_acq_dev.device_handle);
-		if (copy_to_user((void __user *) cmd->handle, &sensor_acq_dev,
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&sensor_acq_dev,
 			sizeof(struct cam_sensor_acquire_dev))) {
 			CAM_ERR(CAM_SENSOR, "Failed Copy to User");
 			rc = -EFAULT;
@@ -731,7 +717,7 @@
 			goto release_mutex;
 		}
 
-		cam_sensor_release_resource(s_ctrl);
+		cam_sensor_release_per_frame_resource(s_ctrl);
 		cam_sensor_release_stream_rsc(s_ctrl);
 		if (s_ctrl->bridge_intf.device_hdl == -1) {
 			CAM_ERR(CAM_SENSOR,
@@ -762,8 +748,8 @@
 		struct  cam_sensor_query_cap sensor_cap;
 
 		cam_sensor_query_cap(s_ctrl, &sensor_cap);
-		if (copy_to_user((void __user *) cmd->handle, &sensor_cap,
-			sizeof(struct  cam_sensor_query_cap))) {
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&sensor_cap, sizeof(struct  cam_sensor_query_cap))) {
 			CAM_ERR(CAM_SENSOR, "Failed Copy to User");
 			rc = -EFAULT;
 			goto release_mutex;
@@ -816,7 +802,7 @@
 			}
 		}
 
-		cam_sensor_release_resource(s_ctrl);
+		cam_sensor_release_per_frame_resource(s_ctrl);
 		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
 		CAM_INFO(CAM_SENSOR,
 			"CAM_STOP_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
@@ -918,6 +904,8 @@
 		CAM_ERR(CAM_SENSOR, "Device data is NULL");
 		return -EINVAL;
 	}
+
+	mutex_lock(&s_ctrl->cam_sensor_mutex);
 	if (link->link_enable) {
 		s_ctrl->bridge_intf.link_hdl = link->link_hdl;
 		s_ctrl->bridge_intf.crm_cb = link->crm_cb;
@@ -925,6 +913,7 @@
 		s_ctrl->bridge_intf.link_hdl = -1;
 		s_ctrl->bridge_intf.crm_cb = NULL;
 	}
+	mutex_unlock(&s_ctrl->cam_sensor_mutex);
 
 	return 0;
 }
@@ -1005,7 +994,7 @@
 		CAM_ERR(CAM_SENSOR, "failed: power_info %pK", power_info);
 		return -EINVAL;
 	}
-	rc = msm_camera_power_down(power_info, soc_info);
+	rc = cam_sensor_util_power_down(power_info, soc_info);
 	if (rc < 0) {
 		CAM_ERR(CAM_SENSOR, "power down the core is failed:%d", rc);
 		return rc;
@@ -1155,8 +1144,10 @@
 	}
 	CAM_DBG(CAM_REQ, " Sensor update req id: %lld", apply->request_id);
 	trace_cam_apply_req("Sensor", apply->request_id);
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
 	rc = cam_sensor_apply_settings(s_ctrl, apply->request_id,
 		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE);
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
 	return rc;
 }
 
@@ -1190,7 +1181,9 @@
 			continue;
 
 		if (i2c_set->is_settings_valid == 1) {
+			mutex_lock(&(s_ctrl->cam_sensor_mutex));
 			rc = delete_request(i2c_set);
+			mutex_unlock(&(s_ctrl->cam_sensor_mutex));
 			if (rc < 0)
 				CAM_ERR(CAM_SENSOR,
 					"delete request: %lld rc: %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 46bda05..5c1143e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -296,7 +296,7 @@
 {
 	int16_t                   rc = 0, i = 0;
 	size_t                    len_of_buff = 0;
-	uint64_t                  generic_ptr;
+	uintptr_t                  generic_ptr;
 	uint16_t                  cmd_length_in_bytes = 0;
 
 	for (i = 0; i < num_cmd_buffers; i++) {
@@ -318,11 +318,11 @@
 			continue;
 
 		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
-			(uint64_t *)&generic_ptr, &len_of_buff);
+			&generic_ptr, &len_of_buff);
 		cmd_buf = (uint32_t *)generic_ptr;
 		if (rc < 0) {
 			CAM_ERR(CAM_SENSOR,
-				"cmd hdl failed:%d, Err: %d, Buffer_len: %ld",
+				"cmd hdl failed:%d, Err: %d, Buffer_len: %zu",
 				cmd_desc[i].mem_handle, rc, len_of_buff);
 			return rc;
 		}
@@ -439,6 +439,75 @@
 	return rc;
 }
 
+int cam_sensor_util_i2c_apply_setting(
+	struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list)
+{
+	int32_t rc = 0;
+	uint32_t i, size;
+
+	switch (i2c_list->op_code) {
+	case CAM_SENSOR_I2C_WRITE_RANDOM: {
+		rc = camera_io_dev_write(io_master_info,
+			&(i2c_list->i2c_settings));
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to random write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_WRITE_SEQ: {
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to seq write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_WRITE_BURST: {
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to burst write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_POLL: {
+		size = i2c_list->i2c_settings.size;
+		for (i = 0; i < size; i++) {
+			rc = camera_io_dev_poll(
+			io_master_info,
+			i2c_list->i2c_settings.reg_setting[i].reg_addr,
+			i2c_list->i2c_settings.reg_setting[i].reg_data,
+			i2c_list->i2c_settings.reg_setting[i].data_mask,
+			i2c_list->i2c_settings.addr_type,
+			i2c_list->i2c_settings.data_type,
+			i2c_list->i2c_settings.reg_setting[i].delay);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"i2c poll apply setting Fail: %d", rc);
+				return rc;
+			}
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_SENSOR, "Wrong Opcode: %d", i2c_list->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
 int32_t msm_camera_fill_vreg_params(
 	struct cam_hw_soc_info *soc_info,
 	struct cam_sensor_power_setting *power_setting,
@@ -1710,7 +1779,7 @@
 	return ps;
 }
 
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
 		struct cam_hw_soc_info *soc_info)
 {
 	int index = 0, ret = 0, num_vreg = 0, i;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
index 6c0287e..583ddb1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -39,6 +39,9 @@
 	struct i2c_settings_array *i2c_reg_settings,
 	struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
 
+int cam_sensor_util_i2c_apply_setting(struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list);
+
 int32_t delete_request(struct i2c_settings_array *i2c_array);
 int cam_sensor_util_request_gpio_table(
 	struct cam_hw_soc_info *soc_info, int gpio_en);
@@ -49,7 +52,7 @@
 int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
 		struct cam_hw_soc_info *soc_info);
 
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
 		struct cam_hw_soc_info *soc_info);
 
 int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
index e17dac6..96f3968 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/Makefile
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -1,3 +1,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 52da37f..56209c6 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -23,6 +23,7 @@
 #include <linux/genalloc.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
+#include <uapi/media/cam_req_mgr.h>
 #include "cam_smmu_api.h"
 #include "cam_debug_util.h"
 
@@ -34,11 +35,14 @@
 #define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
 #define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
 #define HANDLE_INIT (-1)
-#define CAM_SMMU_CB_MAX 2
+#define CAM_SMMU_CB_MAX 5
 
 #define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
 #define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
 
+static int g_num_pf_handled = 4;
+module_param(g_num_pf_handled, int, 0644);
+
 struct firmware_alloc_info {
 	struct device *fw_dev;
 	void *fw_kva;
@@ -129,12 +133,11 @@
 	int handle;
 	enum cam_smmu_ops_param state;
 
-	void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *,
-		struct device *, unsigned long,
-		int, void*);
+	cam_smmu_client_page_fault_handler handler[CAM_SMMU_CB_MAX];
 	void *token[CAM_SMMU_CB_MAX];
 	int cb_count;
 	int secure_count;
+	int pf_count;
 };
 
 struct cam_iommu_cb_set {
@@ -252,13 +255,14 @@
 
 static int cam_smmu_probe(struct platform_device *pdev);
 
-static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr);
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr);
 
 static void cam_smmu_page_fault_work(struct work_struct *work)
 {
 	int j;
 	int idx;
 	struct cam_smmu_work_payload *payload;
+	uint32_t buf_info;
 
 	mutex_lock(&iommu_cb_set.payload_list_lock);
 	if (list_empty(&iommu_cb_set.payload_list)) {
@@ -275,8 +279,11 @@
 
 	/* Dereference the payload to call the handler */
 	idx = payload->idx;
-	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
-	cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova);
+	buf_info = cam_smmu_find_closest_mapping(idx, (void *)payload->iova);
+	if (buf_info != 0) {
+		CAM_INFO(CAM_SMMU, "closest buf 0x%x idx %d", buf_info, idx);
+	}
+
 	for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
 		if ((iommu_cb_set.cb_info[idx].handler[j])) {
 			iommu_cb_set.cb_info[idx].handler[j](
@@ -284,10 +291,10 @@
 				payload->dev,
 				payload->iova,
 				payload->flags,
-				iommu_cb_set.cb_info[idx].token[j]);
+				iommu_cb_set.cb_info[idx].token[j],
+				buf_info);
 		}
 	}
-	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	kfree(payload);
 }
 
@@ -333,10 +340,13 @@
 	}
 }
 
-static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr)
 {
-	struct cam_dma_buff_info *mapping;
+	struct cam_dma_buff_info *mapping, *closest_mapping =  NULL;
 	unsigned long start_addr, end_addr, current_addr;
+	uint32_t buf_handle = 0;
+
+	long delta = 0, lowest_delta = 0;
 
 	current_addr = (unsigned long)vaddr;
 	list_for_each_entry(mapping,
@@ -344,31 +354,51 @@
 		start_addr = (unsigned long)mapping->paddr;
 		end_addr = (unsigned long)mapping->paddr + mapping->len;
 
-		if (start_addr <= current_addr && current_addr < end_addr) {
-			CAM_ERR(CAM_SMMU,
-				"va %pK valid: range:%pK-%pK, fd = %d cb: %s",
-				vaddr, (void *)start_addr, (void *)end_addr,
-				mapping->ion_fd,
+		if (start_addr <= current_addr && current_addr <= end_addr) {
+			closest_mapping = mapping;
+			CAM_INFO(CAM_SMMU,
+				"Found va 0x%lx in:0x%lx-0x%lx, fd %d cb:%s",
+				current_addr, start_addr,
+				end_addr, mapping->ion_fd,
 				iommu_cb_set.cb_info[idx].name);
 			goto end;
 		} else {
+			if (start_addr > current_addr)
+				delta = start_addr - current_addr;
+			else
+				delta = current_addr - end_addr - 1;
+
+			if (delta < lowest_delta || lowest_delta == 0) {
+				lowest_delta = delta;
+				closest_mapping = mapping;
+			}
 			CAM_DBG(CAM_SMMU,
-				"va %pK is not in this range: %pK-%pK, fd = %d",
-				vaddr, (void *)start_addr, (void *)end_addr,
-				mapping->ion_fd);
+				"approx va %lx not in range: %lx-%lx fd = %0x",
+				current_addr, start_addr,
+				end_addr, mapping->ion_fd);
 		}
 	}
-	CAM_ERR(CAM_SMMU,
-		"Cannot find vaddr:%pK in SMMU %s uses invalid virt address",
-		vaddr, iommu_cb_set.cb_info[idx].name);
+
 end:
-	return;
+	if (closest_mapping) {
+		buf_handle = GET_MEM_HANDLE(idx, closest_mapping->ion_fd);
+		CAM_INFO(CAM_SMMU,
+			"Closest map fd %d 0x%lx 0x%lx-0x%lx buf=%pK mem %0x",
+			closest_mapping->ion_fd, current_addr,
+			(unsigned long)closest_mapping->paddr,
+			(unsigned long)closest_mapping->paddr + closest_mapping->len,
+			closest_mapping->buf,
+			buf_handle);
+	} else
+		CAM_INFO(CAM_SMMU,
+			"Cannot find vaddr:%lx in SMMU %s virt address",
+			current_addr, iommu_cb_set.cb_info[idx].name);
+
+	return buf_handle;
 }
 
-void cam_smmu_reg_client_page_fault_handler(int handle,
-	void (*client_page_fault_handler)(struct iommu_domain *,
-	struct device *, unsigned long,
-	int, void*), void *token)
+void cam_smmu_set_client_page_fault_handler(int handle,
+	cam_smmu_client_page_fault_handler handler_cb, void *token)
 {
 	int idx, i = 0;
 
@@ -394,7 +424,7 @@
 		return;
 	}
 
-	if (client_page_fault_handler) {
+	if (handler_cb) {
 		if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
 			CAM_ERR(CAM_SMMU,
 				"%s Should not regiester more handlers",
@@ -402,12 +432,14 @@
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return;
 		}
+
 		iommu_cb_set.cb_info[idx].cb_count++;
+
 		for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
 			if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
 				iommu_cb_set.cb_info[idx].token[i] = token;
 				iommu_cb_set.cb_info[idx].handler[i] =
-					client_page_fault_handler;
+					handler_cb;
 				break;
 			}
 		}
@@ -429,6 +461,47 @@
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 }
 
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token)
+{
+	int idx, i = 0;
+
+	if (!token || (handle == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
+		return;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return;
+	}
+
+	for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+		if (iommu_cb_set.cb_info[idx].token[i] == token) {
+			iommu_cb_set.cb_info[idx].token[i] = NULL;
+			iommu_cb_set.cb_info[idx].handler[i] =
+				NULL;
+			iommu_cb_set.cb_info[idx].cb_count--;
+			break;
+		}
+	}
+	if (i == CAM_SMMU_CB_MAX)
+		CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s",
+			handle, iommu_cb_set.cb_info[idx].name);
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
 static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
 	struct device *dev, unsigned long iova,
 	int flags, void *token)
@@ -459,6 +532,13 @@
 		return -EINVAL;
 	}
 
+	if (++iommu_cb_set.cb_info[idx].pf_count > g_num_pf_handled) {
+		CAM_INFO(CAM_SMMU, "PF already handled %d %d %d",
+			g_num_pf_handled, idx,
+			iommu_cb_set.cb_info[idx].pf_count);
+		return -EINVAL;
+	}
+
 	payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
 	if (!payload)
 		return -EINVAL;
@@ -474,7 +554,7 @@
 	list_add_tail(&payload->list, &iommu_cb_set.payload_list);
 	mutex_unlock(&iommu_cb_set.payload_list_lock);
 
-	schedule_work(&iommu_cb_set.smmu_work);
+	cam_smmu_page_fault_work(&iommu_cb_set.smmu_work);
 
 	return -EINVAL;
 }
@@ -528,6 +608,7 @@
 		iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
 		iommu_cb_set.cb_info[i].dev = NULL;
 		iommu_cb_set.cb_info[i].cb_count = 0;
+		iommu_cb_set.cb_info[i].pf_count = 0;
 		for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
 			iommu_cb_set.cb_info[i].token[j] = NULL;
 			iommu_cb_set.cb_info[i].handler[j] = NULL;
@@ -1009,7 +1090,7 @@
 
 int cam_smmu_alloc_firmware(int32_t smmu_hdl,
 	dma_addr_t *iova,
-	uint64_t *cpuva,
+	uintptr_t *cpuva,
 	size_t *len)
 {
 	int rc;
@@ -1078,7 +1159,7 @@
 	iommu_cb_set.cb_info[idx].is_fw_allocated = true;
 
 	*iova = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
-	*cpuva = (uint64_t)icp_fw.fw_kva;
+	*cpuva = (uintptr_t)icp_fw.fw_kva;
 	*len = firmware_len;
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 254e382..caf326d 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,6 +51,21 @@
 };
 
 /**
+ * @brief        : Callback function type that is invoked on a CAM
+ *                     SMMU page fault.
+ *
+ * @param domain   : Iommu domain received in iommu page fault handler
+ * @param dev      : Device received in iommu page fault handler
+ * @param iova     : IOVA where page fault occurred
+ * @param flags    : Flags received in iommu page fault handler
+ * @param token    : Userdata given during callback registration
+ * @param buf_info : Closest mapped buffer info
+ */
+typedef void (*cam_smmu_client_page_fault_handler)(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token,
+	uint32_t buf_info);
+
+/**
  * @brief            : Structure to store region information
  *
  * @param iova_start : Start address of region
@@ -215,13 +230,19 @@
  * @brief       : Registers smmu fault handler for client
  *
  * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
- * @param client_page_fault_handler: It is triggered in IOMMU page fault
+ * @param handler_cb: Callback triggered on an IOMMU page fault
  * @param token: It is input param when trigger page fault handler
  */
-void cam_smmu_reg_client_page_fault_handler(int handle,
-	void (*client_page_fault_handler)(struct iommu_domain *,
-	struct device *, unsigned long,
-	int, void*), void *token);
+void cam_smmu_set_client_page_fault_handler(int handle,
+	cam_smmu_client_page_fault_handler handler_cb, void *token);
+
+/**
+ * @brief       : Unregisters smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param token: Userdata token passed when the handler was registered
+ */
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token);
 
 /**
  * @brief Maps memory from an ION fd into IOVA space
@@ -297,7 +318,7 @@
  */
 int cam_smmu_alloc_firmware(int32_t smmu_hdl,
 	dma_addr_t *iova,
-	uint64_t *kvaddr,
+	uintptr_t *kvaddr,
 	size_t *len);
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 4525bb5..c9e6e5f 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -426,7 +426,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&sync_create,
-		(void *)k_ioctl->ioctl_ptr,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
 		k_ioctl->size))
 		return -EFAULT;
 
@@ -434,7 +434,8 @@
 		sync_create.name);
 
 	if (!result)
-		if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+		if (copy_to_user(
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
 			&sync_create,
 			k_ioctl->size))
 			return -EFAULT;
@@ -453,7 +454,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&sync_signal,
-		(void *)k_ioctl->ioctl_ptr,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
 		k_ioctl->size))
 		return -EFAULT;
 
@@ -478,7 +479,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&sync_merge,
-		(void *)k_ioctl->ioctl_ptr,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
 		k_ioctl->size))
 		return -EFAULT;
 
@@ -492,8 +493,8 @@
 		return -ENOMEM;
 
 	if (copy_from_user(sync_objs,
-	(void *)sync_merge.sync_objs,
-	sizeof(uint32_t) * sync_merge.num_objs)) {
+		u64_to_user_ptr(sync_merge.sync_objs),
+		sizeof(uint32_t) * sync_merge.num_objs)) {
 		kfree(sync_objs);
 		return -EFAULT;
 	}
@@ -505,7 +506,8 @@
 		&sync_merge.merged);
 
 	if (!result)
-		if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+		if (copy_to_user(
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
 			&sync_merge,
 			k_ioctl->size)) {
 			kfree(sync_objs);
@@ -528,7 +530,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&sync_wait,
-		(void *)k_ioctl->ioctl_ptr,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
 		k_ioctl->size))
 		return -EFAULT;
 
@@ -549,7 +551,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&sync_create,
-		(void *)k_ioctl->ioctl_ptr,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
 		k_ioctl->size))
 		return -EFAULT;
 
@@ -573,7 +575,7 @@
 		return -EINVAL;
 
 	if (copy_from_user(&userpayload_info,
-		(void *)k_ioctl->ioctl_ptr,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
 		k_ioctl->size))
 		return -EFAULT;
 
@@ -654,7 +656,7 @@
 	}
 
 	if (copy_from_user(&userpayload_info,
-		(void *)k_ioctl->ioctl_ptr,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
 		k_ioctl->size))
 		return -EFAULT;
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
index 3e1281b..47d441f 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
@@ -13,8 +13,14 @@
 #ifndef _CAM_COMMON_UTIL_H_
 #define _CAM_COMMON_UTIL_H_
 
+#include <linux/types.h>
+#include <linux/kernel.h>
+
 #define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
 
+#define PTR_TO_U64(ptr) ((uint64_t)(uintptr_t)ptr)
+#define U64_TO_PTR(ptr) ((void *)(uintptr_t)ptr)
+
 /**
  * cam_common_util_get_string_index()
  *
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index a88ccdb..66a4487 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -21,7 +21,7 @@
 	size_t *len)
 {
 	int rc = 0;
-	uint64_t kmd_buf_addr = 0;
+	uintptr_t kmd_buf_addr = 0;
 
 	rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
 	if (rc) {
@@ -30,7 +30,7 @@
 		if (kmd_buf_addr && *len) {
 			*buf_addr = (uint32_t *)kmd_buf_addr;
 		} else {
-			CAM_ERR(CAM_UTIL, "Invalid addr and length :%ld", *len);
+			CAM_ERR(CAM_UTIL, "Invalid addr and length :%zd", *len);
 			rc = -ENOMEM;
 		}
 	}
@@ -101,7 +101,7 @@
 		return rc;
 
 	if (len < cmd_desc->size) {
-		CAM_ERR(CAM_UTIL, "invalid memory len:%ld and cmd desc size:%d",
+		CAM_ERR(CAM_UTIL, "invalid memory len:%zd and cmd desc size:%d",
 			len, cmd_desc->size);
 		return -EINVAL;
 	}
@@ -128,7 +128,7 @@
 {
 	struct cam_patch_desc *patch_desc = NULL;
 	dma_addr_t iova_addr;
-	uint64_t   cpu_addr;
+	uintptr_t   cpu_addr;
 	uint32_t   temp;
 	uint32_t  *dst_cpu_addr;
 	uint32_t  *src_buf_iova_addr;
@@ -209,7 +209,7 @@
 	cam_packet_generic_blob_handler blob_handler_cb, void *user_data)
 {
 	int       rc;
-	uint64_t  cpu_addr;
+	uintptr_t  cpu_addr;
 	size_t    buf_size;
 	uint32_t *blob_ptr;
 	uint32_t  blob_type, blob_size, blob_block_size, len_read;
@@ -233,7 +233,8 @@
 		return rc;
 	}
 
-	blob_ptr = (uint32_t *)((uint8_t *)cpu_addr + cmd_buf->offset);
+	blob_ptr = (uint32_t *)(((uint8_t *)cpu_addr) +
+		cmd_buf->offset);
 
 	CAM_DBG(CAM_UTIL,
 		"GenericCmdBuffer cpuaddr=%pK, blobptr=%pK, len=%d",
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index a5456a9..37d7e7d 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -100,8 +100,10 @@
 		return "SVSL1[4]";
 	case CAM_NOMINAL_VOTE:
 		return "NOM[5]";
+	case CAM_NOMINALL1_VOTE:
+		return "NOML1[6]";
 	case CAM_TURBO_VOTE:
-		return "TURBO[6]";
+		return "TURBO[7]";
 	default:
 		return "";
 	}
@@ -281,6 +283,8 @@
 		*level = CAM_SVSL1_VOTE;
 	} else if (!strcmp(string, "nominal")) {
 		*level = CAM_NOMINAL_VOTE;
+	} else if (!strcmp(string, "nominal_l1")) {
+		*level = CAM_NOMINALL1_VOTE;
 	} else if (!strcmp(string, "turbo")) {
 		*level = CAM_TURBO_VOTE;
 	} else {
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 4c6ed4b..d7432d9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -50,14 +50,15 @@
 /**
  * enum cam_vote_level - Enum for voting level
  *
- * @CAM_SUSPEND_VOTE : Suspend vote
- * @CAM_MINSVS_VOTE  : Min SVS vote
- * @CAM_LOWSVS_VOTE  : Low SVS vote
- * @CAM_SVS_VOTE     : SVS vote
- * @CAM_SVSL1_VOTE   : SVS Plus vote
- * @CAM_NOMINAL_VOTE : Nominal vote
- * @CAM_TURBO_VOTE   : Turbo vote
- * @CAM_MAX_VOTE     : Max voting level, This is invalid level.
+ * @CAM_SUSPEND_VOTE  : Suspend vote
+ * @CAM_MINSVS_VOTE   : Min SVS vote
+ * @CAM_LOWSVS_VOTE   : Low SVS vote
+ * @CAM_SVS_VOTE      : SVS vote
+ * @CAM_SVSL1_VOTE    : SVS Plus vote
+ * @CAM_NOMINAL_VOTE  : Nominal vote
+ * @CAM_NOMINALL1_VOTE: Nominal plus vote
+ * @CAM_TURBO_VOTE    : Turbo vote
+ * @CAM_MAX_VOTE      : Max voting level, This is invalid level.
  */
 enum cam_vote_level {
 	CAM_SUSPEND_VOTE,
@@ -66,6 +67,7 @@
 	CAM_SVS_VOTE,
 	CAM_SVSL1_VOTE,
 	CAM_NOMINAL_VOTE,
+	CAM_NOMINALL1_VOTE,
 	CAM_TURBO_VOTE,
 	CAM_MAX_VOTE,
 };
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 6196a8c..7a0a069 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -676,6 +676,10 @@
 		rc = 0;
 		break;
 	case MSM_ISP_BUFFER_STATE_QUEUED:
+		if (IS_ENABLED(CONFIG_MSM_ISP_V1)) {
+			rc = 0;
+			break;
+		}
 	case MSM_ISP_BUFFER_STATE_DIVERTED:
 	default:
 		WARN(1, "%s: bufq 0x%x, buf idx 0x%x, incorrect state = %d",
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 7c55ad8..a62c465 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -450,7 +450,7 @@
 {
 	struct page *page;
 	struct vfe_device *vfe_dev = vma->vm_private_data;
-	struct isp_proc *isp_page = NULL;
+	struct isp_kstate *isp_page = NULL;
 
 	isp_page = vfe_dev->isp_page;
 
@@ -728,7 +728,7 @@
 	vfe_dev->buf_mgr->init_done = 1;
 	vfe_dev->vfe_open_cnt = 0;
 	/*Allocate a page in kernel and map it to camera user process*/
-	vfe_dev->isp_page = (struct isp_proc *)get_zeroed_page(GFP_KERNEL);
+	vfe_dev->isp_page = (struct isp_kstate *)get_zeroed_page(GFP_KERNEL);
 	if (vfe_dev->isp_page == NULL) {
 		pr_err("%s: no enough memory\n", __func__);
 		rc = -ENOMEM;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 4f97618..fa55d64 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -761,11 +761,6 @@
 	struct msm_vfe_common_dev_data *common_data;
 };
 
-struct isp_proc {
-	uint32_t  kernel_sofid;
-	uint32_t  vfeid;
-};
-
 struct vfe_device {
 	/* Driver private data */
 	struct platform_device *pdev;
@@ -850,7 +845,7 @@
 	uint32_t recovery_irq1_mask;
 	/* total bandwidth per vfe */
 	uint64_t total_bandwidth;
-	struct isp_proc *isp_page;
+	struct isp_kstate *isp_page;
 };
 
 struct vfe_parent_device {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index a95917c..74679b2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -26,7 +26,7 @@
 #define VFE32_EQUAL_SLICE_UB 194
 #define VFE32_AXI_SLICE_UB 792
 #define VFE32_WM_BASE(idx) (0x4C + 0x18 * idx)
-#define VFE32_RDI_BASE(idx) (idx ? 0x734 + 0x4 * (idx - 1) : 0x06FC)
+#define VFE32_RDI_BASE(idx) (idx ? 0x734 + 0x70 * (idx - 1) : 0x06FC)
 #define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
 #define VFE32_XBAR_SHIFT(idx) ((idx % 4) * 8)
 #define VFE32_PING_PONG_BASE(wm, ping_pong) \
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index c5c05d2..a2d4c4a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -749,13 +749,13 @@
 		return;
 
 	if (irq_status0 & BIT(2)) {
-		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
 		ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
 		msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
 					MSM_ISP_COMP_IRQ_EPOCH, ts);
 		msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
 					MSM_ISP_COMP_IRQ_EPOCH);
 		msm_isp_update_error_frame_count(vfe_dev);
+		msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
 		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
 			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
 			stream_count == 0) {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 1b04e1d..1ddbb94 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -175,6 +175,8 @@
 			stream_info->bufq_handle[k] = 0;
 		stream_info->vfe_mask = 0;
 		stream_info->state = AVAILABLE;
+		memset(&stream_info->request_queue_cmd,
+			0, sizeof(stream_info->request_queue_cmd));
 	}
 }
 
@@ -598,7 +600,8 @@
  *
  * Returns void
  */
-static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info)
+static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info,
+		uint32_t drop_reconfig)
 {
 	if (stream_info->stream_type == BURST_STREAM) {
 		if (stream_info->runtime_num_burst_capture == 0 ||
@@ -608,7 +611,8 @@
 				MSM_VFE_STREAM_STOP_PERIOD;
 	}
 
-	if (stream_info->undelivered_request_cnt > 0)
+	if (stream_info->undelivered_request_cnt > 0 &&
+		drop_reconfig != 1)
 		stream_info->current_framedrop_period =
 			MSM_VFE_STREAM_STOP_PERIOD;
 
@@ -663,7 +667,8 @@
 			break;
 		case MSM_ISP_COMP_IRQ_EPOCH:
 			if (stream_info->state == ACTIVE)
-				msm_isp_update_framedrop_reg(stream_info);
+				msm_isp_update_framedrop_reg(stream_info,
+					vfe_dev->isp_page->drop_reconfig);
 			break;
 		default:
 			WARN(1, "Invalid irq %d\n", irq);
@@ -3543,6 +3548,14 @@
 	frame_src = SRC_TO_INTF(stream_info->stream_src);
 	pingpong_status = vfe_dev->hw_info->
 		vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+
+	/* As MCT is still processing it, need to drop the additional requests */
+	if (vfe_dev->isp_page->drop_reconfig) {
+		pr_err("%s: MCT still reconfiguring (drop_reconfig %d), dropping request for frame %d\n",
+			__func__, vfe_dev->isp_page->drop_reconfig, frame_id);
+		goto error;
+	}
+
 	/*
 	 * If PIX stream is active then RDI path uses SOF frame ID of PIX
 	 * In case of standalone RDI streaming, SOF are used from
@@ -3556,9 +3569,18 @@
 		vfe_dev->axi_data.src_info[frame_src].accept_frame == false) {
 		pr_debug("%s:%d invalid time to request frame %d\n",
 			__func__, __LINE__, frame_id);
-		goto error;
-	}
-	if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
+		vfe_dev->isp_page->drop_reconfig = 1;
+	} else if ((vfe_dev->axi_data.src_info[frame_src].active) &&
+			(frame_id ==
+			vfe_dev->axi_data.src_info[frame_src].frame_id) &&
+			(stream_info->undelivered_request_cnt <=
+				MAX_BUFFERS_IN_HW)) {
+		vfe_dev->isp_page->drop_reconfig = 1;
+		pr_debug("%s: vfe_%d request_frame %d cur frame id %d pix %d\n",
+			__func__, vfe_dev->pdev->id, frame_id,
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+			vfe_dev->axi_data.src_info[VFE_PIX_0].active);
+	} else if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
 		vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev->
 		axi_data.src_info[frame_src].sof_counter_step)) ||
 		((!vfe_dev->axi_data.src_info[frame_src].active))) {
@@ -3662,6 +3684,9 @@
 			stream_info->undelivered_request_cnt--;
 			pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
 				__func__, __LINE__);
+			queue_req->cmd_used = 0;
+			list_del(&queue_req->list);
+			stream_info->request_q_cnt--;
 			return rc;
 		}
 
@@ -3698,6 +3723,9 @@
 						flags);
 			pr_err_ratelimited("%s:%d fail to cfg HAL buffer\n",
 				__func__, __LINE__);
+			queue_req->cmd_used = 0;
+			list_del(&queue_req->list);
+			stream_info->request_q_cnt--;
 			return rc;
 		}
 	} else {
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 5057992..6836adc 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -213,32 +213,34 @@
 	long rc = 0;
 	struct ispif_device *ispif =
 		(struct ispif_device *)v4l2_get_subdevdata(sd);
-	struct ispif_cfg_data_ext pcdata;
+	struct ispif_cfg_data_ext pcdata = {0};
 	struct msm_ispif_param_data_ext *params = NULL;
+
+	if (is_compat_task()) {
 #ifdef CONFIG_COMPAT
-	struct ispif_cfg_data_ext_32 *pcdata32 =
-		(struct ispif_cfg_data_ext_32 *)arg;
+		struct ispif_cfg_data_ext_32 *pcdata32 =
+			(struct ispif_cfg_data_ext_32 *)arg;
 
-	if (pcdata32 == NULL) {
-		pr_err("Invalid params passed from user\n");
-		return -EINVAL;
-	}
-	pcdata.cfg_type  = pcdata32->cfg_type;
-	pcdata.size = pcdata32->size;
-	pcdata.data = compat_ptr(pcdata32->data);
-
-#else
-	struct ispif_cfg_data_ext *pcdata64 =
+		if (pcdata32 == NULL) {
+			pr_err("Invalid params passed from user\n");
+			return -EINVAL;
+		}
+		pcdata.cfg_type  = pcdata32->cfg_type;
+		pcdata.size = pcdata32->size;
+		pcdata.data = compat_ptr(pcdata32->data);
+#endif
+	} else {
+		struct ispif_cfg_data_ext *pcdata64 =
 		(struct ispif_cfg_data_ext *)arg;
 
-	if (pcdata64 == NULL) {
-		pr_err("Invalid params passed from user\n");
-		return -EINVAL;
+		if (pcdata64 == NULL) {
+			pr_err("Invalid params passed from user\n");
+			return -EINVAL;
+		}
+		pcdata.cfg_type  = pcdata64->cfg_type;
+		pcdata.size = pcdata64->size;
+		pcdata.data = pcdata64->data;
 	}
-	pcdata.cfg_type  = pcdata64->cfg_type;
-	pcdata.size = pcdata64->size;
-	pcdata.data = pcdata64->data;
-#endif
 	if (pcdata.size != sizeof(struct msm_ispif_param_data_ext)) {
 		pr_err("%s: payload size mismatch\n", __func__);
 		return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
index e458b4df..c86528e 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -575,15 +575,13 @@
 		k_ioctl = *ptr;
 		switch (k_ioctl.id) {
 		case MSM_CAMERA_BUF_MNGR_IOCTL_ID_GET_BUF_BY_IDX: {
+			struct msm_buf_mngr_info buf_info, *tmp = NULL;
 
 			if (k_ioctl.size != sizeof(struct msm_buf_mngr_info))
 				return -EINVAL;
 			if (!k_ioctl.ioctl_ptr)
 				return -EINVAL;
-#ifndef CONFIG_COMPAT
-			{
-				struct msm_buf_mngr_info buf_info, *tmp = NULL;
-
+			if (!is_compat_task()) {
 				MSM_CAM_GET_IOCTL_ARG_PTR(&tmp,
 					&k_ioctl.ioctl_ptr, sizeof(tmp));
 				if (copy_from_user(&buf_info, tmp,
@@ -592,7 +590,7 @@
 				}
 				k_ioctl.ioctl_ptr = (uintptr_t)&buf_info;
 			}
-#endif
+
 			argp = &k_ioctl;
 			rc = msm_cam_buf_mgr_ops(cmd, argp);
 			}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index 53dd371..ee659f6 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -836,7 +836,7 @@
 		break;
 	case VIDIOC_MSM_CSID_RELEASE:
 	case MSM_SD_SHUTDOWN:
-		if (adsp_shmem_get_state() == CAMERA_STATUS_END) {
+		if (adsp_shmem_get_state() != CAMERA_STATUS_END) {
 			/* aDSP still in use */
 			rc = 0;
 			break;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
index 4c5ce02..bcd8a9e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
@@ -28,6 +28,9 @@
 #define mask_enable_clk_B       0x2
 #define mask_ctrl_1_A           0x5
 #define mask_ctrl_1_B           0xA
+#define mask_reset_A            0x1
+#define mask_reset_B            0x7
+#define mask_shutdown_A         0x3
 #define mask_hs_freq_range      0x7F
 #define mask_osc_freq_2         0xFF
 #define mask_osc_freq_3         0xF00
@@ -56,8 +59,6 @@
 	{0x58C, 0xFF},   /* mipi_csiphy_irq_mask_ctrl_lane_0 */
 	{0x5C8, 0xFF},   /* mipi_csiphy_irq_mask_ctrl_lane_clk_0 */
 	{0x20, 0x0},     /* mipi_csiphy_rx_sys_7_00 */
-	{0x28, 0x43},    /* mipi_csiphy_rx_sys_9_00 */
-	{0x380, 0x0},    /* mipi_csiphy_rx_startup_ovr_0_00 */
 	{0x384, 0x0},    /* mipi_csiphy_rx_startup_ovr_1_00 */
 	{0x388, 0xCC},   /* mipi_csiphy_rx_startup_ovr_2_00 */
 	{0x38C, 0x1},    /* mipi_csiphy_rx_startup_ovr_3_00 */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h
new file mode 100644
index 0000000..0bfa60c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_4_2_1_hwreg.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_4_2_1_HWREG_H
+#define MSM_CSIPHY_3_4_2_1_HWREG_H
+
+#define ULPM_WAKE_UP_TIMER_MODE                   2
+#define GLITCH_ELIMINATION_NUM                    0x12 /* bit [6:4] */
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+static struct csiphy_reg_parms_t csiphy_v3_4_2_1 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x858,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+	.combo_clk_mask = 0x10,
+};
+
+static struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_1_3ph = {
+	/*MIPI CSI PHY registers*/
+	{0x814, 0x0},
+	{0x818, 0x1},
+	{0x188, 0x7F},
+	{0x18C, 0x7F},
+	{0x190, 0x0},
+	{0x104, 0x6},
+	{0x108, 0x0},
+	{0x10c, 0x0},
+	{0x114, 0x20},
+	{0x118, 0x3E},
+	{0x11c, 0x41},
+	{0x120, 0x41},
+	{0x124, 0x7F},
+	{0x128, 0x0},
+	{0x12c, 0x0},
+	{0x130, 0x1},
+	{0x134, 0x0},
+	{0x138, 0x0},
+	{0x13C, 0x10},
+	{0x140, 0x1},
+	{0x144, GLITCH_ELIMINATION_NUM},
+	{0x148, 0xFE},
+	{0x14C, 0x1},
+	{0x154, 0x0},
+	{0x15C, 0x33},
+	{0x160, ULPM_WAKE_UP_TIMER_MODE},
+	{0x164, 0x48},
+	{0x168, 0xA0},
+	{0x16C, 0x17},
+	{0x170, 0x41},
+	{0x174, 0x41},
+	{0x178, 0x3E},
+	{0x17C, 0x0},
+	{0x180, 0x0},
+	{0x184, 0x7F},
+	{0x1cc, 0x10},
+	{0x81c, 0x6},
+	{0x82c, 0xFF},
+	{0x830, 0xFF},
+	{0x834, 0xFB},
+	{0x838, 0xFF},
+	{0x83c, 0x7F},
+	{0x840, 0xFF},
+	{0x844, 0xFF},
+	{0x848, 0xEF},
+	{0x84c, 0xFF},
+	{0x850, 0xFF},
+	{0x854, 0xFF},
+	{0x28, 0x0},
+	{0x800, 0x2},
+	{0x0, 0x88},
+	{0x4, 0x8},
+	{0x8, 0x0},
+	{0xC, 0xFF},
+	{0x10, 0x56},
+	{0x2C, 0x1},
+	{0x30, 0x0},
+	{0x34, 0x3},
+	{0x38, 0xfe},
+	{0x3C, 0xB8},
+	{0x1C, 0xE7},
+	{0x14, 0x0},
+	{0x14, 0x60},
+	{0x700, 0x80}
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 9830810..f98e23f 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -25,6 +25,7 @@
 #include "include/msm_csiphy_3_1_hwreg.h"
 #include "include/msm_csiphy_3_2_hwreg.h"
 #include "include/msm_csiphy_3_4_2_hwreg.h"
+#include "include/msm_csiphy_3_4_2_1_hwreg.h"
 #include "include/msm_csiphy_3_5_hwreg.h"
 #include "include/msm_csiphy_5_0_hwreg.h"
 #include "include/msm_csiphy_5_0_1_hwreg.h"
@@ -43,6 +44,7 @@
 #define CSIPHY_VERSION_V31                        0x31
 #define CSIPHY_VERSION_V32                        0x32
 #define CSIPHY_VERSION_V342                       0x342
+#define CSIPHY_VERSION_V342_1                     0x3421
 #define CSIPHY_VERSION_V35                        0x35
 #define CSIPHY_VERSION_V50                        0x500
 #define CSIPHY_VERSION_V501                       0x501
@@ -250,10 +252,13 @@
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_rx_sys_7_00.addr + offset);
 
-	msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
-		mipi_csiphy_rx_sys_9_00.data,
+	value = msm_camera_io_r(csiphybase +
+		csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_rx_clk_lane_6_00.addr + offset);
+	value |= SET_THE_BIT(7);
+	msm_camera_io_w(value,
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
-		mipi_csiphy_rx_sys_9_00.addr + offset);
+		mipi_csiphy_rx_clk_lane_6_00.addr + offset);
 
 	msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_rx_startup_ovr_4_00.data,
@@ -315,7 +320,7 @@
 	uint16_t lane_mask = 0;
 	void __iomem *csiphybase;
 	enum snps_csiphy_mode mode = INVALID_MODE;
-	uint32_t value, num_tries, num_lanes, offset;
+	uint32_t value, num_tries, num_lanes, offset = SNPS_INTERPHY_OFFSET;
 	uint32_t clk_mux_reg = 0;
 
 	csiphybase = csiphy_dev->base;
@@ -495,17 +500,6 @@
 
 		value = msm_camera_io_r(csiphybase +
 			csiphy_dev->ctrl_reg->csiphy_snps_reg.
-			mipi_csiphy_rx_startup_ovr_0_00.addr +
-			SNPS_INTERPHY_OFFSET);
-		value |= SET_THE_BIT(0);
-		value |= SET_THE_BIT(1);
-		msm_camera_io_w(value,
-			csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
-			mipi_csiphy_rx_startup_ovr_0_00.addr +
-			SNPS_INTERPHY_OFFSET);
-
-		value = msm_camera_io_r(csiphybase +
-			csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_rx_startup_ovr_1_00.addr +
 			SNPS_INTERPHY_OFFSET);
 		value &= ~(SET_THE_BIT(0));
@@ -519,6 +513,7 @@
 			csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_rx_clk_lane_6_00.addr);
 		value |= SET_THE_BIT(2);
+		value &= ~(SET_THE_BIT(7));
 		msm_camera_io_w(value,
 			csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 			mipi_csiphy_rx_clk_lane_6_00.addr);
@@ -528,7 +523,7 @@
 			mipi_csiphy_rx_clk_lane_6_00.addr +
 			SNPS_INTERPHY_OFFSET);
 		value |= SET_THE_BIT(3);
-		value |= SET_THE_BIT(7);
+		value &= ~(SET_THE_BIT(7));
 		value &= ~(SET_THE_BIT(2));
 		msm_camera_io_w(value,
 			csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
@@ -590,36 +585,109 @@
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_enable_clk.addr);
 
-	value = 0x0;
-	if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A)
-		value |= mask_ctrl_1_A;
-	if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B)
-		value |= mask_ctrl_1_B;
-	msm_camera_io_w(value,
+	if (mode == TWO_LANE_PHY_A) {
+		msm_camera_io_w(mask_reset_A,
 		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
 		mipi_csiphy_ctrl_1.addr);
 
-	if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A)
-		offset = 0x0;
-	else
-		offset = SNPS_INTERPHY_OFFSET;
+		msm_camera_io_w(mask_ctrl_1_A,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
 
-	value = 0x0;
-	num_tries = 0;
+		value = 0x0;
+		num_tries = 0;
 
-	do {
-		num_tries++;
-		value = msm_camera_io_r(csiphybase +
-			csiphy_dev->ctrl_reg->csiphy_snps_reg.
-			mipi_csiphy_rx_startup_obs_2_00.addr + offset);
-		if ((value | SET_THE_BIT(4)) == value)
-			break;
-		usleep_range(100, 150);
-	} while (num_tries < 6);
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
+	}
 
-	if ((value | SET_THE_BIT(4)) != value) {
-		pr_err("%s: SNPS phy config failed\n", __func__);
-		return -EINVAL;
+	if (mode == TWO_LANE_PHY_B) {
+		msm_camera_io_w(mask_reset_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		msm_camera_io_w(mask_ctrl_1_A | mask_ctrl_1_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		value = 0x0;
+		num_tries = 0;
+
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr + offset);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
+	}
+
+	if (mode == AGGREGATE_MODE) {
+		msm_camera_io_w(mask_shutdown_A,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		msm_camera_io_w(mask_reset_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		value = 0x0;
+		num_tries = 0;
+
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
+
+		msm_camera_io_w(mask_ctrl_1_A | mask_ctrl_1_B,
+		csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+		mipi_csiphy_ctrl_1.addr);
+
+		value = 0x0;
+		num_tries = 0;
+
+		do {
+			num_tries++;
+			value = msm_camera_io_r(csiphybase +
+				csiphy_dev->ctrl_reg->csiphy_snps_reg.
+				mipi_csiphy_rx_startup_obs_2_00.addr + offset);
+			if ((value | SET_THE_BIT(4)) == value)
+				break;
+			usleep_range(100, 150);
+		} while (num_tries < 6);
+
+		if ((value | SET_THE_BIT(4)) != value) {
+			pr_err("%s: SNPS phy config failed\n", __func__);
+			return -EINVAL;
+		}
 	}
 
 	msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
@@ -940,6 +1008,13 @@
 
 	csiphybase = csiphy_dev->base;
 	lane_mask = csiphy_params->lane_mask & 0x1f;
+
+	if (csiphy_dev->hw_version == CSIPHY_VERSION_V342_1) {
+		lane_enable = msm_camera_io_r(csiphybase +
+			csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+				mipi_csiphy_3ph_cmn_ctrl5.addr);
+	}
+
 	for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
 		if (mask == 0x2) {
 			if (lane_mask & mask)
@@ -1030,7 +1105,8 @@
 				csiphy_dev->ctrl_reg->csiphy_3ph_reg.
 				mipi_csiphy_2ph_lnn_cfg1.addr + offset);
 		}
-		if (csiphy_dev->hw_version == CSIPHY_VERSION_V342 &&
+		if ((csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+		    csiphy_dev->hw_version == CSIPHY_VERSION_V342_1) &&
 			csiphy_params->combo_mode == 1) {
 			msm_camera_io_w(0x52,
 				csiphybase +
@@ -1043,8 +1119,9 @@
 				csiphy_dev->ctrl_reg->csiphy_3ph_reg.
 				mipi_csiphy_2ph_lnn_cfg5.addr + offset);
 		}
-		if (clk_lane == 1 &&
-			csiphy_dev->hw_version == CSIPHY_VERSION_V342) {
+		if (clk_lane == 1 &&
+			(csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+			csiphy_dev->hw_version == CSIPHY_VERSION_V342_1)) {
 			msm_camera_io_w(0x1f,
 				csiphybase +
 				csiphy_dev->ctrl_reg->csiphy_3ph_reg.
@@ -1060,7 +1137,8 @@
 			mipi_csiphy_2ph_lnn_test_imp.data,
 			csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
 			mipi_csiphy_2ph_lnn_test_imp.addr + offset);
-		if (csiphy_dev->hw_version == CSIPHY_VERSION_V342) {
+		if ((csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+			csiphy_dev->hw_version == CSIPHY_VERSION_V342_1)) {
 			msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
 				mipi_csiphy_2ph_lnn_ctrl5.data,
 				csiphybase +
@@ -1069,7 +1147,8 @@
 		}
 		mask <<= 1;
 	}
-	if (csiphy_dev->hw_version == CSIPHY_VERSION_V342 &&
+	if ((csiphy_dev->hw_version == CSIPHY_VERSION_V342 ||
+		csiphy_dev->hw_version == CSIPHY_VERSION_V342_1) &&
 		csiphy_params->combo_mode != 1) {
 		msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
 			mipi_csiphy_3ph_cmn_ctrl0.data,
@@ -1305,7 +1384,8 @@
 
 	if (csiphy_dev->hw_version >= CSIPHY_VERSION_V30 &&
 		csiphy_dev->clk_mux_base != NULL &&
-		csiphy_dev->hw_version < CSIPHY_VERSION_V50) {
+		(csiphy_dev->hw_version == CSIPHY_VERSION_V342_1 ||
+		csiphy_dev->hw_version < CSIPHY_VERSION_V50)) {
 		val = msm_camera_io_r(csiphy_dev->clk_mux_base);
 		if (csiphy_params->combo_mode &&
 			(csiphy_params->lane_mask & 0x18) == 0x18) {
@@ -1336,7 +1416,11 @@
 					csiphy_params);
 			csiphy_dev->num_irq_registers = 20;
 		} else {
-			if (csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V50)
+			if (csiphy_dev->hw_dts_version == CSIPHY_VERSION_V342_1)
+				rc = msm_csiphy_2phase_lane_config(csiphy_dev,
+					csiphy_params);
+			else if (csiphy_dev->hw_dts_version >=
+					CSIPHY_VERSION_V50)
 				rc = msm_csiphy_2phase_lane_config_v50(
 					csiphy_dev, csiphy_params);
 			else
@@ -2380,6 +2464,12 @@
 		new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V342;
 		new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
 	} else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+		"qcom,csiphy-v3.4.2.1")) {
+		new_csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_v3_4_2_1_3ph;
+		new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_4_2_1;
+		new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V342_1;
+		new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
+	} else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
 		"qcom,csiphy-v3.5")) {
 		new_csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_v3_5_3ph;
 		new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_5;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 6fc084c..dade2e3 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -83,8 +83,6 @@
 	struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_0;
 	struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_clk_0;
 	struct csiphy_reg_t mipi_csiphy_rx_sys_7_00;
-	struct csiphy_reg_t mipi_csiphy_rx_sys_9_00;
-	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_0_00;
 	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_1_00;
 	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_2_00;
 	struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_3_00;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
index e16629c..118cb93 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
@@ -223,6 +223,8 @@
 	rc = msm_camera_qup_i2c_txdata(client, buf, len);
 	if (rc < 0)
 		S_I2C_DBG("%s fail\n", __func__);
+	kfree(buf);
+	buf = NULL;
 	return rc;
 }
 
@@ -272,6 +274,8 @@
 	rc = msm_camera_qup_i2c_txdata(client, buf, len+num_byte);
 	if (rc < 0)
 		S_I2C_DBG("%s fail\n", __func__);
+	kfree(buf);
+	buf = NULL;
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index c15a2e9..802e581 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -932,6 +932,12 @@
 			break;
 		}
 	}
+
+	if (!inst->clk_data.entry) {
+		dprintk(VIDC_ERR, "%s No match found\n", __func__);
+		rc = -EINVAL;
+	}
+
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b878971..176b9c6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -3138,7 +3138,11 @@
 		return -EINVAL;
 	}
 
-	msm_comm_init_clocks_and_bus_data(inst);
+	rc = msm_comm_init_clocks_and_bus_data(inst);
+	if (rc) {
+		dprintk(VIDC_ERR, "Failed to initialize clocks and bus data\n");
+		goto exit;
+	}
 
 	dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
 	rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index f975409..93418fe 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -183,7 +183,7 @@
 	},
 	{
 		.key = "qcom,max-hw-load",
-		.value = 2219400,
+		.value = 2241480,
 	},
 	{
 		.key = "qcom,max-hq-mbs-per-frame",
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 6975c13..c026fb4 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1175,7 +1175,7 @@
 	__strict_check(device);
 
 	if (!__core_in_valid_state(device)) {
-		dprintk(VIDC_DBG, "%s - fw not in init state\n", __func__);
+		dprintk(VIDC_ERR, "%s - fw not in init state\n", __func__);
 		result = -EINVAL;
 		goto err_q_null;
 	}
@@ -2904,8 +2904,6 @@
 {
 	struct hfi_sfr_struct *vsfr = NULL;
 
-	__set_state(device, VENUS_STATE_DEINIT);
-
 	if (__halt_axi(device))
 		dprintk(VIDC_WARN, "Failed to halt AXI after SYS_ERROR\n");
 
@@ -3163,6 +3161,10 @@
 					"Too many packets in message queue to handle at once, deferring read\n");
 			break;
 		}
+
+		/* do not read packets after sys error packet */
+		if (info->response_type == HAL_SYS_ERROR)
+			break;
 	}
 
 	if (requeue_pm_work && device->res->sw_power_collapsible) {
@@ -3225,8 +3227,13 @@
 	for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
 		i < num_responses; ++i) {
 		struct msm_vidc_cb_info *r = &device->response_pkt[i];
-		dprintk(VIDC_DBG, "Processing response %d of %d, type %d\n",
-			(i + 1), num_responses, r->response_type);
+
+		if (!__core_in_valid_state(device)) {
+			dprintk(VIDC_ERR,
+				"Ignore responses from %d to %d as device is in invalid state\n",
+				(i + 1), num_responses);
+			break;
+		}
 		device->callback(r->response_type, &r->response);
 	}
 
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vdec.c b/drivers/media/platform/msm/vidc_3x/msm_vdec.c
index bb4b6c8..2ff610a 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vdec.c
@@ -1943,6 +1943,7 @@
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->prop.fps = DEFAULT_FPS;
+	inst->prop.operating_rate = 0;
 	memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
 						sizeof(struct msm_vidc_format));
 	memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
@@ -2551,8 +2552,33 @@
 		 */
 		hal_property.enable = !(ctrl->val);
 		pdata = &hal_property;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE:
+			inst->flags &= ~VIDC_REALTIME;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE:
+			inst->flags |= VIDC_REALTIME;
+			break;
+		default:
+			dprintk(VIDC_WARN,
+				"inst(%pK) invalid priority ctrl value %#x\n",
+				inst, ctrl->val);
+			break;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
+		if ((ctrl->val >> 16) < inst->capability.frame_rate.min ||
+			 (ctrl->val >> 16) > inst->capability.frame_rate.max) {
+			dprintk(VIDC_ERR, "Invalid operating rate %u\n",
+				(ctrl->val >> 16));
+			rc = -ENOTSUPP;
+		} else {
+			dprintk(VIDC_DBG,
+				"inst(%pK) operating rate changed from %d to %d\n",
+				inst, inst->prop.operating_rate >> 16,
+					ctrl->val >> 16);
+			inst->prop.operating_rate = ctrl->val;
+		}
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_venc.c b/drivers/media/platform/msm/vidc_3x/msm_venc.c
index ef6e360..5e98a5c 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_venc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_venc.c
@@ -3625,8 +3625,33 @@
 		 */
 		enable.enable = !(ctrl->val);
 		pdata = &enable;
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_DISABLE:
+			inst->flags &= ~VIDC_REALTIME;
+			break;
+		case V4L2_MPEG_VIDC_VIDEO_PRIORITY_REALTIME_ENABLE:
+			inst->flags |= VIDC_REALTIME;
+			break;
+		default:
+			dprintk(VIDC_WARN,
+				"inst(%pK) invalid priority ctrl value %#x\n",
+				inst, ctrl->val);
+			break;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
+		if ((ctrl->val >> 16) < inst->capability.frame_rate.min ||
+			 (ctrl->val >> 16) > inst->capability.frame_rate.max) {
+			dprintk(VIDC_ERR, "Invalid operating rate %u\n",
+				(ctrl->val >> 16));
+			rc = -ENOTSUPP;
+		} else {
+			dprintk(VIDC_DBG,
+				"inst(%pK) operating rate changed from %d to %d\n",
+				inst, inst->prop.operating_rate >> 16,
+					ctrl->val >> 16);
+			inst->prop.operating_rate = ctrl->val;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
 	{
@@ -4067,6 +4092,7 @@
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->prop.fps = DEFAULT_FPS;
+	inst->prop.operating_rate = 0;
 	inst->capability.pixelprocess_capabilities = 0;
 	memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
 						sizeof(struct msm_vidc_format));
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc.c b/drivers/media/platform/msm/vidc_3x/msm_vidc.c
index 983e600c..e289527 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc.c
@@ -1084,7 +1084,7 @@
 		q->ops = msm_venc_get_vb2q_ops();
 	q->mem_ops = &msm_vidc_vb2_mem_ops;
 	q->drv_priv = inst;
-	q->allow_zero_bytesused = 1;
+	q->allow_zero_bytesused = !V4L2_TYPE_IS_OUTPUT(type);
 	return vb2_queue_init(q);
 }
 
@@ -1206,6 +1206,7 @@
 	INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
 	INIT_MSM_VIDC_LIST(&inst->outputbufs);
 	INIT_MSM_VIDC_LIST(&inst->registeredbufs);
+	INIT_MSM_VIDC_LIST(&inst->eosbufs);
 
 	kref_init(&inst->kref);
 
@@ -1292,6 +1293,7 @@
 	msm_comm_ctrl_deinit(inst);
 	msm_smem_delete_client(inst->mem_client);
 fail_mem_client:
+	DEINIT_MSM_VIDC_LIST(&inst->eosbufs);
 	kfree(inst);
 	inst = NULL;
 err_invalid_core:
@@ -1323,6 +1325,8 @@
 				"Failed to release persist buffers\n");
 		}
 
+		msm_comm_release_eos_buffers(inst);
+
 		if (msm_comm_release_output_buffers(inst)) {
 			dprintk(VIDC_ERR,
 				"Failed to release output buffers\n");
@@ -1364,6 +1368,8 @@
 	for (i = 0; i < MAX_PORT_NUM; i++)
 		vb2_queue_release(&inst->bufq[i].vb2_bufq);
 
+	DEINIT_MSM_VIDC_LIST(&inst->eosbufs);
+
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
 	mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
index 502a5c7..a80ae03 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
@@ -258,14 +258,9 @@
 	return 0;
 }
 
-static inline bool is_non_realtime_session(struct msm_vidc_inst *inst)
+static inline bool is_realtime_session(struct msm_vidc_inst *inst)
 {
-	int rc = 0;
-	struct v4l2_control ctrl = {
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY
-	};
-	rc = msm_comm_g_ctrl(inst, &ctrl);
-	return (!rc && ctrl.value);
+	return !!(inst->flags & VIDC_REALTIME);
 }
 
 enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst)
@@ -297,17 +292,15 @@
 
 static int msm_comm_get_mbs_per_sec(struct msm_vidc_inst *inst)
 {
-	int rc;
 	u32 fps;
-	struct v4l2_control ctrl;
 	int mb_per_frame;
+	u32 oper_rate;
 
 	mb_per_frame = msm_comm_get_mbs_per_frame(inst);
+	oper_rate = inst->prop.operating_rate;
 
-	ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE;
-	rc = msm_comm_g_ctrl(inst, &ctrl);
-	if (!rc && ctrl.value) {
-		fps = (ctrl.value >> 16) ? ctrl.value >> 16 : 1;
+	if (oper_rate) {
+		fps = (oper_rate >> 16) ? oper_rate >> 16 : 1;
 		/*
 		 * Check if operating rate is less than fps.
 		 * If Yes, then use fps to scale the clocks
@@ -354,7 +347,7 @@
 	 * ----------------|----------------------|------------------------|
 	 */
 
-	if (is_non_realtime_session(inst) &&
+	if (is_realtime_session(inst) &&
 		(quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD)) {
 		if (!inst->prop.fps) {
 			dprintk(VIDC_INFO, "instance:%pK fps = 0\n", inst);
@@ -535,7 +528,7 @@
 
 	list_for_each_entry(inst, &core->instances, list) {
 		int codec = 0, yuv = 0;
-		struct v4l2_control ctrl;
+		u32 oper_rate;
 
 		codec = inst->session_type == MSM_VIDC_DECODER ?
 			inst->fmts[OUTPUT_PORT].fourcc :
@@ -552,11 +545,11 @@
 		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
 			inst->prop.height[OUTPUT_PORT]);
 
-		ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE;
-		rc = msm_comm_g_ctrl(inst, &ctrl);
-		if (!rc && ctrl.value)
-			vote_data[i].fps = (ctrl.value >> 16) ?
-				ctrl.value >> 16 : 1;
+		oper_rate = inst->prop.operating_rate;
+
+		if (oper_rate)
+			vote_data[i].fps = (oper_rate >> 16) ?
+				oper_rate >> 16 : 1;
 		else
 			vote_data[i].fps = inst->prop.fps;
 
@@ -1774,6 +1767,27 @@
 	return vb;
 }
 
+static bool is_eos_buffer(struct msm_vidc_inst *inst, u32 device_addr)
+{
+	struct eos_buf *temp, *next;
+	bool found = false;
+
+	mutex_lock(&inst->eosbufs.lock);
+	list_for_each_entry_safe(temp, next, &inst->eosbufs.list, list) {
+		if (temp->smem.device_addr == device_addr) {
+			found = true;
+			list_del(&temp->list);
+			msm_comm_smem_free(inst, &temp->smem);
+			kfree(temp);
+			break;
+		}
+	}
+	mutex_unlock(&inst->eosbufs.lock);
+
+	return found;
+}
+
+
 static void handle_ebd(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_data_done *response = data;
@@ -1808,6 +1822,13 @@
 			response->clnt_data)
 			dprintk(VIDC_INFO, "Client data != bufaddr\n");
 		empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+		/* If this is internal EOS buffer, handle it in driver */
+		if (is_eos_buffer(inst, empty_buf_done->packet_buffer)) {
+			dprintk(VIDC_DBG, "Received EOS buffer %pK\n",
+				(void *)empty_buf_done->packet_buffer);
+			goto exit;
+		}
+
 		if (empty_buf_done) {
 			if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
 				dprintk(VIDC_INFO,
@@ -1844,7 +1865,7 @@
 		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD);
 	}
-
+exit:
 	put_inst(inst);
 }
 
@@ -3502,7 +3523,6 @@
 		flags = dec->flags;
 	}
 
-
 	switch (which_cmd) {
 	case V4L2_QCOM_CMD_FLUSH:
 		if (core->state != VIDC_CORE_INVALID &&
@@ -3552,6 +3572,61 @@
 		}
 		break;
 	}
+	case V4L2_DEC_CMD_STOP:
+	{
+		struct vidc_frame_data data = {0};
+		struct hfi_device *hdev = NULL;
+		struct eos_buf *binfo = NULL;
+		u32 smem_flags = 0;
+
+		if (inst->state != MSM_VIDC_START_DONE) {
+			dprintk(VIDC_DBG,
+				"Inst = %pK is not ready for EOS\n", inst);
+			rc = -EINVAL;
+			break;
+		}
+		if (inst->session_type != MSM_VIDC_DECODER) {
+			dprintk(VIDC_DBG,
+				"Non-Decoder session. DEC_STOP is not valid\n");
+			rc = -EINVAL;
+			break;
+		}
+
+		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+		if (!binfo) {
+			dprintk(VIDC_ERR, "%s: Out of memory\n", __func__);
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (inst->flags & VIDC_SECURE)
+			smem_flags |= SMEM_SECURE;
+
+		msm_comm_smem_alloc(inst,
+			SZ_4K, 1, smem_flags, HAL_BUFFER_INPUT, 0);
+
+		mutex_lock(&inst->eosbufs.lock);
+		list_add_tail(&binfo->list, &inst->eosbufs.list);
+		mutex_unlock(&inst->eosbufs.lock);
+
+		data.alloc_len = binfo->smem.size;
+		data.device_addr = binfo->smem.device_addr;
+		data.clnt_data = data.device_addr;
+		data.buffer_type = HAL_BUFFER_INPUT;
+		data.filled_len = 0;
+		data.offset = 0;
+		data.flags = HAL_BUFFERFLAG_EOS;
+		data.timestamp = LLONG_MAX;
+		data.extradata_addr = data.device_addr;
+		data.extradata_size = 0;
+		dprintk(VIDC_DBG, "Queueing EOS buffer %pK\n",
+			(void *)data.device_addr);
+		hdev = inst->core->device;
+
+		rc = call_hfi_op(hdev, session_etb, inst->session, &data);
+		break;
+	}
+
 	default:
 		dprintk(VIDC_ERR, "Unknown Command %d\n", which_cmd);
 		rc = -ENOTSUPP;
@@ -4268,6 +4343,26 @@
 	return rc;
 }
 
+void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst)
+{
+	struct eos_buf *buf, *next;
+
+	if (!inst) {
+		dprintk(VIDC_ERR,
+			"Invalid instance pointer = %pK\n", inst);
+		return;
+	}
+
+	mutex_lock(&inst->eosbufs.lock);
+	list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) {
+		list_del(&buf->list);
+		kfree(buf);
+	}
+
+	INIT_LIST_HEAD(&inst->eosbufs.list);
+	mutex_unlock(&inst->eosbufs.lock);
+}
+
 int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst)
 {
 	struct msm_smem *handle;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.h
index 9b71709..8b140a7 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.h
@@ -55,6 +55,7 @@
 int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
 					bool check_for_reuse);
 int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst);
+void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst);
 int msm_comm_release_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_force_cleanup(struct msm_vidc_inst *inst);
 int msm_comm_suspend(int core_id);
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_dcvs.c
index ac338e1..8f99bfc 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_dcvs.c
@@ -596,7 +596,7 @@
 	res = &core->resources;
 
 	if (!res->dcvs_limit) {
-		dprintk(VIDC_WARN,
+		dprintk(VIDC_INFO,
 				"%s: dcvs limit table not found\n", __func__);
 		return false;
 	}
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
index c7eb5f1..56b86d7 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h
@@ -121,6 +121,10 @@
 	INIT_LIST_HEAD(&mlist->list);
 }
 
+static inline void DEINIT_MSM_VIDC_LIST(struct msm_vidc_list *mlist)
+{
+	mutex_destroy(&mlist->lock);
+}
 enum buffer_owner {
 	DRIVER,
 	FIRMWARE,
@@ -128,6 +132,11 @@
 	MAX_OWNER
 };
 
+struct eos_buf {
+	struct list_head list;
+	struct msm_smem smem;
+};
+
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
@@ -164,6 +173,7 @@
 	u32 height[MAX_PORT_NUM];
 	u32 fps;
 	u32 bitrate;
+	u32 operating_rate;
 };
 
 struct buf_queue {
@@ -230,6 +240,7 @@
 	VIDC_TURBO = BIT(1),
 	VIDC_THUMBNAIL = BIT(2),
 	VIDC_LOW_POWER = BIT(3),
+	VIDC_REALTIME = BIT(4),
 };
 
 struct msm_vidc_core {
@@ -268,6 +279,7 @@
 	struct msm_vidc_list persistbufs;
 	struct msm_vidc_list pending_getpropq;
 	struct msm_vidc_list outputbufs;
+	struct msm_vidc_list eosbufs;
 	struct msm_vidc_list registeredbufs;
 	struct buffer_requirements buff_req;
 	void *mem_client;
diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
index c385088..1bd6ae8 100644
--- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
@@ -3645,6 +3645,12 @@
 		i < num_responses; ++i) {
 		struct msm_vidc_cb_info *r = &device->response_pkt[i];
 
+		if (!__core_in_valid_state(device)) {
+			dprintk(VIDC_ERR,
+				"Ignore responses from %d to %d as device is in invalid state\n",
+				(i + 1), num_responses);
+			break;
+		}
 		device->callback(r->response_type, &r->response);
 	}
 
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 0321d84..1e98b48 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -304,7 +304,7 @@
 static int isp_xclk_init(struct isp_device *isp)
 {
 	struct device_node *np = isp->dev->of_node;
-	struct clk_init_data init;
+	struct clk_init_data init = { 0 };
 	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
@@ -1941,6 +1941,7 @@
 
 static void isp_detach_iommu(struct isp_device *isp)
 {
+	arm_iommu_detach_device(isp->dev);
 	arm_iommu_release_mapping(isp->mapping);
 	isp->mapping = NULL;
 	iommu_group_remove_device(isp->dev);
@@ -1974,8 +1975,7 @@
 	mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
 	if (IS_ERR(mapping)) {
 		dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
-		ret = PTR_ERR(mapping);
-		goto error;
+		return PTR_ERR(mapping);
 	}
 
 	isp->mapping = mapping;
@@ -1990,7 +1990,8 @@
 	return 0;
 
 error:
-	isp_detach_iommu(isp);
+	arm_iommu_release_mapping(isp->mapping);
+	isp->mapping = NULL;
 	return ret;
 }
 
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index d1746ec..db1110a 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1280,7 +1280,7 @@
 		/* ...issue software reset */
 		ret = jpu_reset(jpu);
 		if (ret)
-			goto device_prepare_rollback;
+			goto jpu_reset_rollback;
 	}
 
 	jpu->ref_count++;
@@ -1288,6 +1288,8 @@
 	mutex_unlock(&jpu->mutex);
 	return 0;
 
+jpu_reset_rollback:
+	clk_disable_unprepare(jpu->clk);
 device_prepare_rollback:
 	mutex_unlock(&jpu->mutex);
 v4l_prepare_rollback:
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 5c9db09..d9710b5 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -117,6 +117,8 @@
 
 	if (camif->sensor.power_count == !on)
 		err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+	if (err == -ENOIOCTLCMD)
+		err = 0;
 	if (!err)
 		sensor->power_count += on ? 1 : -1;
 
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 7c24da5..8051c13 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -249,24 +249,24 @@
 static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
 {
 	struct s5p_mfc_dev *dev = ctx->dev;
-	struct s5p_mfc_buf  *dst_buf, *src_buf;
-	size_t dec_y_addr;
+	struct s5p_mfc_buf *dst_buf, *src_buf;
+	u32 dec_y_addr;
 	unsigned int frame_type;
 
 	/* Make sure we actually have a new frame before continuing. */
 	frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
 	if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
 		return;
-	dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
+	dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
 
 	/* Copy timestamp / timecode from decoded src to dst and set
 	   appropriate flags. */
 	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
 	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
-		if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
-				== dec_y_addr) {
-			dst_buf->b->timecode =
-						src_buf->b->timecode;
+		u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
+		if (addr == dec_y_addr) {
+			dst_buf->b->timecode = src_buf->b->timecode;
 			dst_buf->b->vb2_buf.timestamp =
 						src_buf->b->vb2_buf.timestamp;
 			dst_buf->b->flags &=
@@ -302,10 +302,10 @@
 {
 	struct s5p_mfc_dev *dev = ctx->dev;
 	struct s5p_mfc_buf  *dst_buf;
-	size_t dspl_y_addr;
+	u32 dspl_y_addr;
 	unsigned int frame_type;
 
-	dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
+	dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
 	if (IS_MFCV6_PLUS(dev))
 		frame_type = s5p_mfc_hw_call(dev->mfc_ops,
 			get_disp_frame_type, ctx);
@@ -324,9 +324,10 @@
 	/* The MFC returns address of the buffer, now we have to
 	 * check which videobuf does it correspond to */
 	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+		u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
 		/* Check if this is the buffer we're looking for */
-		if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
-				== dspl_y_addr) {
+		if (addr == dspl_y_addr) {
 			list_del(&dst_buf->list);
 			ctx->dst_queue_cnt--;
 			dst_buf->b->sequence = ctx->sequence;
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index ee0470a..f218886 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -96,7 +96,7 @@
  */
 int si470x_get_register(struct si470x_device *radio, int regnr)
 {
-	u16 buf[READ_REG_NUM];
+	__be16 buf[READ_REG_NUM];
 	struct i2c_msg msgs[1] = {
 		{
 			.addr = radio->client->addr,
@@ -121,7 +121,7 @@
 int si470x_set_register(struct si470x_device *radio, int regnr)
 {
 	int i;
-	u16 buf[WRITE_REG_NUM];
+	__be16 buf[WRITE_REG_NUM];
 	struct i2c_msg msgs[1] = {
 		{
 			.addr = radio->client->addr,
@@ -151,7 +151,7 @@
 static int si470x_get_all_registers(struct si470x_device *radio)
 {
 	int i;
-	u16 buf[READ_REG_NUM];
+	__be16 buf[READ_REG_NUM];
 	struct i2c_msg msgs[1] = {
 		{
 			.addr = radio->client->addr,
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 0426b21..ee88ae8 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -273,6 +273,11 @@
 
 	ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
 					THIS_MODULE, &dev->udev->dev, adapter_nr);
+	if (ret < 0) {
+		pr_err("tm6000: couldn't register the adapter!\n");
+		goto err;
+	}
+
 	dvb->adapter.priv = dev;
 
 	if (dvb->frontend) {
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index b5589d5..48503f30 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -163,14 +163,27 @@
 	}
 }
 
+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+	/*
+	 * Return the size of the video probe and commit controls, which depends
+	 * on the protocol version.
+	 */
+	if (stream->dev->uvc_version < 0x0110)
+		return 26;
+	else if (stream->dev->uvc_version < 0x0150)
+		return 34;
+	else
+		return 48;
+}
+
 static int uvc_get_video_ctrl(struct uvc_streaming *stream,
 	struct uvc_streaming_control *ctrl, int probe, __u8 query)
 {
+	__u16 size = uvc_video_ctrl_size(stream);
 	__u8 *data;
-	__u16 size;
 	int ret;
 
-	size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
 	if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
 			query == UVC_GET_DEF)
 		return -EIO;
@@ -225,7 +238,7 @@
 	ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
 	ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
 
-	if (size == 34) {
+	if (size >= 34) {
 		ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
 		ctrl->bmFramingInfo = data[30];
 		ctrl->bPreferedVersion = data[31];
@@ -254,11 +267,10 @@
 static int uvc_set_video_ctrl(struct uvc_streaming *stream,
 	struct uvc_streaming_control *ctrl, int probe)
 {
+	__u16 size = uvc_video_ctrl_size(stream);
 	__u8 *data;
-	__u16 size;
 	int ret;
 
-	size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
 	data = kzalloc(size, GFP_KERNEL);
 	if (data == NULL)
 		return -ENOMEM;
@@ -275,7 +287,7 @@
 	put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
 	put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
 
-	if (size == 34) {
+	if (size >= 34) {
 		put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
 		data[30] = ctrl->bmFramingInfo;
 		data[31] = ctrl->bPreferedVersion;
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 8d3171c..567d868 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -119,14 +119,6 @@
 	if (sev == NULL)
 		return;
 
-	/*
-	 * If the event has been added to the fh->subscribed list, but its
-	 * add op has not completed yet elems will be 0, treat this as
-	 * not being subscribed.
-	 */
-	if (!sev->elems)
-		return;
-
 	/* Increase event sequence number on fh. */
 	fh->sequence++;
 
@@ -212,6 +204,7 @@
 	struct v4l2_subscribed_event *sev, *found_ev;
 	unsigned long flags;
 	unsigned i;
+	int ret = 0;
 
 	if (sub->type == V4L2_EVENT_ALL)
 		return -EINVAL;
@@ -229,31 +222,36 @@
 	sev->flags = sub->flags;
 	sev->fh = fh;
 	sev->ops = ops;
+	sev->elems = elems;
+
+	mutex_lock(&fh->subscribe_lock);
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-	if (!found_ev)
-		list_add(&sev->list, &fh->subscribed);
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
 	if (found_ev) {
+		/* Already listening */
 		kfree(sev);
-		return 0; /* Already listening */
+		goto out_unlock;
 	}
 
 	if (sev->ops && sev->ops->add) {
-		int ret = sev->ops->add(sev, elems);
+		ret = sev->ops->add(sev, elems);
 		if (ret) {
-			sev->ops = NULL;
-			v4l2_event_unsubscribe(fh, sub);
-			return ret;
+			kfree(sev);
+			goto out_unlock;
 		}
 	}
 
-	/* Mark as ready for use */
-	sev->elems = elems;
+	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+	list_add(&sev->list, &fh->subscribed);
+	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&fh->subscribe_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -292,6 +290,8 @@
 		return 0;
 	}
 
+	mutex_lock(&fh->subscribe_lock);
+
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
 	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -309,6 +309,8 @@
 	if (sev && sev->ops && sev->ops->del)
 		sev->ops->del(sev);
 
+	mutex_unlock(&fh->subscribe_lock);
+
 	kfree(sev);
 
 	return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index c183f09..0c5e690 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -50,6 +50,7 @@
 	INIT_LIST_HEAD(&fh->available);
 	INIT_LIST_HEAD(&fh->subscribed);
 	fh->sequence = -1;
+	mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -95,6 +96,7 @@
 		return;
 	v4l_disable_media_source(fh->vdev);
 	v4l2_event_unsubscribe_all(fh);
+	mutex_destroy(&fh->subscribe_lock);
 	fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 4299ce0..f7ca1fa 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -914,9 +914,12 @@
 	dprintk(4, "done processing on buffer %d, state: %d\n",
 			vb->index, state);
 
-	/* sync buffers */
-	for (plane = 0; plane < vb->num_planes; ++plane)
-		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+	if (state != VB2_BUF_STATE_QUEUED &&
+	    state != VB2_BUF_STATE_REQUEUEING) {
+		/* sync buffers */
+		for (plane = 0; plane < vb->num_planes; ++plane)
+			call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+	}
 
 	spin_lock_irqsave(&q->done_lock, flags);
 	if (state == VB2_BUF_STATE_QUEUED ||
@@ -1372,6 +1375,11 @@
 	struct vb2_buffer *vb;
 	int ret;
 
+	if (q->error) {
+		dprintk(1, "fatal error occurred on queue\n");
+		return -EIO;
+	}
+
 	vb = q->bufs[index];
 
 	switch (vb->state) {
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a4803ac..1d49a8d 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -20,14 +20,6 @@
 #include "mc.h"
 
 #define MC_INTSTATUS 0x000
-#define  MC_INT_DECERR_MTS (1 << 16)
-#define  MC_INT_SECERR_SEC (1 << 13)
-#define  MC_INT_DECERR_VPR (1 << 12)
-#define  MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
-#define  MC_INT_INVALID_SMMU_PAGE (1 << 10)
-#define  MC_INT_ARBITRATION_EMEM (1 << 9)
-#define  MC_INT_SECURITY_VIOLATION (1 << 8)
-#define  MC_INT_DECERR_EMEM (1 << 6)
 
 #define MC_INTMASK 0x004
 
@@ -248,12 +240,13 @@
 static irqreturn_t tegra_mc_irq(int irq, void *data)
 {
 	struct tegra_mc *mc = data;
-	unsigned long status, mask;
+	unsigned long status;
 	unsigned int bit;
 
 	/* mask all interrupts to avoid flooding */
-	status = mc_readl(mc, MC_INTSTATUS);
-	mask = mc_readl(mc, MC_INTMASK);
+	status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+	if (!status)
+		return IRQ_NONE;
 
 	for_each_set_bit(bit, &status, 32) {
 		const char *error = status_names[bit] ?: "unknown";
@@ -346,7 +339,6 @@
 	const struct of_device_id *match;
 	struct resource *res;
 	struct tegra_mc *mc;
-	u32 value;
 	int err;
 
 	match = of_match_node(tegra_mc_of_match, pdev->dev.of_node);
@@ -414,11 +406,7 @@
 
 	WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n");
 
-	value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
-		MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
-		MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM;
-
-	mc_writel(mc, value, MC_INTMASK);
+	mc_writel(mc, mc->soc->intmask, MC_INTMASK);
 
 	return 0;
 }
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index ddb1667..24e020b 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -14,6 +14,15 @@
 
 #include <soc/tegra/mc.h>
 
+#define MC_INT_DECERR_MTS (1 << 16)
+#define MC_INT_SECERR_SEC (1 << 13)
+#define MC_INT_DECERR_VPR (1 << 12)
+#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
+#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
+#define MC_INT_ARBITRATION_EMEM (1 << 9)
+#define MC_INT_SECURITY_VIOLATION (1 << 8)
+#define MC_INT_DECERR_EMEM (1 << 6)
+
 static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
 {
 	return readl(mc->regs + offset);
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index ba8fff3..6d2a5a8 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -930,4 +930,6 @@
 	.atom_size = 32,
 	.client_id_mask = 0x7f,
 	.smmu = &tegra114_smmu_soc,
+	.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+		   MC_INT_DECERR_EMEM,
 };
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 5a58e44..9f68a56 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1020,6 +1020,9 @@
 	.smmu = &tegra124_smmu_soc,
 	.emem_regs = tegra124_mc_emem_regs,
 	.num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
+	.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+		   MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+		   MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
 };
 #endif /* CONFIG_ARCH_TEGRA_124_SOC */
 
@@ -1042,5 +1045,8 @@
 	.atom_size = 32,
 	.client_id_mask = 0x7f,
 	.smmu = &tegra132_smmu_soc,
+	.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+		   MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+		   MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
 };
 #endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 5e144ab..47c78a6 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -1077,4 +1077,7 @@
 	.atom_size = 64,
 	.client_id_mask = 0xff,
 	.smmu = &tegra210_smmu_soc,
+	.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+		   MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+		   MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
 };
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index b447378..d068942 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -952,4 +952,6 @@
 	.atom_size = 16,
 	.client_id_mask = 0x7f,
 	.smmu = &tegra30_smmu_soc,
+	.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+		   MC_INT_DECERR_EMEM,
 };
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index 84e3131..7b9052e 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -146,14 +146,14 @@
 	unsigned char zero;
 	int ret;
 
-	i2c_lock_adapter(i2c->adapter);
+	i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
 	read_device(i2c, 0xFA, 0, &zero);
 	read_device(i2c, 0xFB, 0, &zero);
 	read_device(i2c, 0xFF, 0, &zero);
 	ret = write_device(i2c, reg, 1, &data);
 	read_device(i2c, 0xFE, 0, &zero);
 	read_device(i2c, 0xFC, 0, &zero);
-	i2c_unlock_adapter(i2c->adapter);
+	i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
 	return ret;
 }
 EXPORT_SYMBOL(pm860x_page_reg_write);
@@ -164,14 +164,14 @@
 	unsigned char zero = 0;
 	int ret;
 
-	i2c_lock_adapter(i2c->adapter);
+	i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
 	read_device(i2c, 0xfa, 0, &zero);
 	read_device(i2c, 0xfb, 0, &zero);
 	read_device(i2c, 0xff, 0, &zero);
 	ret = read_device(i2c, reg, count, buf);
 	read_device(i2c, 0xFE, 0, &zero);
 	read_device(i2c, 0xFC, 0, &zero);
-	i2c_unlock_adapter(i2c->adapter);
+	i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
 	return ret;
 }
 EXPORT_SYMBOL(pm860x_page_bulk_read);
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index abd8342..3e18d25 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -86,7 +86,11 @@
 
 	mutex_init(&ec_dev->lock);
 
-	cros_ec_query_all(ec_dev);
+	err = cros_ec_query_all(ec_dev);
+	if (err) {
+		dev_err(dev, "Cannot identify the EC: error %d\n", err);
+		return err;
+	}
 
 	if (ec_dev->irq) {
 		err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread,
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 0fc6299..11347a3 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -49,7 +49,7 @@
 	.reg_bits = 32,
 	.reg_stride = HI655X_STRIDE,
 	.val_bits = 8,
-	.max_register = HI655X_BUS_ADDR(0xFFF),
+	.max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
 };
 
 static struct resource pwrkey_resources[] = {
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 4053435..3270b8d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -714,6 +714,7 @@
 	smdev->pdev.name = name;
 	smdev->pdev.id = sm->pdev_id;
 	smdev->pdev.dev.parent = sm->dev;
+	smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
 
 	if (res_count) {
 		smdev->pdev.resource = (struct resource *)(smdev+1);
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c8f027b..798f0a8 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -209,14 +209,13 @@
 	 * The TSC_ADC_SS controller design assumes the OCP clock is
 	 * at least 6x faster than the ADC clock.
 	 */
-	clk = clk_get(&pdev->dev, "adc_tsc_fck");
+	clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "failed to get TSC fck\n");
 		err = PTR_ERR(clk);
 		goto err_disable_clk;
 	}
 	clock_rate = clk_get_rate(clk);
-	clk_put(clk);
 	tscadc->clk_div = clock_rate / ADC_CLK;
 
 	/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a233173..cf5764e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -827,3 +827,13 @@
 source "drivers/misc/cxl/Kconfig"
 source "drivers/misc/fpr_FingerprintCard/Kconfig"
 endmenu
+
+config OKL4_LINK_SHBUF
+	tristate "OKL4 link with shared buffer transport"
+	default y
+	depends on OKL4_GUEST
+	help
+	  Enable driver for OKL4 inter-cell links using the "shared-buffer"
+	  transport. This driver presents the link to Linux as a character device
+	  which can be written to or read from to access the shared memory. An ioctl
+	  on the device is used to send a virtual interrupt to the partner cell.
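
[Illustrative note, not part of the patch] The help text above is implemented by the okl4-link-shbuf driver added later in this series: each link appears as /dev/okl4-<label>, the shared buffer is accessed through read()/write()/mmap(), an incoming vIRQ is reported by poll() as POLLPRI, and the two ioctls (OKL4_LINK_SHBUF_IOCTL_IRQ_TX / _IRQ_CLR) each take a pointer to a u64 payload. The sketch below is a hypothetical userspace client for a link labelled "foo" (matching the device-tree example embedded in the driver); the header install path and the link name are assumptions, not something this patch defines.

/* Hypothetical userspace client for an okl4-link-shbuf link labelled "foo". */
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/okl4-link-shbuf.h>	/* assumed install path of the uapi header */

int main(void)
{
	uint64_t payload = 1;
	char msg[] = "hello";
	char reply[64] = "";
	struct pollfd pfd;
	int fd;

	fd = open("/dev/okl4-foo", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Write into the shared buffer, then raise the outgoing vIRQ. */
	if (pwrite(fd, msg, sizeof(msg), 0) < 0)
		perror("pwrite");
	if (ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &payload))
		perror("ioctl(IRQ_TX)");

	/* Wait for the partner cell's vIRQ, reported as POLLPRI. */
	pfd.fd = fd;
	pfd.events = POLLPRI;
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
		/* Acknowledge the vIRQ and read back its accumulated payload. */
		if (ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &payload) == 0)
			printf("vIRQ payload: %#llx\n",
			       (unsigned long long)payload);
		if (pread(fd, reply, sizeof(reply) - 1, 0) > 0)
			printf("buffer: %s\n", reply);
	}

	close(fd);
	return 0;
}

The IRQ_CLR path mirrors the driver's interrupt handler, which ORs successive payloads into an atomic64 until userspace clears it.
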
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8e5d0f6..6494a66 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -80,3 +80,5 @@
 targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
 $(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
 	$(call if_changed,objcopy)
+
+obj-$(CONFIG_OKL4_LINK_SHBUF)    += okl4-link-shbuf.o
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index cc1706a..a078d49 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -293,7 +293,7 @@
 	int rc;
 
 	rc = atomic_inc_unless_negative(&adapter->contexts_num);
-	return rc >= 0 ? 0 : -EBUSY;
+	return rc ? 0 : -EBUSY;
 }
 
 void cxl_adapter_context_put(struct cxl *adapter)
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 90520d7..9cde4c5 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/sysfs.h>
+#include <linux/nospec.h>
 
 static DEFINE_MUTEX(compass_mutex);
 
@@ -50,6 +51,7 @@
 		return ret;
 	if (val >= strlen(map))
 		return -EINVAL;
+	val = array_index_nospec(val, strlen(map));
 	mutex_lock(&compass_mutex);
 	ret = compass_command(c, map[val]);
 	mutex_unlock(&compass_mutex);
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 520f584..65ad7e5 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -507,35 +507,14 @@
 static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	void __iomem *address = (void __iomem *)file->private_data;
-	unsigned char *page;
-	int retval;
 	int len = 0;
 	unsigned int value;
-
-	if (*offset < 0)
-		return -EINVAL;
-	if (count == 0 || count > 1024)
-		return 0;
-	if (*offset != 0)
-		return 0;
-
-	page = (unsigned char *)__get_free_page(GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
+	char lbuf[20];
 
 	value = readl(address);
-	len = sprintf(page, "%d\n", value);
+	len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
 
-	if (copy_to_user(buf, page, len)) {
-		retval = -EFAULT;
-		goto exit;
-	}
-	*offset += len;
-	retval = len;
-
-exit:
-	free_page((unsigned long)page);
-	return retval;
+	return simple_read_from_buffer(buf, count, offset, lbuf, len);
 }
 
 static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index cfa1039..9966891 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -23,6 +23,7 @@
 void lkdtm_ATOMIC_OVERFLOW(void);
 void lkdtm_CORRUPT_LIST_ADD(void);
 void lkdtm_CORRUPT_LIST_DEL(void);
+void lkdtm_CORRUPT_USER_DS(void);
 
 /* lkdtm_heap.c */
 void lkdtm_OVERWRITE_ALLOCATION(void);
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index bb3bb8e..77949fa 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -7,6 +7,7 @@
 #include "lkdtm.h"
 #include <linux/list.h>
 #include <linux/sched.h>
+#include <linux/uaccess.h>
 
 struct lkdtm_list {
 	struct list_head node;
@@ -220,3 +221,12 @@
 	else
 		pr_err("list_del() corruption not detected!\n");
 }
+
+void lkdtm_CORRUPT_USER_DS(void)
+{
+	pr_info("setting bad task size limit\n");
+	set_fs(KERNEL_DS);
+
+	/* Make sure we do not keep running with a KERNEL_DS! */
+	force_sig(SIGKILL, current);
+}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 4d44084..b72fb64 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -199,6 +199,7 @@
 	CRASHTYPE(OVERFLOW),
 	CRASHTYPE(CORRUPT_LIST_ADD),
 	CRASHTYPE(CORRUPT_LIST_DEL),
+	CRASHTYPE(CORRUPT_USER_DS),
 	CRASHTYPE(CORRUPT_STACK),
 	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
 	CRASHTYPE(OVERWRITE_ALLOCATION),
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 75b9d4a..371f5f6 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -178,7 +178,7 @@
 
 	ret = 0;
 	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
-	if (bytes_recv < if_version_length) {
+	if (bytes_recv < 0 || bytes_recv < if_version_length) {
 		dev_err(bus->dev, "Could not read IF version\n");
 		ret = -EIO;
 		goto err;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index dd7f15a..ae4a570 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1137,15 +1137,18 @@
 
 		props_res = (struct hbm_props_response *)mei_msg;
 
-		if (props_res->status) {
+		if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
+			dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+				props_res->me_addr);
+		} else if (props_res->status) {
 			dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
 				props_res->status,
 				mei_hbm_status_str(props_res->status));
 			return -EPROTO;
+		} else {
+			mei_hbm_me_cl_add(dev, props_res);
 		}
 
-		mei_hbm_me_cl_add(dev, props_res);
-
 		/* request property for the next client */
 		if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
 			return -EIO;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 60f5a8d..8904491 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -304,7 +304,6 @@
 		goto out;
 	}
 
-	*offset = 0;
 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
 	if (!cb) {
 		rets = -ENOMEM;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f9c6ec4..013a7b3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -229,8 +229,11 @@
 	if (!pci_dev_run_wake(pdev))
 		mei_me_set_pm_domain(dev);
 
-	if (mei_pg_is_enabled(dev))
+	if (mei_pg_is_enabled(dev)) {
 		pm_runtime_put_noidle(&pdev->dev);
+		if (hw->d0i3_supported)
+			pm_runtime_allow(&pdev->dev);
+	}
 
 	dev_dbg(&pdev->dev, "initialization successful.\n");
 
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index ddc9e4b..56efa9d 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -370,11 +370,10 @@
 			goto scif_bind_exit;
 		}
 	} else {
-		pn = scif_get_new_port();
-		if (!pn) {
-			ret = -ENOSPC;
+		ret = scif_get_new_port();
+		if (ret < 0)
 			goto scif_bind_exit;
-		}
+		pn = ret;
 	}
 
 	ep->state = SCIFEP_BOUND;
@@ -648,13 +647,12 @@
 			err = -EISCONN;
 		break;
 	case SCIFEP_UNBOUND:
-		ep->port.port = scif_get_new_port();
-		if (!ep->port.port) {
-			err = -ENOSPC;
-		} else {
-			ep->port.node = scif_info.nodeid;
-			ep->conn_async_state = ASYNC_CONN_IDLE;
-		}
+		err = scif_get_new_port();
+		if (err < 0)
+			break;
+		ep->port.port = err;
+		ep->port.node = scif_info.nodeid;
+		ep->conn_async_state = ASYNC_CONN_IDLE;
 		/* Fall through */
 	case SCIFEP_BOUND:
 		/*
diff --git a/drivers/misc/okl4-link-shbuf.c b/drivers/misc/okl4-link-shbuf.c
new file mode 100644
index 0000000..de65ea0
--- /dev/null
+++ b/drivers/misc/okl4-link-shbuf.c
@@ -0,0 +1,667 @@
+/*
+ * Driver for inter-cell links using the shared-buffer transport.
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/atomic.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/version.h>
+#include <microvisor/microvisor.h>
+#include <uapi/linux/okl4-link-shbuf.h>
+
+static const char DEVICE_NAME[] = "okl4_link_shbuf";
+
+/* Created devices will appear as /dev/<DEV_PREFIX><name> */
+static const char DEV_PREFIX[] = "okl4-";
+
+static const struct of_device_id okl4_link_shbuf_match[] = {
+	{
+		.compatible = "okl,microvisor-link-shbuf",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, okl4_link_shbuf_match);
+
+static struct class *link_shbuf_class;
+static dev_t link_shbuf_dev;
+
+/* A lock used to protect access to link_shbuf_dev */
+static spinlock_t device_number_allocate;
+
+/* Sentinel values for indicating missing communication channels */
+static const u32 NO_OUTGOING_IRQ = 0;
+static const int NO_INCOMING_IRQ = -1;
+
+/* Private data for this driver */
+struct link_shbuf_data {
+
+	/* Outgoing vIRQ */
+	u32 virqline;
+
+	/* Incoming vIRQ */
+	int virq;
+	atomic64_t virq_payload;
+	bool virq_pending;
+	wait_queue_head_t virq_wq;
+
+	/* Shared memory region */
+	void *base;
+	fmode_t permissions;
+	struct resource buffer;
+
+	/* Device data */
+	dev_t devt;
+	struct device *dev;
+	struct cdev cdev;
+
+};
+
+static bool link_shbuf_data_invariant(const struct link_shbuf_data *priv)
+{
+	if (!priv)
+		return false;
+
+	if (!priv->base || (uintptr_t)priv->base % PAGE_SIZE != 0)
+		return false;
+
+	if (resource_size(&priv->buffer) == 0)
+		return false;
+
+	if (!priv->dev)
+		return false;
+
+	return true;
+}
+
+static bool link_shbuf_valid_access(size_t size, loff_t pos, size_t count)
+{
+	return pos < size && count <= size && size - count >= pos;
+}
+
+static ssize_t link_shbuf_read(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	long remaining;
+	const struct link_shbuf_data *priv;
+
+	/* The file should have been opened with read access to reach here */
+	if (WARN_ON(!(file->f_mode & FMODE_READ)))
+		return -EINVAL;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+		return -EINVAL;
+
+	remaining = copy_to_user(buffer, priv->base + *ppos, count);
+	*ppos += count - remaining;
+	return count - remaining;
+}
+
+static ssize_t link_shbuf_write(struct file *file, const char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	long remaining;
+	const struct link_shbuf_data *priv;
+
+	/* The file should have been opened with write access to reach here */
+	if (WARN_ON(!(file->f_mode & FMODE_WRITE)))
+		return -EINVAL;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+		return -EINVAL;
+
+	remaining = copy_from_user(priv->base + *ppos, buffer, count);
+	*ppos += count - remaining;
+	return count - remaining;
+}
+
+static unsigned int link_shbuf_poll(struct file *file, poll_table *table)
+{
+	struct link_shbuf_data *priv;
+	unsigned int mask;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return POLLERR;
+
+	poll_wait(file, &priv->virq_wq, table);
+
+	/* The shared memory is always considered ready for reading and writing. */
+	mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+
+	if (priv->virq_pending)
+		mask |= POLLPRI;
+
+	return mask;
+}
+
+static long link_shbuf_ioctl_irq_tx(const struct link_shbuf_data *priv,
+		unsigned long arg)
+{
+	okl4_error_t err;
+	u64 payload;
+	const u64 __user *user_arg = (const u64 __user*)arg;
+
+	if (priv->virqline == NO_OUTGOING_IRQ)
+		return -EINVAL;
+
+#if defined(CONFIG_ARM) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+	if (copy_from_user(&payload, user_arg, sizeof(payload)))
+		return -EFAULT;
+#else
+	if (get_user(payload, user_arg))
+		return -EFAULT;
+#endif
+
+	err = _okl4_sys_vinterrupt_raise(priv->virqline, payload);
+	if (WARN_ON(err != OKL4_OK))
+		return -EINVAL;
+
+	return 0;
+}
+
+static long link_shbuf_ioctl_irq_clr(struct link_shbuf_data *priv,
+		unsigned long arg)
+{
+	u64 payload;
+	u64 __user *user_arg = (u64 __user*)arg;
+
+	/*
+	 * Check that the user pointer is valid before clearing the interrupt,
+	 * so we avoid the races involved in having to undo the clear.
+	 */
+	if (!access_ok(VERIFY_WRITE, user_arg, sizeof(*user_arg)))
+		return -EFAULT;
+
+	/*
+	 * Note that the clearing of the pending flag can race with the setting of
+	 * this flag in the IRQ handler. It is up to the user to coordinate these
+	 * actions.
+	 */
+	priv->virq_pending = false;
+	smp_rmb();
+	payload = atomic64_xchg(&priv->virq_payload, 0);
+
+	/* We've already checked that this access is OK, so no need for put_user. */
+	if (__put_user(payload, user_arg))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long link_shbuf_ioctl(struct file *file, unsigned int request,
+		unsigned long arg)
+{
+	struct link_shbuf_data *priv;
+
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	/* We only support two ioctls */
+	switch (request) {
+
+	case OKL4_LINK_SHBUF_IOCTL_IRQ_TX:
+		return link_shbuf_ioctl_irq_tx(priv, arg);
+
+	case OKL4_LINK_SHBUF_IOCTL_IRQ_CLR:
+		return link_shbuf_ioctl_irq_clr(priv, arg);
+
+	}
+
+	/*
+	 * Handy for debugging when userspace is linking against ioctl headers from
+	 * a different kernel revision.
+	 */
+	dev_dbg(priv->dev, "ioctl request 0x%x received which did not match either "
+		"OKL4_LINK_SHBUF_IOCTL_IRQ_TX (0x%x) or OKL4_LINK_SHBUF_IOCTL_IRQ_CLR "
+		"(0x%x)\n", request, (unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_TX,
+		(unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_CLR);
+
+	return -EINVAL;
+}
+
+static int link_shbuf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	const struct link_shbuf_data *priv;
+	unsigned long offset, pfn, flags;
+	size_t size;
+	pgprot_t prot;
+
+	/* Our caller should have taken the MM semaphore. */
+	if (WARN_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)))
+		return -EINVAL;
+
+	/*
+	 * The file should have been opened with a superset of the permissions
+	 * requested for this mmap.
+	 */
+	flags = vma->vm_flags;
+	if (WARN_ON((flags & VM_READ) && !(file->f_mode & FMODE_READ)))
+		return -EINVAL;
+	if (WARN_ON((flags & VM_WRITE) && !(file->f_mode & FMODE_WRITE)))
+		return -EINVAL;
+	if (WARN_ON((flags & VM_EXEC) && !(file->f_mode & FMODE_EXEC)))
+		return -EINVAL;
+
+	/* Retrieve our private data. */
+	priv = file->private_data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	/* Check the mmap request is within bounds. */
+	size = vma->vm_end - vma->vm_start;
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+	if (!link_shbuf_valid_access(resource_size(&priv->buffer), offset, size))
+		return -EINVAL;
+
+	pfn = (priv->buffer.start + offset) >> PAGE_SHIFT;
+	prot = vm_get_page_prot(flags);
+
+	return remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
+}
+
+static bool link_shbuf_access_ok(fmode_t allowed, fmode_t request)
+{
+	static const fmode_t ACCESS_MASK = FMODE_READ|FMODE_WRITE|FMODE_EXEC;
+	fmode_t relevant = request & ACCESS_MASK;
+	return (relevant & allowed) == relevant;
+}
+
+static int link_shbuf_open(struct inode *inode, struct file *file)
+{
+	struct cdev *cdev;
+	struct link_shbuf_data *priv;
+
+	/* Retrieve a pointer to our private data */
+	cdev = inode->i_cdev;
+	priv = container_of(cdev, struct link_shbuf_data, cdev);
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return -EINVAL;
+
+	if (!link_shbuf_access_ok(priv->permissions, file->f_mode))
+		return -EACCES;
+
+	file->private_data = priv;
+
+	return 0;
+}
+
+static const struct file_operations link_shbuf_ops = {
+	.owner = THIS_MODULE,
+	.read = link_shbuf_read,
+	.write = link_shbuf_write,
+	.poll = link_shbuf_poll,
+	.unlocked_ioctl = link_shbuf_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = link_shbuf_ioctl,
+#endif
+#ifdef CONFIG_MMU
+	.mmap = link_shbuf_mmap,
+#endif
+	.open = link_shbuf_open,
+};
+
+/*
+ * Interrupt handler.
+ *
+ * This function will be called when our link partner uses the ioctl on their
+ * shared memory device to send an outgoing interrupt.
+ */
+static irqreturn_t link_shbuf_irq_handler(int irq, void *data)
+{
+	u64 payload, old, new;
+	struct _okl4_sys_interrupt_get_payload_return _payload;
+
+	/* Retrieve a pointer to our private data. */
+	struct link_shbuf_data *priv = data;
+	if (WARN_ON(!link_shbuf_data_invariant(priv)))
+		return IRQ_NONE;
+
+	/*
+	 * We should only ever be handling a single interrupt, and only if there
+	 * was an incoming interrupt in the configuration.
+	 */
+	if (WARN_ON(priv->virq < 0 || priv->virq != irq))
+		return IRQ_NONE;
+
+	_payload = _okl4_sys_interrupt_get_payload(irq);
+	payload = (u64)_payload.payload;
+
+	/*
+	 * At this point, it is possible the pending flag is already set. It is up to
+	 * the user to synchronise their transmission and acknowledgement of
+	 * interrupts.
+	 */
+
+	/* We open code atomic64_or which is not universally available. */
+	do {
+		old = atomic64_read(&priv->virq_payload);
+		new = old | payload;
+	} while (atomic64_cmpxchg(&priv->virq_payload, old, new) != old);
+	smp_wmb();
+	priv->virq_pending = true;
+
+	wake_up_interruptible(&priv->virq_wq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Allocate a unique device number for this device.
+ *
+ * Note that this function needs to lock its access to link_shbuf_dev as there
+ * may be multiple threads attempting to acquire a new device number.
+ */
+static int link_shbuf_allocate_device(dev_t *devt)
+{
+	int ret = 0;
+	dev_t next;
+
+	spin_lock(&device_number_allocate);
+
+	*devt = link_shbuf_dev;
+	next = MKDEV(MAJOR(link_shbuf_dev), MINOR(link_shbuf_dev) + 1);
+	/* Check for overflow */
+	if (MINOR(next) != MINOR(link_shbuf_dev) + 1)
+		ret = -ENOSPC;
+	else
+		link_shbuf_dev = next;
+
+	spin_unlock(&device_number_allocate);
+
+	return ret;
+}
+
+/*
+ * Discover and add a new shared-buffer link.
+ *
+ * In the following function, we are expecting to parse device tree entries
+ * looking like the following:
+ *
+ *	hypervisor {
+ *		...
+ *		interrupt-line@1d {
+ *				compatible = "okl,microvisor-interrupt-line",
+ *				"okl,microvisor-capability";
+ *			phandle = <0x7>;
+ *			reg = <0x1d>;
+ *			label = "foo_virqline";
+ *		};
+ *	};
+ *
+ *	foo@41003000 {
+ *		compatible = "okl,microvisor-link-shbuf",
+ *			"okl,microvisor-shared-memory";
+ *		phandle = <0xd>;
+ *		reg = <0x0 0x41003000 0x2000>;
+ *		label = "foo";
+ *		okl,rwx = <0x6>;
+ *		okl,interrupt-line = <0x7>;
+ *		interrupts = <0x0 0x4 0x1>;
+ *		interrupt-parent = <0x1>;
+ *	};
+ */
+static int link_shbuf_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node, *virqline;
+	struct link_shbuf_data *priv;
+	const char *name;
+	u32 permissions;
+
+	node = pdev->dev.of_node;
+
+	if (!node)
+		return -ENODEV;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/*
+	 * Retrieve the outgoing vIRQ cap. Note, this is configurable and we
+	 * anticipate that it may not exist.
+	 */
+	virqline = of_parse_phandle(node, "okl,interrupt-line", 0);
+	if (!virqline) {
+		priv->virqline = NO_OUTGOING_IRQ;
+	} else {
+		ret = of_property_read_u32(virqline, "reg", &priv->virqline);
+		if (ret < 0 || priv->virqline == OKL4_KCAP_INVALID) {
+			of_node_put(virqline);
+			ret = -ENODEV;
+			goto err_free_dev;
+		}
+	}
+	of_node_put(virqline);
+
+	/* Retrieve the incoming vIRQ number. Again, this is configurable and we
+	 * anticipate that it may not exist.
+	 */
+	priv->virq = platform_get_irq(pdev, 0);
+	if (priv->virq < 0)
+		priv->virq = NO_INCOMING_IRQ;
+
+	/* If we have a valid incoming vIRQ, register to handle it. */
+	if (priv->virq >= 0) {
+		ret = devm_request_irq(&pdev->dev, priv->virq, link_shbuf_irq_handler,
+			0, dev_name(&pdev->dev), priv);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed request for IRQ\n");
+			goto err_free_dev;
+		}
+	}
+
+	init_waitqueue_head(&priv->virq_wq);
+	priv->virq_pending = false;
+
+	/* Retrieve information about the shared memory region. */
+	ret = of_address_to_resource(node, 0, &priv->buffer);
+	if (ret < 0)
+		goto err_free_irq;
+	/*
+	 * We expect the Elfweaver to have validated that we have a non-NULL,
+	 * page-aligned region.
+	 */
+	if (WARN_ON(priv->buffer.start == 0) ||
+			WARN_ON(resource_size(&priv->buffer) % PAGE_SIZE != 0))
+		goto err_free_irq;
+	if (!devm_request_mem_region(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer), dev_name(&pdev->dev))) {
+		ret = -ENODEV;
+		goto err_free_irq;
+	}
+	priv->base = devm_ioremap(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer));
+	if (!priv->base)
+		goto err_release_region;
+
+	/* Read the permissions of the shared memory region. */
+	ret = of_property_read_u32(node, "okl,rwx", &permissions);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to read shared memory permissions\n");
+		goto err_unmap_dev;
+	}
+	if (permissions & ~S_IRWXO) {
+		ret = -EINVAL;
+		goto err_unmap_dev;
+	}
+	priv->permissions = ((permissions & S_IROTH) ? FMODE_READ : 0) |
+			((permissions & S_IWOTH) ? FMODE_WRITE : 0) |
+			((permissions & S_IXOTH) ? FMODE_EXEC : 0);
+	if (WARN_ON(priv->permissions == 0)) {
+		ret = -EINVAL;
+		goto err_unmap_dev;
+	}
+
+	/* Retrieve the label of this device. This will be the "name" attribute of
+	 * the corresponding "link" tag in the system's XML specification.
+	 */
+	ret = of_property_read_string(node, "label", &name);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to read label\n");
+		goto err_unmap_dev;
+	}
+
+	cdev_init(&priv->cdev, &link_shbuf_ops);
+	ret = cdev_add(&priv->cdev, link_shbuf_dev, 1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to add char dev region\n");
+		goto err_unmap_dev;
+	}
+
+	ret = link_shbuf_allocate_device(&priv->devt);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to allocate new device number\n");
+		goto err_unmap_dev;
+	}
+
+	/* We're now ready to create the device itself. */
+	BUG_ON(name == NULL);
+	priv->dev = device_create(link_shbuf_class, &pdev->dev, priv->devt,
+		priv, "%s%s", DEV_PREFIX, name);
+	if (IS_ERR(priv->dev)) {
+		dev_err(&pdev->dev, "failed to create device\n");
+		ret = PTR_ERR(priv->dev);
+		goto err_del_dev;
+	}
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	return 0;
+
+err_del_dev:
+	cdev_del(&priv->cdev);
+err_unmap_dev:
+	devm_iounmap(&pdev->dev, priv->base);
+err_release_region:
+	devm_release_mem_region(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer));
+err_free_irq:
+	if (priv->virq != NO_INCOMING_IRQ)
+		devm_free_irq(&pdev->dev, priv->virq, priv);
+err_free_dev:
+	devm_kfree(&pdev->dev, priv);
+	return ret;
+}
+
+static int link_shbuf_remove(struct platform_device *pdev)
+{
+	struct link_shbuf_data *priv;
+
+	priv = dev_get_drvdata(&pdev->dev);
+	WARN_ON(!link_shbuf_data_invariant(priv));
+
+	device_destroy(link_shbuf_class, priv->devt);
+
+	cdev_del(&priv->cdev);
+
+	/*
+	 * None of the following is strictly required, as these are all managed
+	 * resources, but we clean them up anyway for clarity.
+	 */
+
+	devm_iounmap(&pdev->dev, priv->base);
+
+	devm_release_mem_region(&pdev->dev, priv->buffer.start,
+			resource_size(&priv->buffer));
+
+	if (priv->virq != NO_INCOMING_IRQ)
+		devm_free_irq(&pdev->dev, priv->virq, priv);
+
+	devm_kfree(&pdev->dev, priv);
+
+	return 0;
+}
+
+static struct platform_driver of_plat_link_shbuf_driver = {
+	.driver = {
+		.name = "okl4-shbuf",
+		.owner = THIS_MODULE,
+		.of_match_table = okl4_link_shbuf_match,
+	},
+	.probe = link_shbuf_probe,
+	.remove = link_shbuf_remove,
+};
+
+/* Maximum number of minor device numbers */
+enum {
+	MAX_MINOR = 1 << MINORBITS,
+};
+
+static int __init okl4_link_shbuf_init(void)
+{
+	int ret;
+
+	link_shbuf_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(link_shbuf_class)) {
+		pr_err("failed to create class\n");
+		ret = PTR_ERR(link_shbuf_class);
+		return ret;
+	}
+
+	ret = alloc_chrdev_region(&link_shbuf_dev, 0, MAX_MINOR, DEVICE_NAME);
+	if (ret < 0) {
+		pr_err("failed to allocate char dev region\n");
+		goto err_destroy_class;
+	}
+
+	ret = platform_driver_register(&of_plat_link_shbuf_driver);
+	if (ret < 0) {
+		pr_err("failed to register driver\n");
+		goto err_unregister_dev_region;
+	}
+
+	spin_lock_init(&device_number_allocate);
+
+	return 0;
+
+err_unregister_dev_region:
+	unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+err_destroy_class:
+	class_destroy(link_shbuf_class);
+	return ret;
+}
+module_init(okl4_link_shbuf_init);
+
+static void __exit okl4_link_shbuf_exit(void)
+{
+	platform_driver_unregister(&of_plat_link_shbuf_driver);
+	unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+	class_destroy(link_shbuf_class);
+}
+module_exit(okl4_link_shbuf_exit);
+
+MODULE_DESCRIPTION("OKL4 shared buffer link driver");
+MODULE_AUTHOR("Cog Systems Pty Ltd");
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 8e5987c..45d3301 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -109,6 +109,9 @@
 #define DEFAULT_CE_INFO_UNIT 0
 #define DEFAULT_NUM_CE_INFO_UNIT 1
 
+#define FDE_FLAG_POS    4
+#define ENABLE_KEY_WRAP_IN_KS    (1 << FDE_FLAG_POS)
+
 enum qseecom_clk_definitions {
 	CLK_DFAB = 0,
 	CLK_SFPB,
@@ -182,6 +185,7 @@
 	size_t sb_length;
 	struct ion_handle *ihandle; /* Retrieve phy addr */
 	wait_queue_head_t          rcv_req_wq;
+	/* rcv_req_flag: -1: not ready; 0: ready and empty; 1: received req */
 	int                        rcv_req_flag;
 	int                        send_resp_flag;
 	bool                       listener_in_use;
@@ -277,6 +281,7 @@
 	unsigned int ce_opp_freq_hz;
 	bool appsbl_qseecom_support;
 	uint32_t qsee_reentrancy_support;
+	bool enable_key_wrap_in_ks;
 
 	uint32_t app_block_ref_cnt;
 	wait_queue_head_t app_block_wq;
@@ -1191,9 +1196,10 @@
 			rcvd_lstnr.sb_size))
 		return -EFAULT;
 
-	data->listener.id = 0;
+	data->listener.id = rcvd_lstnr.listener_id;
 	if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
-		pr_err("Service is not unique and is already registered\n");
+		pr_err("Service %d is not unique and failed to register\n",
+				rcvd_lstnr.listener_id);
 		data->released = true;
 		return -EBUSY;
 	}
@@ -1202,18 +1208,18 @@
 	if (!new_entry)
 		return -ENOMEM;
 	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
-	new_entry->rcv_req_flag = 0;
+	new_entry->rcv_req_flag = -1;
 
 	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
 	new_entry->sb_length = rcvd_lstnr.sb_size;
 	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
 	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
-		pr_err("qseecom_set_sb_memoryfailed\n");
+		pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
+				rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
 		kzfree(new_entry);
 		return -ENOMEM;
 	}
 
-	data->listener.id = rcvd_lstnr.listener_id;
 	init_waitqueue_head(&new_entry->rcv_req_wq);
 	init_waitqueue_head(&new_entry->listener_block_app_wq);
 	new_entry->send_resp_flag = 0;
@@ -1222,6 +1228,7 @@
 	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
 	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
 
+	pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
 	return ret;
 }
 
@@ -1245,13 +1252,17 @@
 static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
 {
 	int ret = 0;
-	unsigned long flags;
-	uint32_t unmap_mem = 0;
 	struct qseecom_register_listener_ireq req;
 	struct qseecom_registered_listener_list *ptr_svc = NULL;
 	struct qseecom_command_scm_resp resp;
 	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */
 
+	ptr_svc = __qseecom_find_svc(data->listener.id);
+	if (!ptr_svc) {
+		pr_err("Unregister invalid listener ID %d\n", data->listener.id);
+		return -ENODATA;
+	}
+
 	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
 	req.listener_id = data->listener.id;
 	resp.result = QSEOS_RESULT_INCOMPLETE;
@@ -1261,59 +1272,41 @@
 	if (ret) {
 		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
 				ret, data->listener.id);
-		return ret;
+		goto exit;
 	}
 
 	if (resp.result != QSEOS_RESULT_SUCCESS) {
 		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
 				resp.result, data->listener.id);
-		return -EPERM;
+		ret = -EPERM;
+		goto exit;
 	}
 
 	data->abort = 1;
-	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
-	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
-			list) {
-		if (ptr_svc->svc.listener_id == data->listener.id) {
-			ptr_svc->abort = 1;
-			wake_up_all(&ptr_svc->rcv_req_wq);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+	ptr_svc->abort = 1;
+	wake_up_all(&ptr_svc->rcv_req_wq);
 
 	while (atomic_read(&data->ioctl_count) > 1) {
 		if (wait_event_freezable(data->abort_wq,
 				atomic_read(&data->ioctl_count) <= 1)) {
 			pr_err("Interrupted from abort\n");
 			ret = -ERESTARTSYS;
-			return ret;
 		}
 	}
 
-	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
-	list_for_each_entry(ptr_svc,
-			&qseecom.registered_listener_list_head, list) {
-		if (ptr_svc->svc.listener_id == data->listener.id) {
-			if (ptr_svc->sb_virt) {
-				unmap_mem = 1;
-				ihandle = ptr_svc->ihandle;
-			}
-			list_del(&ptr_svc->list);
-			kzfree(ptr_svc);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
-
-	/* Unmap the memory */
-	if (unmap_mem) {
+exit:
+	if (ptr_svc->sb_virt) {
+		ihandle = ptr_svc->ihandle;
 		if (!IS_ERR_OR_NULL(ihandle)) {
 			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
 			ion_free(qseecom.ion_clnt, ihandle);
 		}
 	}
+	list_del(&ptr_svc->list);
+	kzfree(ptr_svc);
+
 	data->released = true;
+	pr_warn("Service %d is unregistered\n", data->listener.id);
 	return ret;
 }
 
@@ -1651,6 +1644,12 @@
 	}
 }
 
+static int __is_listener_rcv_wq_not_ready(
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	return ptr_svc->rcv_req_flag == -1;
+}
+
 static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
 					struct qseecom_command_scm_resp *resp)
 {
@@ -1668,6 +1667,7 @@
 	void *cmd_buf = NULL;
 	size_t cmd_len;
 	struct sglist_info *table = NULL;
+	bool not_ready = false;
 
 	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
 		lstnr = resp->data;
@@ -1679,6 +1679,10 @@
 		list_for_each_entry(ptr_svc,
 				&qseecom.registered_listener_list_head, list) {
 			if (ptr_svc->svc.listener_id == lstnr) {
+				if (__is_listener_rcv_wq_not_ready(ptr_svc)) {
+					not_ready = true;
+					break;
+				}
 				ptr_svc->listener_in_use = true;
 				ptr_svc->rcv_req_flag = 1;
 				wake_up_interruptible(&ptr_svc->rcv_req_wq);
@@ -1718,6 +1722,16 @@
 			status = QSEOS_RESULT_FAILURE;
 			goto err_resp;
 		}
+
+		if (not_ready) {
+			pr_err("Service %d is not ready to receive request\n",
+					lstnr);
+			rc = -ENOENT;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+
+		}
+
 		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
 
 		/* initialize the new signal mask with all signals*/
@@ -1794,7 +1808,7 @@
 		else
 			*(uint32_t *)cmd_buf =
 				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
-		if (ptr_svc) {
+		if (ptr_svc && ptr_svc->ihandle) {
 			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
 					ptr_svc->ihandle,
 					ptr_svc->sb_virt, ptr_svc->sb_length,
@@ -1979,6 +1993,7 @@
 	void *cmd_buf = NULL;
 	size_t cmd_len;
 	struct sglist_info *table = NULL;
+	bool not_ready = false;
 
 	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
 		lstnr = resp->data;
@@ -1990,6 +2005,10 @@
 		list_for_each_entry(ptr_svc,
 				&qseecom.registered_listener_list_head, list) {
 			if (ptr_svc->svc.listener_id == lstnr) {
+				if (__is_listener_rcv_wq_not_ready(ptr_svc)) {
+					not_ready = true;
+					break;
+				}
 				ptr_svc->listener_in_use = true;
 				ptr_svc->rcv_req_flag = 1;
 				wake_up_interruptible(&ptr_svc->rcv_req_wq);
@@ -2029,6 +2048,16 @@
 			status = QSEOS_RESULT_FAILURE;
 			goto err_resp;
 		}
+
+		if (not_ready) {
+			pr_err("Service %d is not ready to receive request\n",
+					lstnr);
+			rc = -ENOENT;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+
+		}
+
 		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
 
 		/* initialize the new signal mask with all signals*/
@@ -2096,7 +2125,7 @@
 		else
 			*(uint32_t *)cmd_buf =
 				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
-		if (ptr_svc) {
+		if (ptr_svc && ptr_svc->ihandle) {
 			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
 					ptr_svc->ihandle,
 					ptr_svc->sb_virt, ptr_svc->sb_length,
@@ -3836,7 +3865,7 @@
 {
 	int ret;
 
-	ret = (svc->rcv_req_flag != 0);
+	ret = (svc->rcv_req_flag == 1);
 	return ret || data->abort || svc->abort;
 }
 
@@ -3851,13 +3880,17 @@
 		return -ENODATA;
 	}
 
+	if (this_lstnr->rcv_req_flag == -1)
+		this_lstnr->rcv_req_flag = 0;
+
 	while (1) {
 		if (wait_event_freezable(this_lstnr->rcv_req_wq,
 				__qseecom_listener_has_rcvd_req(data,
 				this_lstnr))) {
-			pr_debug("Interrupted: exiting Listener Service = %d\n",
+			pr_warn("Interrupted: exiting Listener Service = %d\n",
 						(uint32_t)data->listener.id);
 			/* woken up for different reason */
+			this_lstnr->rcv_req_flag = -1;
 			return -ERESTARTSYS;
 		}
 
@@ -4545,7 +4578,6 @@
 		strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
 		if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
 			ret = -EIO;
-			kfree(entry);
 			goto exit_entry_free;
 		}
 		entry->app_arch = app_arch;
@@ -5916,6 +5948,9 @@
 	else
 		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
 
+	if (qseecom.enable_key_wrap_in_ks == true)
+		flags |= ENABLE_KEY_WRAP_IN_KS;
+
 	generate_key_ireq.flags = flags;
 	generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
 	memset((void *)generate_key_ireq.key_id,
@@ -7699,6 +7734,7 @@
 			data->type, data->mode, data);
 		switch (data->type) {
 		case QSEECOM_LISTENER_SERVICE:
+			pr_warn("release lsnr svc %d\n", data->listener.id);
 			__qseecom_listener_abort_all(1);
 			mutex_lock(&app_access_lock);
 			ret = qseecom_unregister_listener(data);
@@ -8642,6 +8678,14 @@
 				qseecom.qsee_reentrancy_support);
 		}
 
+		qseecom.enable_key_wrap_in_ks =
+			of_property_read_bool((&pdev->dev)->of_node,
+					"qcom,enable-key-wrap-in-ks");
+		if (qseecom.enable_key_wrap_in_ks) {
+			pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
+					qseecom.enable_key_wrap_in_ks);
+		}
+
 		/*
 		 * The qseecom bus scaling flag can not be enabled when
 		 * crypto clock is not handled by HLOS.
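A note on the qseecom listener changes above: rcv_req_flag is now used as a tri-state value, and the new __is_listener_rcv_wq_not_ready() helper only tests the interrupted state. Below is a minimal sketch of that convention, using hypothetical names; it is not part of the patch itself.

```c
#include <linux/types.h>

/* Sketch only: the three states the patch encodes in rcv_req_flag. */
enum lsnr_rcv_state {
	LSNR_INTERRUPTED = -1,	/* listener thread left rcv_req_wq (-ERESTARTSYS) */
	LSNR_WAITING	 =  0,	/* listener is blocked and ready for a request */
	LSNR_REQ_PENDING =  1,	/* a request was posted; wake the listener */
};

static inline bool lsnr_not_ready(int rcv_req_flag)
{
	return rcv_req_flag == LSNR_INTERRUPTED;
}
```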
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index bf0d770..9be6433 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -756,14 +756,14 @@
 	err = gpio_request(kim_gdata->nshutdown, "kim");
 	if (unlikely(err)) {
 		pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
 	}
 
 	/* Configure nShutdown GPIO as output=0 */
 	err = gpio_direction_output(kim_gdata->nshutdown, 0);
 	if (unlikely(err)) {
 		pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
 	}
 	/* get reference of pdev for request_firmware
 	 */
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 87a1337..eb57610 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -177,7 +177,7 @@
 		} else
 			lux = 0;
 	else
-		return -EAGAIN;
+		return 0;
 
 	/* LUX range check */
 	return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index a940f97..345f229 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -130,7 +130,7 @@
 	struct mm_struct *mm = task->mm;
 
 	/* fill the first TASK_COMM_LEN bytes with thread name */
-	get_task_comm(task_entry->comm, task);
+	__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
 	i = strlen(task_entry->comm);
 	while (i < TASK_COMM_LEN)
 		task_entry->comm[i++] = ' ';
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index fe90b7e..5e9122cd 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,6 +45,7 @@
 #include <linux/seq_file.h>
 #include <linux/vmw_vmci_defs.h>
 #include <linux/vmw_vmci_api.h>
+#include <linux/io.h>
 #include <asm/hypervisor.h>
 
 MODULE_AUTHOR("VMware, Inc.");
@@ -341,7 +342,13 @@
 		success = false;
 	}
 
-	if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
+	/*
+	 * 2MB pages are only supported with batching. If batching is for some
+	 * reason disabled, do not use 2MB pages, since otherwise the legacy
+	 * mechanism is used with 2MB pages, causing a failure.
+	 */
+	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
+	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
 		b->supported_page_sizes = 2;
 	else
 		b->supported_page_sizes = 1;
@@ -450,7 +457,7 @@
 
 	pfn32 = (u32)pfn;
 	if (pfn32 != pfn)
-		return -1;
+		return -EINVAL;
 
 	STATS_INC(b->stats.lock[false]);
 
@@ -460,14 +467,14 @@
 
 	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
 	STATS_INC(b->stats.lock_fail[false]);
-	return 1;
+	return -EIO;
 }
 
 static int vmballoon_send_batched_lock(struct vmballoon *b,
 		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
 	unsigned long status;
-	unsigned long pfn = page_to_pfn(b->page);
+	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
 	STATS_INC(b->stats.lock[is_2m_pages]);
 
@@ -515,7 +522,7 @@
 		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
 	unsigned long status;
-	unsigned long pfn = page_to_pfn(b->page);
+	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
 	STATS_INC(b->stats.unlock[is_2m_pages]);
 
@@ -597,11 +604,12 @@
 
 	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
 								target);
-	if (locked > 0) {
+	if (locked) {
 		STATS_INC(b->stats.refused_alloc[false]);
 
-		if (hv_status == VMW_BALLOON_ERROR_RESET ||
-				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+		if (locked == -EIO &&
+		    (hv_status == VMW_BALLOON_ERROR_RESET ||
+		     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
 			vmballoon_free_page(page, false);
 			return -EIO;
 		}
@@ -617,7 +625,7 @@
 		} else {
 			vmballoon_free_page(page, false);
 		}
-		return -EIO;
+		return locked;
 	}
 
 	/* track allocated page */
@@ -1029,29 +1037,30 @@
  */
 static int vmballoon_vmci_init(struct vmballoon *b)
 {
-	int error = 0;
+	unsigned long error, dummy;
 
-	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
-		error = vmci_doorbell_create(&b->vmci_doorbell,
-				VMCI_FLAG_DELAYED_CB,
-				VMCI_PRIVILEGE_FLAG_RESTRICTED,
-				vmballoon_doorbell, b);
+	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+		return 0;
 
-		if (error == VMCI_SUCCESS) {
-			VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
-					b->vmci_doorbell.context,
-					b->vmci_doorbell.resource, error);
-			STATS_INC(b->stats.doorbell_set);
-		}
-	}
+	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
+				     vmballoon_doorbell, b);
 
-	if (error != 0) {
-		vmballoon_vmci_cleanup(b);
+	if (error != VMCI_SUCCESS)
+		goto fail;
 
-		return -EIO;
-	}
+	error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
+				   b->vmci_doorbell.resource, dummy);
+
+	STATS_INC(b->stats.doorbell_set);
+
+	if (error != VMW_BALLOON_SUCCESS)
+		goto fail;
 
 	return 0;
+fail:
+	vmballoon_vmci_cleanup(b);
+	return -EIO;
 }
 
 /*
@@ -1289,7 +1298,14 @@
 
 	return 0;
 }
-module_init(vmballoon_init);
+
+/*
+ * Using late_initcall() instead of module_init() allows the balloon to use the
+ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
+ * VMCI is probed only after the balloon is initialized. If the balloon is used
+ * as a module, late_initcall() is equivalent to module_init().
+ */
+late_initcall(vmballoon_init);
 
 static void __exit vmballoon_exit(void)
 {
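The module_init() → late_initcall() switch above only changes ordering for the built-in case, as the diff's own comment notes. A self-contained sketch of the same pattern, with a hypothetical driver name (a sketch, not part of the patch):

```c
#include <linux/init.h>
#include <linux/module.h>

static int __init example_balloon_init(void)
{
	/*
	 * Runs after all device_initcall() users (e.g. the VMCI core) when
	 * built into the kernel; when built as a module this macro degrades
	 * to module_init(), so load-time behaviour is unchanged.
	 */
	return 0;
}
late_initcall(example_balloon_init);

static void __exit example_balloon_exit(void)
{
}
module_exit(example_balloon_exit);

MODULE_LICENSE("GPL");
```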
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f735ab4..5927db04 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -755,7 +755,7 @@
 	retval = get_user_pages_fast((uintptr_t) produce_uva,
 				     produce_q->kernel_if->num_pages, 1,
 				     produce_q->kernel_if->u.h.header_page);
-	if (retval < produce_q->kernel_if->num_pages) {
+	if (retval < (int)produce_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
 			retval);
 		qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -767,7 +767,7 @@
 	retval = get_user_pages_fast((uintptr_t) consume_uva,
 				     consume_q->kernel_if->num_pages, 1,
 				     consume_q->kernel_if->u.h.header_page);
-	if (retval < consume_q->kernel_if->num_pages) {
+	if (retval < (int)consume_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
 			retval);
 		qp_release_pages(consume_q->kernel_if->u.h.header_page,
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1488fe..04ac554 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1214,7 +1214,7 @@
 	mmc_get_card(card);
 
 	if (mmc_card_cmdq(card)) {
-		err = mmc_cmdq_halt_on_empty_queue(card->host);
+		err = mmc_cmdq_halt_on_empty_queue(card->host, 0);
 		if (err) {
 			pr_err("%s: halt failed while doing %s err (%d)\n",
 					mmc_hostname(card->host),
@@ -1867,11 +1867,6 @@
 	struct mmc_cmdq_req *cmdq_req;
 	struct mmc_queue_req *active_mqrq;
 
-	BUG_ON(req->tag > card->ext_csd.cmdq_depth);
-	BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
-
-	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
-
 	active_mqrq = &mq->mqrq_cmdq[req->tag];
 	active_mqrq->req = req;
 
@@ -1879,6 +1874,17 @@
 	cmdq_req->cmdq_req_flags |= QBR;
 	cmdq_req->mrq.cmd = &cmdq_req->cmd;
 	cmdq_req->tag = req->tag;
+
+	/*
+	 * To avoid a potential race condition with the error handler work,
+	 * do the following:
+	 * 1. call init_completion() only once
+	 * 2. set CMDQ_STATE_DCMD_ACTIVE only after its tag is set
+	 */
+	init_completion(&cmdq_req->mrq.completion);
+	WARN_ON(req->tag > card->ext_csd.cmdq_depth);
+	WARN_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+	set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
 	return cmdq_req;
 }
 
@@ -1922,8 +1928,21 @@
 	}
 	err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
 clear_dcmd:
-	mmc_host_clk_hold(card->host);
-	blk_complete_request(req);
+	/*
+	 * If some other request got an error while there is a DCMD request
+	 * in the command queue, then err will be updated with -EAGAIN by the
+	 * error handler, which indicates that the caller must not call
+	 * blk_complete_request() and must instead let the request be handled
+	 * by the error handler. In all other cases, the caller must call
+	 * blk_complete_request().
+	 */
+	if (err != -EAGAIN) {
+		mmc_host_clk_hold(card->host);
+		blk_complete_request(req);
+	} else {
+		pr_err("%s: err(%d) handled by cmdq-error handler\n",
+			__func__, err);
+	}
 out:
 	return err ? 1 : 0;
 }
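The -EAGAIN convention introduced above decides who completes a DCMD request. A hedged sketch of the caller-side rule, using a hypothetical helper name (not part of the patch):

```c
#include <linux/blkdev.h>
#include <linux/errno.h>

/* Sketch only: mirrors the clear_dcmd logic above. */
static int example_finish_dcmd(struct request *req, int err)
{
	if (err == -EAGAIN) {
		/* the cmdq error handler owns this request now */
		return 1;
	}
	blk_complete_request(req);
	return err ? 1 : 0;
}
```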
@@ -2028,8 +2047,13 @@
 				MMC_SECURE_TRIM2_ARG);
 	}
 clear_dcmd:
-	mmc_host_clk_hold(card->host);
-	blk_complete_request(req);
+	if (err != -EAGAIN) {
+		mmc_host_clk_hold(card->host);
+		blk_complete_request(req);
+	} else {
+		pr_err("%s: err(%d) handled by cmdq-error handler\n",
+			__func__, err);
+	}
 out:
 	return err ? 1 : 0;
 }
@@ -3212,8 +3236,14 @@
 	struct mmc_cmdq_req *mc_rq;
 	u8 active_small_sector_read = 0;
 	int ret = 0;
+	unsigned long timeout_ms = 10000; /* 10 sec safe timeout */
 
-	mmc_deferred_scaling(host);
+	mmc_cmdq_up_rwsem(host);
+	mmc_deferred_scaling(host, timeout_ms);
+	ret = mmc_cmdq_down_rwsem(host, req);
+	if (ret)
+		return ret;
+
 	mmc_cmdq_clk_scaling_start_busy(host, true);
 
 	BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
@@ -3246,9 +3276,18 @@
 	 * empty faster and we will be able to scale up to Nominal frequency
 	 * when needed.
 	 */
-	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
-		wait_event_interruptible(ctx->queue_empty_wq,
-					(!ctx->active_reqs));
+
+	if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) {
+
+		ret = wait_event_interruptible_timeout(ctx->queue_empty_wq,
+				(!ctx->active_reqs &&
+				 !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)),
+				msecs_to_jiffies(5000));
+		if (!ret)
+			pr_err("%s: queue_empty_wq timeout case? ret = (%d)\n",
+				__func__, ret);
+		ret = 0;
+	}
 
 	if (ret) {
 		/* clear pending request */
@@ -3339,20 +3378,17 @@
 }
 
 /**
- * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request.
+ * get_cmdq_req_by_tag - return the struct mmc_cmdq_req matching a tag.
  * @q:		request_queue pointer.
  * @tag:	tag number of request to check.
  *
- * This function checks if the request with tag number "tag"
- * is a DCMD request or not based on cmdq_req_flags set.
- *
- * returns true if DCMD req, otherwise false.
  */
-static bool is_cmdq_dcmd_req(struct request_queue *q, int tag)
+static struct mmc_cmdq_req *get_cmdq_req_by_tag(struct request_queue *q,
+						int tag)
 {
 	struct request *req;
 	struct mmc_queue_req *mq_rq;
-	struct mmc_cmdq_req *cmdq_req;
+	struct mmc_cmdq_req *cmdq_req = NULL;
 
 	req = blk_queue_find_tag(q, tag);
 	if (WARN_ON(!req))
@@ -3361,9 +3397,8 @@
 	if (WARN_ON(!mq_rq))
 		goto out;
 	cmdq_req = &(mq_rq->cmdq_req);
-	return (cmdq_req->cmdq_req_flags & DCMD);
 out:
-	return -ENOENT;
+	return cmdq_req;
 }
 
 /**
@@ -3383,7 +3418,9 @@
 	struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
 	struct request_queue *q;
 	int itag = 0;
-	int ret = 0;
+	struct mmc_cmdq_req *cmdq_req = NULL;
+	struct mmc_request *dcmd_mrq;
+	bool is_err_mrq_dcmd = false;
 
 	if (WARN_ON(!mrq))
 		return;
@@ -3399,18 +3436,31 @@
 
 	mmc_blk_cmdq_reset(host, false);
 
+	if (mrq->cmdq_req->cmdq_req_flags & DCMD)
+		is_err_mrq_dcmd = true;
+
 	for_each_set_bit(itag, &ctx_info->active_reqs,
 			host->num_cq_slots) {
-		ret = is_cmdq_dcmd_req(q, itag);
-		if (WARN_ON(ret == -ENOENT))
+		cmdq_req = get_cmdq_req_by_tag(q, itag);
+		if (WARN_ON(!cmdq_req))
 			continue;
-		if (!ret) {
+		if (!(cmdq_req->cmdq_req_flags & DCMD)) {
 			WARN_ON(!test_and_clear_bit(itag,
 				 &ctx_info->data_active_reqs));
 			mmc_cmdq_post_req(host, itag, err);
 		} else {
-			clear_bit(CMDQ_STATE_DCMD_ACTIVE,
-					&ctx_info->curr_state);
+			dcmd_mrq = &cmdq_req->mrq;
+			WARN_ON(!test_and_clear_bit(CMDQ_STATE_DCMD_ACTIVE,
+					&ctx_info->curr_state));
+			pr_debug("%s: cmd(%u), req_op(%llu)\n", __func__,
+				 dcmd_mrq->cmd->opcode, req_op(dcmd_mrq->req));
+			if (!is_err_mrq_dcmd && !dcmd_mrq->cmd->error &&
+				(req_op(dcmd_mrq->req) == REQ_OP_SECURE_ERASE ||
+				 req_op(dcmd_mrq->req) == REQ_OP_DISCARD)) {
+				dcmd_mrq->cmd->error = -EAGAIN;
+				complete(&dcmd_mrq->completion);
+			}
+
 		}
 		WARN_ON(!test_and_clear_bit(itag,
 					&ctx_info->active_reqs));
@@ -3538,6 +3588,7 @@
 	if (WARN_ON(!mrq))
 		return;
 
+	down_write(&ctx_info->err_rwsem);
 	q = mrq->req->q;
 	err = mmc_cmdq_halt(host, true);
 	if (err) {
@@ -3590,6 +3641,24 @@
 	host->err_mrq = NULL;
 	clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
 	WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+#ifdef CONFIG_MMC_CLKGATE
+	pr_err("%s: clk-rqs(%d), claim-cnt(%d), claimed(%d), claimer(%s)\n",
+		__func__, host->clk_requests, host->claim_cnt, host->claimed,
+		host->claimer->comm);
+#else
+	pr_err("%s: claim-cnt(%d), claimed(%d), claimer(%s)\n", __func__,
+			host->claim_cnt, host->claimed, host->claimer->comm);
+#endif
+	sched_show_task(mq->thread);
+	if (host->claimed && host->claimer)
+		sched_show_task(host->claimer);
+#ifdef CONFIG_MMC_CLKGATE
+	WARN_ON(host->clk_requests < 0);
+#endif
+	WARN_ON(host->claim_cnt < 0);
+
+	up_write(&ctx_info->err_rwsem);
 	wake_up(&ctx_info->wait);
 }
 
@@ -3604,6 +3673,16 @@
 	struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
 	int err = 0;
 	bool is_dcmd = false;
+	bool err_rwsem = false;
+
+	if (down_read_trylock(&ctx_info->err_rwsem)) {
+		err_rwsem = true;
+	} else {
+		pr_err("%s: failed to acquire err_rwsem => err handler active\n",
+		    __func__);
+		WARN_ON_ONCE(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+		goto out;
+	}
 
 	if (mrq->cmd && mrq->cmd->error)
 		err = mrq->cmd->error;
@@ -3625,12 +3704,6 @@
 		}
 		goto out;
 	}
-	/*
-	 * In case of error CMDQ is expected to be either in halted
-	 * or disable state so cannot receive any completion of
-	 * other requests.
-	 */
-	WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
 
 	/* clear pending request */
 	BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3664,9 +3737,10 @@
 out:
 
 	mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
-	if (!(err || cmdq_req->resp_err)) {
+	if (err_rwsem && !(err || cmdq_req->resp_err)) {
 		mmc_host_clk_release(host);
 		wake_up(&ctx_info->wait);
+		host->last_completed_rq_time = ktime_get();
 		mmc_put_card(host->card);
 	}
 
@@ -3676,6 +3750,8 @@
 	if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
 		complete(&mq->cmdq_shutdown_complete);
 
+	if (err_rwsem)
+		up_read(&ctx_info->err_rwsem);
 	return;
 }
 
@@ -4056,6 +4132,7 @@
 		if (mmc_req_is_special(req) &&
 		    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
 		    ctx->active_small_sector_read_reqs) {
+			mmc_cmdq_up_rwsem(host);
 			ret = wait_event_interruptible(ctx->queue_empty_wq,
 						      !ctx->active_reqs);
 			if (ret) {
@@ -4064,6 +4141,10 @@
 					__func__, ret);
 				BUG_ON(1);
 			}
+			ret = mmc_cmdq_down_rwsem(host, req);
+			if (ret)
+				return ret;
+
 			/* clear the counter now */
 			ctx->active_small_sector_read_reqs = 0;
 			/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 66165d9..21e4fbc 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -133,7 +133,14 @@
 		if (kthread_should_stop())
 			break;
 
+		ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
+		if (ret) {
+			mmc_cmdq_up_rwsem(host);
+			continue;
+		}
 		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
+		mmc_cmdq_up_rwsem(host);
+
 		/*
 		 * Don't requeue if issue_fn fails.
 		 * Recovery will be come by completion softirq
@@ -645,6 +652,7 @@
 
 	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
 	init_waitqueue_head(&card->host->cmdq_ctx.wait);
+	init_rwsem(&card->host->cmdq_ctx.err_rwsem);
 
 	mq->mqrq_cmdq = kzalloc(
 			sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 6278377..9fbf3ae 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -137,6 +137,34 @@
 	}
 }
 
+void mmc_cmdq_up_rwsem(struct mmc_host *host)
+{
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+	up_read(&ctx->err_rwsem);
+}
+EXPORT_SYMBOL(mmc_cmdq_up_rwsem);
+
+int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq)
+{
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+	down_read(&ctx->err_rwsem);
+	/*
+	 * This prevents a case where the issue context has already called
+	 * blk_queue_start_tag() and, immediately afterwards, the error
+	 * handler work has run and called blk_queue_invalidate_tags().
+	 * In this case, the issue context should check for REQ_QUEUED
+	 * before proceeding with that request. It should ideally call
+	 * blk_queue_start_tag() again on the requeued request.
+	 */
+	if (!(rq->cmd_flags & REQ_QUEUED))
+		return -EINVAL;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(mmc_cmdq_down_rwsem);
+
 static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
 {
 	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
@@ -345,12 +373,23 @@
 	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
 }
 
-int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
+int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host, unsigned long timeout)
 {
 	int err = 0;
 
-	err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
-				(!host->cmdq_ctx.active_reqs));
+	if (!timeout) {
+		err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
+					(!host->cmdq_ctx.active_reqs));
+	} else {
+		err = wait_event_interruptible_timeout(
+				host->cmdq_ctx.queue_empty_wq,
+				(!host->cmdq_ctx.active_reqs),
+				msecs_to_jiffies(timeout));
+		if (!err)
+			pr_err("%s: halt_on_empty_queue timeout case: err(%d)\n",
+					__func__, err);
+	}
+
 	if (host->cmdq_ctx.active_reqs) {
 		pr_err("%s: %s: unexpected active requests (%lu)\n",
 			mmc_hostname(host), __func__,
@@ -371,7 +410,8 @@
 EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
 
 int mmc_clk_update_freq(struct mmc_host *host,
-		unsigned long freq, enum mmc_load state)
+		unsigned long freq, enum mmc_load state,
+		unsigned long timeout)
 {
 	int err = 0;
 	bool cmdq_mode;
@@ -413,7 +453,7 @@
 	}
 
 	if (cmdq_mode) {
-		err = mmc_cmdq_halt_on_empty_queue(host);
+		err = mmc_cmdq_halt_on_empty_queue(host, timeout);
 		if (err) {
 			pr_err("%s: %s: failed halting queue (%d)\n",
 				mmc_hostname(host), __func__, err);
@@ -427,12 +467,16 @@
 		goto invalid_state;
 	}
 
+	MMC_TRACE(host, "clock scale state %d freq %lu\n",
+			state, freq);
 	err = host->bus_ops->change_bus_speed(host, &freq);
 	if (!err)
 		host->clk_scaling.curr_freq = freq;
 	else
 		pr_err("%s: %s: failed (%d) at freq=%lu\n",
 			mmc_hostname(host), __func__, err, freq);
+	MMC_TRACE(host, "clock scale state %d freq %lu done with err %d\n",
+			state, freq, err);
 
 invalid_state:
 	if (cmdq_mode) {
@@ -538,7 +582,7 @@
 	clk_scaling->need_freq_change = false;
 
 	mmc_host_clk_hold(host);
-	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
+	err = mmc_clk_update_freq(host, *freq, clk_scaling->state, 0);
 	if (err && err != -EAGAIN) {
 		pr_err("%s: clock scale to %lu failed with error %d\n",
 			mmc_hostname(host), *freq, err);
@@ -564,7 +608,7 @@
  * This function does clock scaling in case "need_freq_change" flag was set
  * by the clock scaling logic.
  */
-void mmc_deferred_scaling(struct mmc_host *host)
+void mmc_deferred_scaling(struct mmc_host *host, unsigned long timeout)
 {
 	unsigned long target_freq;
 	int err;
@@ -599,7 +643,7 @@
 				target_freq, current->comm);
 
 	err = mmc_clk_update_freq(host, target_freq,
-		clk_scaling.state);
+		clk_scaling.state, timeout);
 	if (err && err != -EAGAIN) {
 		pr_err("%s: failed on deferred scale clocks (%d)\n",
 			mmc_hostname(host), err);
@@ -1201,7 +1245,7 @@
 	led_trigger_event(host->led, LED_FULL);
 
 	if (mmc_is_data_request(mrq)) {
-		mmc_deferred_scaling(host);
+		mmc_deferred_scaling(host, 0);
 		mmc_clk_scaling_start_busy(host, true);
 	}
 
@@ -1841,14 +1885,15 @@
 	struct mmc_command *cmd = mrq->cmd;
 	int err = 0;
 
-	init_completion(&mrq->completion);
 	mrq->done = mmc_cmdq_dcmd_req_done;
 	err = mmc_cmdq_start_req(host, cmdq_req);
 	if (err)
 		return err;
 
+	mmc_cmdq_up_rwsem(host);
 	wait_for_completion_io(&mrq->completion);
-	if (cmd->error) {
+	err = mmc_cmdq_down_rwsem(host, mrq->req);
+	if (err || cmd->error) {
 		pr_err("%s: DCMD %d failed with err %d\n",
 				mmc_hostname(host), cmd->opcode,
 				cmd->error);
@@ -3717,7 +3762,7 @@
 	if (err) {
 		pr_err("mmc_erase: group start error %d, status %#x\n",
 				err, cmd->resp[0]);
-		return -EIO;
+		return err;
 	}
 	return 0;
 }
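The err_rwsem added above serializes the CMDQ issue and completion paths against the error handler: the normal paths are readers, recovery is the single writer. A minimal sketch of the pattern, with hypothetical names (not part of the patch):

```c
#include <linux/rwsem.h>

struct example_cmdq_ctx {
	struct rw_semaphore err_rwsem;
};

static void example_issue(struct example_cmdq_ctx *ctx)
{
	down_read(&ctx->err_rwsem);	/* many issuers/completions may overlap */
	/* ... start or complete a request ... */
	up_read(&ctx->err_rwsem);
}

static void example_error_handler(struct example_cmdq_ctx *ctx)
{
	down_write(&ctx->err_rwsem);	/* excludes every reader */
	/* ... halt the controller and requeue outstanding requests ... */
	up_write(&ctx->err_rwsem);
}
```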
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 2adf42c..240dbd3 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -26,7 +26,8 @@
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
 int mmc_clk_update_freq(struct mmc_host *host,
-		unsigned long freq, enum mmc_load state);
+		unsigned long freq, enum mmc_load state,
+		unsigned long timeout);
 void mmc_gate_clock(struct mmc_host *host);
 void mmc_ungate_clock(struct mmc_host *host);
 void mmc_set_ungated(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 15c3e9e..c3183c1 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -275,7 +275,7 @@
 	mmc_host_clk_hold(host);
 
 	/* change frequency from sysfs manually */
-	err = mmc_clk_update_freq(host, val, host->clk_scaling.state);
+	err = mmc_clk_update_freq(host, val, host->clk_scaling.state, 0);
 	if (err == -EAGAIN)
 		err = 0;
 	else if (err)
@@ -547,7 +547,7 @@
 
 	mmc_get_card(card);
 	if (mmc_card_cmdq(card)) {
-		ret = mmc_cmdq_halt_on_empty_queue(card->host);
+		ret = mmc_cmdq_halt_on_empty_queue(card->host, 0);
 		if (ret) {
 			pr_err("%s: halt failed while doing %s err (%d)\n",
 					mmc_hostname(card->host), __func__,
@@ -589,7 +589,7 @@
 
 	mmc_get_card(card);
 	if (mmc_card_cmdq(card)) {
-		err = mmc_cmdq_halt_on_empty_queue(card->host);
+		err = mmc_cmdq_halt_on_empty_queue(card->host, 0);
 		if (err) {
 			pr_err("%s: halt failed while doing %s err (%d)\n",
 					mmc_hostname(card->host), __func__,
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 02dffe9..8fa3b70 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -155,6 +155,7 @@
 		mmc_gate_clock(host);
 		spin_lock_irqsave(&host->clk_lock, flags);
 		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+		MMC_TRACE(host, "clocks are gated\n");
 	}
 	spin_unlock_irqrestore(&host->clk_lock, flags);
 	mutex_unlock(&host->clk_gate_mutex);
@@ -193,6 +194,7 @@
 
 		spin_lock_irqsave(&host->clk_lock, flags);
 		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+		MMC_TRACE(host, "clocks are ungated\n");
 	}
 	host->clk_requests++;
 	spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -766,7 +768,7 @@
 		host->clk_scaling.state = MMC_LOAD_HIGH;
 		/* Set to max. frequency when disabling */
 		mmc_clk_update_freq(host, host->card->clk_scaling_highest,
-					host->clk_scaling.state);
+					host->clk_scaling.state, 0);
 	} else if (value) {
 		/* Unmask host capability and resume scaling */
 		host->caps2 |= MMC_CAP2_CLK_SCALE;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 59082ad..65998b7 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -3092,7 +3092,7 @@
 	host->caps2 &= ~MMC_CAP2_CLK_SCALE;
 	host->clk_scaling.state = MMC_LOAD_HIGH;
 	ret = mmc_clk_update_freq(host, host->card->clk_scaling_highest,
-				host->clk_scaling.state);
+				host->clk_scaling.state, 0);
 	if (ret)
 		pr_err("%s: %s: Setting clk frequency to max failed: %d\n",
 				mmc_hostname(host), __func__, ret);
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 1304160..8cd9ddf 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -39,14 +39,18 @@
 	struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
 
 	if (!IS_ERR(reset_gpios)) {
-		int i;
-		int values[reset_gpios->ndescs];
+		int i, *values;
+		int nvalues = reset_gpios->ndescs;
 
-		for (i = 0; i < reset_gpios->ndescs; i++)
+		values = kmalloc_array(nvalues, sizeof(int), GFP_KERNEL);
+		if (!values)
+			return;
+
+		for (i = 0; i < nvalues; i++)
 			values[i] = value;
 
-		gpiod_set_array_value_cansleep(
-			reset_gpios->ndescs, reset_gpios->desc, values);
+		gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, values);
+		kfree(values);
 	}
 }
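The pwrseq_simple change replaces a variable-length array on the stack with a heap allocation. A short sketch of the general replacement, assuming a hypothetical caller (not part of the patch):

```c
#include <linux/slab.h>

static int example_fill(int n, int value)
{
	int i, *buf;

	/* kmalloc_array() also guards the n * sizeof(*buf) multiplication */
	buf = kmalloc_array(n, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n; i++)
		buf[i] = value;

	/* ... consume buf ... */
	kfree(buf);
	return 0;
}
```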
 
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 01811d9..a4d35d9 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -864,6 +864,33 @@
 	return err;
 }
 
+static int cmdq_get_first_valid_tag(struct cmdq_host *cq_host)
+{
+	u32 dbr_set = 0, tag = 0;
+
+	dbr_set = cmdq_readl(cq_host, CQTDBR);
+	if (!dbr_set) {
+		pr_err("%s: spurious/force error interrupt\n",
+				mmc_hostname(cq_host->mmc));
+		cmdq_halt_poll(cq_host->mmc, false);
+		mmc_host_clr_halt(cq_host->mmc);
+		return -EINVAL;
+	}
+
+	tag = ffs(dbr_set) - 1;
+	pr_err("%s: error tag selected: tag = %d\n",
+		mmc_hostname(cq_host->mmc), tag);
+	return tag;
+}
+
+static bool cmdq_is_valid_tag(struct mmc_host *mmc, unsigned int tag)
+{
+	struct mmc_cmdq_context_info *ctx_info = &mmc->cmdq_ctx;
+
+	return
+	(!!(ctx_info->data_active_reqs & (1 << tag)) || tag == DCMD_SLOT);
+}
+
 static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
 {
 	struct mmc_request *mrq;
@@ -884,11 +911,13 @@
 
 	cmdq_runtime_pm_put(cq_host);
 
-	if (cq_host->ops->crypto_cfg_end) {
-		err = cq_host->ops->crypto_cfg_end(mmc, mrq);
-		if (err) {
-			pr_err("%s: failed to end ice config: err %d tag %d\n",
-					mmc_hostname(mmc), err, tag);
+	if (!(mrq->cmdq_req->cmdq_req_flags & DCMD)) {
+		if (cq_host->ops->crypto_cfg_end) {
+			err = cq_host->ops->crypto_cfg_end(mmc, mrq);
+			if (err) {
+				pr_err("%s: failed to end ice config: err %d tag %d\n",
+						mmc_hostname(mmc), err, tag);
+			}
 		}
 	}
 	if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
@@ -897,7 +926,7 @@
 	mrq->done(mrq);
 }
 
-irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
+irqreturn_t cmdq_irq(struct mmc_host *mmc, int err, bool is_cmd_err)
 {
 	u32 status;
 	unsigned long tag = 0, comp_status;
@@ -922,6 +951,8 @@
 		err_info = cmdq_readl(cq_host, CQTERRI);
 		pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
 		       mmc_hostname(mmc), err, status, err_info);
+		/* Dump the registers before clearing the interrupt */
+		cmdq_dumpregs(cq_host);
 
 		/*
 		 * Need to halt CQE in case of error in interrupt context itself
@@ -945,7 +976,6 @@
 		 */
 		cmdq_writel(cq_host, status, CQIS);
 
-		cmdq_dumpregs(cq_host);
 
 		if (!err_info) {
 			/*
@@ -958,18 +988,10 @@
 			 *   have caused such error, so check for any first
 			 *   bit set in doorbell and proceed with an error.
 			 */
-			dbr_set = cmdq_readl(cq_host, CQTDBR);
-			if (!dbr_set) {
-				pr_err("%s: spurious/force error interrupt\n",
-						mmc_hostname(mmc));
-				cmdq_halt_poll(mmc, false);
-				mmc_host_clr_halt(mmc);
-				return IRQ_HANDLED;
-			}
+			tag = cmdq_get_first_valid_tag(cq_host);
+			if (tag == -EINVAL)
+				goto hac;
 
-			tag = ffs(dbr_set) - 1;
-			pr_err("%s: error tag selected: tag = %lu\n",
-					mmc_hostname(mmc), tag);
 			mrq = get_req_by_tag(cq_host, tag);
 			if (mrq->data)
 				mrq->data->error = err;
@@ -984,10 +1006,24 @@
 			goto skip_cqterri;
 		}
 
-		if (err_info & CQ_RMEFV) {
+		if (is_cmd_err && (err_info & CQ_RMEFV)) {
 			tag = GET_CMD_ERR_TAG(err_info);
 			pr_err("%s: CMD err tag: %lu\n", __func__, tag);
 
+			/*
+			 * In some cases CQTERRI is not providing reliable tag
+			 * info. If the tag is not valid, complete the request
+			 * with any valid tag so that all tags will get
+			 * requeued.
+			 */
+			if (!cmdq_is_valid_tag(mmc, tag)) {
+				pr_err("%s: CMD err tag is invalid: %lu\n",
+						__func__, tag);
+				tag = cmdq_get_first_valid_tag(cq_host);
+				if (tag == -EINVAL)
+					goto hac;
+			}
+
 			mrq = get_req_by_tag(cq_host, tag);
 			/* CMD44/45/46/47 will not have a valid cmd */
 			if (mrq->cmd)
@@ -997,8 +1033,26 @@
 		} else {
 			tag = GET_DAT_ERR_TAG(err_info);
 			pr_err("%s: Dat err  tag: %lu\n", __func__, tag);
+
+			/*
+			 * In some cases CQTERRI is not providing reliable tag
+			 * info. If the tag is not valid, complete the request
+			 * with any valid tag so that all tags will get
+			 * requeued.
+			 */
+			if (!cmdq_is_valid_tag(mmc, tag)) {
+				pr_err("%s: Dat err tag is invalid: %lu\n",
+						__func__, tag);
+				tag = cmdq_get_first_valid_tag(cq_host);
+				if (tag == -EINVAL)
+					goto hac;
+			}
 			mrq = get_req_by_tag(cq_host, tag);
-			mrq->data->error = err;
+
+			if (mrq->data)
+				mrq->data->error = err;
+			else
+				mrq->cmd->error = err;
 		}
 
 skip_cqterri:
@@ -1100,6 +1154,7 @@
 			}
 		}
 		cmdq_finish_data(mmc, tag);
+		goto hac;
 	} else {
 		cmdq_writel(cq_host, status, CQIS);
 	}
@@ -1108,7 +1163,7 @@
 		/* read CQTCN and complete the request */
 		comp_status = cmdq_readl(cq_host, CQTCN);
 		if (!comp_status)
-			goto out;
+			goto hac;
 		/*
 		 * The CQTCN must be cleared before notifying req completion
 		 * to upper layers to avoid missing completion notification
@@ -1135,7 +1190,7 @@
 			}
 		}
 	}
-
+hac:
 	if (status & CQIS_HAC) {
 		if (cq_host->ops->post_cqe_halt)
 			cq_host->ops->post_cqe_halt(mmc);
@@ -1146,7 +1201,6 @@
 		complete(&cq_host->halt_comp);
 	}
 
-out:
 	return IRQ_HANDLED;
 }
 EXPORT_SYMBOL(cmdq_irq);
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 1aabce9..0349989 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -244,7 +244,7 @@
 		return readl_relaxed(host->mmio + reg);
 }
 
-extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err);
+extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err, bool is_cmd_err);
 extern int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
 		     bool dma64);
 extern struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d382dbd..e10a00d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -981,8 +981,8 @@
 	 * It's used when HS400 mode is enabled.
 	 */
 	if (data->flags & MMC_DATA_WRITE &&
-		!(host->timing != MMC_TIMING_MMC_HS400))
-		return;
+		host->timing != MMC_TIMING_MMC_HS400)
+		goto disable;
 
 	if (data->flags & MMC_DATA_WRITE)
 		enable = SDMMC_CARD_WR_THR_EN;
@@ -990,7 +990,8 @@
 		enable = SDMMC_CARD_RD_THR_EN;
 
 	if (host->timing != MMC_TIMING_MMC_HS200 &&
-	    host->timing != MMC_TIMING_UHS_SDR104)
+	    host->timing != MMC_TIMING_UHS_SDR104 &&
+	    host->timing != MMC_TIMING_MMC_HS400)
 		goto disable;
 
 	blksz_depth = blksz / (1 << host->data_shift);
@@ -1163,6 +1164,8 @@
 	if (host->state == STATE_WAITING_CMD11_DONE)
 		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
 
+	slot->mmc->actual_clock = 0;
+
 	if (!clock) {
 		mci_writel(host, CLKENA, 0);
 		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
@@ -1208,6 +1211,8 @@
 
 		/* keep the last clock value that was requested from core */
 		slot->__clk_old = clock;
+		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
+					  host->bus_hz;
 	}
 
 	host->current_speed = clock;
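The actual_clock reported above follows the dw_mmc divider rule: a programmed divider of div yields roughly bus_hz / (2 * div), and div == 0 passes the bus clock through undivided. A one-line sketch of the same arithmetic (hypothetical helper, not part of the patch):

```c
static unsigned int example_dw_mmc_actual_clock(unsigned int bus_hz,
						unsigned int div)
{
	/* div == 0 means no division; otherwise halve bus_hz / div */
	return div ? (bus_hz / div) >> 1 : bus_hz;
}
```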
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index a082aa3..f7d1c8c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2215,6 +2215,7 @@
 	dma_release_channel(host->tx_chan);
 	dma_release_channel(host->rx_chan);
 
+	dev_pm_clear_wake_irq(host->dev);
 	pm_runtime_dont_use_autosuspend(host->dev);
 	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1119292..7b221c3 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -94,6 +94,7 @@
 #define CORE_DDR_DLL_LOCK	(1 << 11)
 
 #define CORE_CLK_PWRSAVE		(1 << 1)
+#define CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN	(1 << 7)
 #define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
 #define CORE_HC_MCLK_SEL_HS400		(3 << 8)
 #define CORE_HC_MCLK_SEL_MASK		(3 << 8)
@@ -4924,6 +4925,12 @@
 	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
 	host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
 
+	/* This enables the ADMA error interrupt in case of length mismatch */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) |
+			CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+
 	/*
 	 * Ensure SDHCI FIFO is enabled by disabling alternative FIFO
 	 */
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 20b6ff5..088a3ae 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -350,7 +350,8 @@
 		  SDHCI_QUIRK_NO_HISPD_BIT |
 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+		   SDHCI_QUIRK2_BROKEN_HS200,
 	.ops  = &tegra_sdhci_ops,
 };
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 36aecd2..01c8b90 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -149,6 +149,7 @@
 	}
 
 	host->mmc->err_occurred = true;
+	host->mmc->last_failed_rq_time = ktime_get();
 
 	if (host->ops->dump_vendor_regs)
 		host->ops->dump_vendor_regs(host);
@@ -3288,7 +3289,7 @@
 	} else if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
 		host->mmc->err_stats[MMC_ERR_DAT_CRC]++;
 		return -EILSEQ;
-	} else if (intmask & MMC_ERR_ADMA) {
+	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		host->mmc->err_stats[MMC_ERR_ADMA]++;
 		return -EIO;
 	}
@@ -3300,13 +3301,18 @@
 	int err = 0;
 	u32 mask = 0;
 	irqreturn_t ret;
+	bool is_cmd_err = false;
 
-	if (intmask & SDHCI_INT_CMD_MASK)
+	if (intmask & SDHCI_INT_CMD_MASK) {
 		err = sdhci_get_cmd_err(host, intmask);
-	else if (intmask & SDHCI_INT_DATA_MASK)
+		is_cmd_err = true;
+	} else if (intmask & SDHCI_INT_DATA_MASK) {
 		err = sdhci_get_data_err(host, intmask);
+		if (intmask & SDHCI_INT_DATA_TIMEOUT)
+			is_cmd_err = sdhci_card_busy(host->mmc);
+	}
 
-	ret = cmdq_irq(host->mmc, err);
+	ret = cmdq_irq(host->mmc, err, is_cmd_err);
 	if (err) {
 		/* Clear the error interrupts */
 		mask = intmask & SDHCI_INT_ERROR_MASK;
@@ -4299,14 +4305,21 @@
 	    !(mmc->caps2 & MMC_CAP2_NONHOTPLUG) && !host->mmc->extcon)
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
 	if (!IS_ERR(mmc->supply.vqmmc)) {
 		ret = regulator_enable(mmc->supply.vqmmc);
+
+		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
 		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
 						    1950000))
 			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
 					 SDHCI_SUPPORT_SDR50 |
 					 SDHCI_SUPPORT_DDR50);
+
+		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
+		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
+						    3600000))
+			host->flags &= ~SDHCI_SIGNALING_330;
+
 		if (ret) {
 			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
 				mmc_hostname(mmc), ret);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 9cf7fcd..16a7df2 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -172,7 +172,8 @@
 
 	t[1].rx_buf = buf;
 	t[1].rx_nbits = m25p80_rx_nbits(nor);
-	t[1].len = min(len, spi_max_transfer_size(spi));
+	t[1].len = min3(len, spi_max_transfer_size(spi),
+			spi_max_message_size(spi) - t[0].len);
 	spi_message_add_tail(&t[1], &m);
 
 	ret = spi_sync(spi, &m);
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index bb580bc..c07f21b 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -59,9 +59,9 @@
 			return -ENXIO;
 		}
 	}
-	printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
-	       soleng_flash_map.phys & 0x1fffffff,
-	       soleng_eprom_map.phys & 0x1fffffff);
+	printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n",
+	       &soleng_flash_map.phys,
+	       &soleng_eprom_map.phys);
 	flash_mtd->owner = THIS_MODULE;
 
 	eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index b4092ea..95b6a66 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -160,8 +160,12 @@
 
 	pr_debug("MTD_read\n");
 
-	if (*ppos + count > mtd->size)
-		count = mtd->size - *ppos;
+	if (*ppos + count > mtd->size) {
+		if (*ppos < mtd->size)
+			count = mtd->size - *ppos;
+		else
+			count = 0;
+	}
 
 	if (!count)
 		return 0;
@@ -246,7 +250,7 @@
 
 	pr_debug("MTD_write\n");
 
-	if (*ppos == mtd->size)
+	if (*ppos >= mtd->size)
 		return -ENOSPC;
 
 	if (*ppos + count > mtd->size)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 2f6b552..4c3b986 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -372,9 +372,16 @@
 
 	case NAND_CMD_READID:
 	case NAND_CMD_PARAM: {
+		/*
+		 * For READID, read the 8 bytes that are currently used.
+		 * For PARAM, read all 3 copies of the 256-byte parameter page.
+		 */
+		int len = 8;
 		int timing = IFC_FIR_OP_RB;
-		if (command == NAND_CMD_PARAM)
+		if (command == NAND_CMD_PARAM) {
 			timing = IFC_FIR_OP_RBCD;
+			len = 256 * 3;
+		}
 
 		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
 			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
@@ -384,12 +391,8 @@
 			  &ifc->ifc_nand.nand_fcr0);
 		ifc_out32(column, &ifc->ifc_nand.row3);
 
-		/*
-		 * although currently it's 8 bytes for READID, we always read
-		 * the maximum 256 bytes(for PARAM)
-		 */
-		ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
-		ifc_nand_ctrl->read_bytes = 256;
+		ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
+		ifc_nand_ctrl->read_bytes = len;
 
 		set_addr(mtd, 0, 0, 0);
 		fsl_ifc_run_command(mtd);
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 6f0fd15..dc49431 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -2008,6 +2008,9 @@
 
 	nand_set_flash_node(chip, dn);
 	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+	if (!mtd->name)
+		return -ENOMEM;
+
 	mtd->owner = THIS_MODULE;
 	mtd->dev.parent = dev;
 
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 23a6986..a8f74d9 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1615,8 +1615,10 @@
 		cond_resched();
 
 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-		if (!e)
+		if (!e) {
+			err = -ENOMEM;
 			goto out_free;
+		}
 
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
@@ -1635,8 +1637,10 @@
 			cond_resched();
 
 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-			if (!e)
+			if (!e) {
+				err = -ENOMEM;
 				goto out_free;
+			}
 
 			e->pnum = aeb->pnum;
 			e->ec = aeb->ec;
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 2e46496..4e98e5a 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -284,8 +284,12 @@
                 case SIOCFINDIPDDPRT:
 			spin_lock_bh(&ipddp_route_lock);
 			rp = __ipddp_find_route(&rcp);
-			if (rp)
-				memcpy(&rcp2, rp, sizeof(rcp2));
+			if (rp) {
+				memset(&rcp2, 0, sizeof(rcp2));
+				rcp2.ip    = rp->ip;
+				rcp2.at    = rp->at;
+				rcp2.flags = rp->flags;
+			}
 			spin_unlock_bh(&ipddp_route_lock);
 
 			if (rp) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f5fcc08..8a5e0ae 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1682,6 +1682,8 @@
 		goto err_upper_unlink;
 	}
 
+	bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+
 	/* If the mode uses primary, then the following is handled by
 	 * bond_change_active_slave().
 	 */
@@ -1729,7 +1731,6 @@
 	if (bond_mode_uses_xmit_hash(bond))
 		bond_update_slave_arr(bond, NULL);
 
-	bond->nest_level = dev_get_nest_level(bond_dev);
 
 	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
 		    slave_dev->name,
@@ -3359,6 +3360,13 @@
 	}
 }
 
+static int bond_get_nest_level(struct net_device *bond_dev)
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+
+	return bond->nest_level;
+}
+
 static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 						struct rtnl_link_stats64 *stats)
 {
@@ -3367,7 +3375,7 @@
 	struct list_head *iter;
 	struct slave *slave;
 
-	spin_lock(&bond->stats_lock);
+	spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
 
 	rcu_read_lock();
@@ -4163,6 +4171,7 @@
 	.ndo_neigh_setup	= bond_neigh_setup,
 	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
+	.ndo_get_lock_subclass  = bond_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_netpoll_setup	= bond_netpoll_setup,
 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
@@ -4655,6 +4664,7 @@
 	if (!bond->wq)
 		return -ENOMEM;
 
+	bond->nest_level = SINGLE_DEPTH_NESTING;
 	netdev_lockdep_set_classes(bond_dev);
 
 	list_add_tail(&bond->bond_list, &bn->dev_list);
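The bonding change above feeds the device's nesting depth into lockdep. When the same lock class can be taken through stacked devices (a bond on top of a bond), each level needs its own subclass, which spin_lock_nested() provides. A minimal sketch with hypothetical names (not part of the patch):

```c
#include <linux/spinlock.h>

struct example_stacked_dev {
	spinlock_t stats_lock;
	int nest_level;		/* SINGLE_DEPTH_NESTING at the bottom, +1 per layer */
};

static void example_get_stats(struct example_stacked_dev *dev)
{
	/* the subclass tells lockdep this is a distinct nesting level */
	spin_lock_nested(&dev->stats_lock, dev->nest_level);
	/* ... aggregate stats; lower devices use a smaller nest_level ... */
	spin_unlock(&dev->stats_lock);
}
```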
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bd..2949a38 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@
 		return 0;
 	}
 	cdm = of_iomap(np_cdm, 0);
+	if (!cdm) {
+		of_node_put(np_cdm);
+		dev_err(&ofdev->dev, "can't map clock node!\n");
+		return 0;
+	}
 
 	if (in_8(&cdm->ipb_clk_sel) & 0x1)
 		freq *= 2;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b003582..d0846ae 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1071,6 +1071,7 @@
 		usb_free_urb(dev->intr_urb);
 
 		kfree(dev->intr_in_buffer);
+		kfree(dev->tx_msg_buffer);
 	}
 }
 
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c71a035..e680bab 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2012 - 2014 Xilinx, Inc.
  * Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
  *
  * Description:
  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -101,7 +104,7 @@
 #define XCAN_INTR_ALL		(XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
 				 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
 				 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-				 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+				 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
@@ -118,6 +121,7 @@
 /**
  * struct xcan_priv - This definition define CAN driver instance
  * @can:			CAN private data structure.
+ * @tx_lock:			Lock for synchronizing TX interrupt handling
  * @tx_head:			Tx CAN packets ready to send on the queue
  * @tx_tail:			Tx CAN packets successfully sended on the queue
  * @tx_max:			Maximum number packets the driver can send
@@ -132,6 +136,7 @@
  */
 struct xcan_priv {
 	struct can_priv can;
+	spinlock_t tx_lock;
 	unsigned int tx_head;
 	unsigned int tx_tail;
 	unsigned int tx_max;
@@ -159,6 +164,11 @@
 	.brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK	0x0001
+struct xcan_devtype_data {
+	unsigned int caps;
+};
+
 /**
  * xcan_write_reg_le - Write a value to the device register little endian
  * @priv:	Driver private data structure
@@ -238,6 +248,10 @@
 		usleep_range(500, 10000);
 	}
 
+	/* reset clears FIFOs */
+	priv->tx_head = 0;
+	priv->tx_tail = 0;
+
 	return 0;
 }
 
@@ -392,6 +406,7 @@
 	struct net_device_stats *stats = &ndev->stats;
 	struct can_frame *cf = (struct can_frame *)skb->data;
 	u32 id, dlc, data[2] = {0, 0};
+	unsigned long flags;
 
 	if (can_dropped_invalid_skb(ndev, skb))
 		return NETDEV_TX_OK;
@@ -439,6 +454,9 @@
 		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
 	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+
 	priv->tx_head++;
 
 	/* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@
 		stats->tx_bytes += cf->can_dlc;
 	}
 
+	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+	if (priv->tx_max > 1)
+		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
 	/* Check if the TX buffer is full */
 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
 		netif_stop_queue(ndev);
 
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
 	return NETDEV_TX_OK;
 }
 
@@ -530,6 +554,123 @@
 }
 
 /**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev:	Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+		return CAN_STATE_ERROR_PASSIVE;
+	else if (status & XCAN_SR_ERRWRN_MASK)
+		return CAN_STATE_ERROR_WARNING;
+	else
+		return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev:	Pointer to net_device structure
+ * @new_state:	The new CAN state to be set
+ * @cf:		Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+				 enum can_state new_state,
+				 struct can_frame *cf)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+	priv->can.state = new_state;
+
+	if (cf) {
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[6] = txerr;
+		cf->data[7] = rxerr;
+	}
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_PASSIVE:
+		priv->can.can_stats.error_passive++;
+		if (cf)
+			cf->data[1] = (rxerr > 127) ?
+					CAN_ERR_CRTL_RX_PASSIVE :
+					CAN_ERR_CRTL_TX_PASSIVE;
+		break;
+	case CAN_STATE_ERROR_WARNING:
+		priv->can.can_stats.error_warning++;
+		if (cf)
+			cf->data[1] |= (txerr > rxerr) ?
+					CAN_ERR_CRTL_TX_WARNING :
+					CAN_ERR_CRTL_RX_WARNING;
+		break;
+	case CAN_STATE_ERROR_ACTIVE:
+		if (cf)
+			cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+		break;
+	default:
+		/* non-ERROR states are handled elsewhere */
+		WARN_ON(1);
+		break;
+	}
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev:	Pointer to net_device structure
+ *
+ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	enum can_state old_state = priv->can.state;
+	enum can_state new_state;
+
+	/* changing error state due to successful frame RX/TX can only
+	 * occur from these states
+	 */
+	if (old_state != CAN_STATE_ERROR_WARNING &&
+	    old_state != CAN_STATE_ERROR_PASSIVE)
+		return;
+
+	new_state = xcan_current_error_state(ndev);
+
+	if (new_state != old_state) {
+		struct sk_buff *skb;
+		struct can_frame *cf;
+
+		skb = alloc_can_err_skb(ndev, &cf);
+
+		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+		if (skb) {
+			struct net_device_stats *stats = &ndev->stats;
+
+			stats->rx_packets++;
+			stats->rx_bytes += cf->can_dlc;
+			netif_rx(skb);
+		}
+	}
+}
+
+/**
  * xcan_err_interrupt - error frame Isr
  * @ndev:	net_device pointer
  * @isr:	interrupt status register value
@@ -544,16 +685,12 @@
 	struct net_device_stats *stats = &ndev->stats;
 	struct can_frame *cf;
 	struct sk_buff *skb;
-	u32 err_status, status, txerr = 0, rxerr = 0;
+	u32 err_status;
 
 	skb = alloc_can_err_skb(ndev, &cf);
 
 	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
 	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-	txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-	rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-	status = priv->read_reg(priv, XCAN_SR_OFFSET);
 
 	if (isr & XCAN_IXR_BSOFF_MASK) {
 		priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@
 		can_bus_off(ndev);
 		if (skb)
 			cf->can_id |= CAN_ERR_BUSOFF;
-	} else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-		priv->can.state = CAN_STATE_ERROR_PASSIVE;
-		priv->can.can_stats.error_passive++;
-		if (skb) {
-			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] = (rxerr > 127) ?
-					CAN_ERR_CRTL_RX_PASSIVE :
-					CAN_ERR_CRTL_TX_PASSIVE;
-			cf->data[6] = txerr;
-			cf->data[7] = rxerr;
-		}
-	} else if (status & XCAN_SR_ERRWRN_MASK) {
-		priv->can.state = CAN_STATE_ERROR_WARNING;
-		priv->can.can_stats.error_warning++;
-		if (skb) {
-			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] |= (txerr > rxerr) ?
-					CAN_ERR_CRTL_TX_WARNING :
-					CAN_ERR_CRTL_RX_WARNING;
-			cf->data[6] = txerr;
-			cf->data[7] = rxerr;
-		}
+	} else {
+		enum can_state new_state = xcan_current_error_state(ndev);
+
+		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
 	}
 
 	/* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@
 	if (isr & XCAN_IXR_RXOFLW_MASK) {
 		stats->rx_over_errors++;
 		stats->rx_errors++;
-		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
 		if (skb) {
 			cf->can_id |= CAN_ERR_CRTL;
 			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@
 
 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
 	while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-		if (isr & XCAN_IXR_RXOK_MASK) {
-			priv->write_reg(priv, XCAN_ICR_OFFSET,
-				XCAN_IXR_RXOK_MASK);
-			work_done += xcan_rx(ndev);
-		} else {
-			priv->write_reg(priv, XCAN_ICR_OFFSET,
-				XCAN_IXR_RXNEMP_MASK);
-			break;
-		}
+		work_done += xcan_rx(ndev);
 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
 		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
 	}
 
-	if (work_done)
+	if (work_done) {
 		can_led_event(ndev, CAN_LED_EVENT_RX);
+		xcan_update_error_state_after_rxtx(ndev);
+	}
 
 	if (work_done < quota) {
 		napi_complete(napi);
 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-		ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+		ier |= XCAN_IXR_RXNEMP_MASK;
 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
 	}
 	return work_done;
@@ -743,18 +855,71 @@
 {
 	struct xcan_priv *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &ndev->stats;
+	unsigned int frames_in_fifo;
+	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+	unsigned long flags;
+	int retries = 0;
 
-	while ((priv->tx_head - priv->tx_tail > 0) &&
-			(isr & XCAN_IXR_TXOK_MASK)) {
+	/* Synchronize with xmit as we need to know the exact number
+	 * of frames in the FIFO to stay in sync due to the TXFEMP
+	 * handling.
+	 * This also prevents a race between netif_wake_queue() and
+	 * netif_stop_queue().
+	 */
+	spin_lock_irqsave(&priv->tx_lock, flags);
+
+	frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+		/* clear TXOK anyway to avoid getting back here */
 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+		spin_unlock_irqrestore(&priv->tx_lock, flags);
+		return;
+	}
+
+	/* Check if 2 frames were sent (TXOK only means that at least 1
+	 * frame was sent).
+	 */
+	if (frames_in_fifo > 1) {
+		WARN_ON(frames_in_fifo > priv->tx_max);
+
+		/* Synchronize TXOK and isr so that after the loop:
+		 * (1) isr variable is up-to-date at least up to TXOK clear
+		 *     time. This avoids us clearing a TXOK of a second frame
+		 *     but not noticing that the FIFO is now empty and thus
+		 *     marking only a single frame as sent.
+		 * (2) No TXOK is left. Having one could mean leaving a
+		 *     stray TXOK as we might process the associated frame
+		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
+		 *     clear to satisfy (1).
+		 */
+		while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+			priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+		}
+
+		if (isr & XCAN_IXR_TXFEMP_MASK) {
+			/* nothing in FIFO anymore */
+			frames_sent = frames_in_fifo;
+		}
+	} else {
+		/* single frame in fifo, just clear TXOK */
+		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+	}
+
+	while (frames_sent--) {
 		can_get_echo_skb(ndev, priv->tx_tail %
 					priv->tx_max);
 		priv->tx_tail++;
 		stats->tx_packets++;
-		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
 	}
-	can_led_event(ndev, CAN_LED_EVENT_TX);
+
 	netif_wake_queue(ndev);
+
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	can_led_event(ndev, CAN_LED_EVENT_TX);
+	xcan_update_error_state_after_rxtx(ndev);
 }
 
 /**
@@ -773,6 +938,7 @@
 	struct net_device *ndev = (struct net_device *)dev_id;
 	struct xcan_priv *priv = netdev_priv(ndev);
 	u32 isr, ier;
+	u32 isr_errors;
 
 	/* Get the interrupt status from Xilinx CAN */
 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@
 		xcan_tx_interrupt(ndev, isr);
 
 	/* Check for the type of error interrupt and Processing it */
-	if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
-			XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
-		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
-				XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
-				XCAN_IXR_ARBLST_MASK));
+	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+	if (isr_errors) {
+		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
 		xcan_err_interrupt(ndev, isr);
 	}
 
 	/* Check for the type of receive interrupt and process it */
-	if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+	if (isr & XCAN_IXR_RXNEMP_MASK) {
 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-		ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+		ier &= ~XCAN_IXR_RXNEMP_MASK;
 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
 		napi_schedule(&priv->napi);
 	}
@@ -819,13 +984,9 @@
 static void xcan_chip_stop(struct net_device *ndev)
 {
 	struct xcan_priv *priv = netdev_priv(ndev);
-	u32 ier;
 
 	/* Disable interrupts and leave the can in configuration mode */
-	ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-	ier &= ~XCAN_INTR_ALL;
-	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
-	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+	set_reset_mode(ndev);
 	priv->can.state = CAN_STATE_STOPPED;
 }
 
@@ -958,10 +1119,15 @@
  */
 static int __maybe_unused xcan_suspend(struct device *dev)
 {
-	if (!device_may_wakeup(dev))
-		return pm_runtime_force_suspend(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
 
-	return 0;
+	if (netif_running(ndev)) {
+		netif_stop_queue(ndev);
+		netif_device_detach(ndev);
+		xcan_chip_stop(ndev);
+	}
+
+	return pm_runtime_force_suspend(dev);
 }
 
 /**
@@ -973,11 +1139,27 @@
  */
 static int __maybe_unused xcan_resume(struct device *dev)
 {
-	if (!device_may_wakeup(dev))
-		return pm_runtime_force_resume(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_force_resume(dev);
+	if (ret) {
+		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+		return ret;
+	}
+
+	if (netif_running(ndev)) {
+		ret = xcan_chip_start(ndev);
+		if (ret) {
+			dev_err(dev, "xcan_chip_start failed on resume\n");
+			return ret;
+		}
+
+		netif_device_attach(ndev);
+		netif_start_queue(ndev);
+	}
 
 	return 0;
-
 }
 
 /**
@@ -992,14 +1174,6 @@
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct xcan_priv *priv = netdev_priv(ndev);
 
-	if (netif_running(ndev)) {
-		netif_stop_queue(ndev);
-		netif_device_detach(ndev);
-	}
-
-	priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
-	priv->can.state = CAN_STATE_SLEEPING;
-
 	clk_disable_unprepare(priv->bus_clk);
 	clk_disable_unprepare(priv->can_clk);
 
@@ -1018,7 +1192,6 @@
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct xcan_priv *priv = netdev_priv(ndev);
 	int ret;
-	u32 isr, status;
 
 	ret = clk_prepare_enable(priv->bus_clk);
 	if (ret) {
@@ -1032,27 +1205,6 @@
 		return ret;
 	}
 
-	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
-	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
-	status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
-	if (netif_running(ndev)) {
-		if (isr & XCAN_IXR_BSOFF_MASK) {
-			priv->can.state = CAN_STATE_BUS_OFF;
-			priv->write_reg(priv, XCAN_SRR_OFFSET,
-					XCAN_SRR_RESET_MASK);
-		} else if ((status & XCAN_SR_ESTAT_MASK) ==
-					XCAN_SR_ESTAT_MASK) {
-			priv->can.state = CAN_STATE_ERROR_PASSIVE;
-		} else if (status & XCAN_SR_ERRWRN_MASK) {
-			priv->can.state = CAN_STATE_ERROR_WARNING;
-		} else {
-			priv->can.state = CAN_STATE_ERROR_ACTIVE;
-		}
-		netif_device_attach(ndev);
-		netif_start_queue(ndev);
-	}
-
 	return 0;
 }
 
@@ -1061,6 +1213,18 @@
 	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
 };
 
+static const struct xcan_devtype_data xcan_zynq_data = {
+	.caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+	{ .compatible = "xlnx,axi-can-1.00.a", },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
  * xcan_probe - Platform registration call
  * @pdev:	Handle to the platform device structure
@@ -1075,8 +1239,10 @@
 	struct resource *res; /* IO mem resources */
 	struct net_device *ndev;
 	struct xcan_priv *priv;
+	const struct of_device_id *of_id;
+	int caps = 0;
 	void __iomem *addr;
-	int ret, rx_max, tx_max;
+	int ret, rx_max, tx_max, tx_fifo_depth;
 
 	/* Get the virtual base address for the device */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@
 		goto err;
 	}
 
-	ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+	ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+				   &tx_fifo_depth);
 	if (ret < 0)
 		goto err;
 
@@ -1094,6 +1261,30 @@
 	if (ret < 0)
 		goto err;
 
+	of_id = of_match_device(xcan_of_match, &pdev->dev);
+	if (of_id) {
+		const struct xcan_devtype_data *devtype_data = of_id->data;
+
+		if (devtype_data)
+			caps = devtype_data->caps;
+	}
+
+	/* There is no way to directly figure out how many frames have been
+	 * sent when the TXOK interrupt is processed. If watermark programming
+	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
+	 * to determine if 1 or 2 frames have been sent.
+	 * Theoretically we should be able to use TXFWMEMP to determine up
+	 * to 3 frames, but it seems that after putting a second frame in the
+	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+	 * sent), which is not a sensible state - possibly TXFWMEMP is not
+	 * completely synchronized with the rest of the bits?
+	 */
+	if (caps & XCAN_CAP_WATERMARK)
+		tx_max = min(tx_fifo_depth, 2);
+	else
+		tx_max = 1;
+
 	/* Create a CAN device instance */
 	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
 	if (!ndev)
@@ -1108,6 +1299,7 @@
 					CAN_CTRLMODE_BERR_REPORTING;
 	priv->reg_base = addr;
 	priv->tx_max = tx_max;
+	spin_lock_init(&priv->tx_lock);
 
 	/* Get IRQ for the device */
 	ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@
 
 	pm_runtime_put(&pdev->dev);
 
-	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
 			priv->reg_base, ndev->irq, priv->can.clock.freq,
-			priv->tx_max);
+			tx_fifo_depth, priv->tx_max);
 
 	return 0;
 
@@ -1208,14 +1400,6 @@
 	return 0;
 }
 
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
-	{ .compatible = "xlnx,zynq-can-1.0", },
-	{ .compatible = "xlnx,axi-can-1.00.a", },
-	{ /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
 	.probe = xcan_probe,
 	.remove	= xcan_remove,
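
The xcan changes above track TX completion with head/tail counters under tx_lock, since TXOK only guarantees that at least one frame left the FIFO and TXFEMP tells whether it drained completely. A minimal user-space sketch of that bookkeeping follows, assuming a hypothetical 2-deep FIFO; none of the names below are the driver's API.

/*
 * Sketch of the head/tail accounting: "txok" means at least one frame
 * completed, "fifo_empty" means everything outstanding completed.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct tx_fifo {
	unsigned int head;	/* frames queued so far */
	unsigned int tail;	/* frames known to be completed */
	unsigned int max;	/* FIFO depth used (2 in this sketch) */
};

static void tx_complete(struct tx_fifo *f, bool fifo_empty)
{
	unsigned int in_fifo = f->head - f->tail;
	unsigned int sent = 1;	/* txok => at least one frame was sent */

	assert(in_fifo > 0 && in_fifo <= f->max);

	/* with two frames outstanding, an empty FIFO means both were sent */
	if (in_fifo > 1 && fifo_empty)
		sent = in_fifo;

	f->tail += sent;
	printf("completed %u frame(s), %u still queued\n",
	       sent, f->head - f->tail);
}

int main(void)
{
	struct tx_fifo f = { .max = 2 };

	f.head = 2;			/* two frames queued by xmit */
	tx_complete(&f, false);		/* TXOK, FIFO not empty -> 1 frame */
	tx_complete(&f, true);		/* TXOK, FIFO empty -> last frame */
	return 0;
}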
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index b3df70d..7f64a76 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -474,7 +474,7 @@
 static void
 qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
 {
-	u32 mask = QCA8K_PORT_STATUS_TXMAC;
+	u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
 
 	/* Port 0 and 6 have no internal PHY */
 	if ((port > 0) && (port < 6))
@@ -491,6 +491,7 @@
 {
 	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
 	int ret, i, phy_mode = -1;
+	u32 mask;
 
 	/* Make sure that port 0 is the cpu port */
 	if (!dsa_is_cpu_port(ds, 0)) {
@@ -516,7 +517,10 @@
 	if (ret < 0)
 		return ret;
 
-	/* Enable CPU Port */
+	/* Enable CPU Port, force it to maximum bandwidth and full-duplex */
+	mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW |
+	       QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX;
+	qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask);
 	qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
 		      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
 	qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
@@ -585,6 +589,47 @@
 	return 0;
 }
 
+static void
+qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
+{
+	struct qca8k_priv *priv = ds->priv;
+	u32 reg;
+
+	/* Force fixed-link setting for CPU port, skip others. */
+	if (!phy_is_pseudo_fixed_link(phy))
+		return;
+
+	/* Set port speed */
+	switch (phy->speed) {
+	case 10:
+		reg = QCA8K_PORT_STATUS_SPEED_10;
+		break;
+	case 100:
+		reg = QCA8K_PORT_STATUS_SPEED_100;
+		break;
+	case 1000:
+		reg = QCA8K_PORT_STATUS_SPEED_1000;
+		break;
+	default:
+		dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n",
+			port, phy->speed);
+		return;
+	}
+
+	/* Set duplex mode */
+	if (phy->duplex == DUPLEX_FULL)
+		reg |= QCA8K_PORT_STATUS_DUPLEX;
+
+	/* Force flow control */
+	if (dsa_is_cpu_port(ds, port))
+		reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW;
+
+	/* Force link down before changing MAC options */
+	qca8k_port_set_status(priv, port, 0);
+	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
+	qca8k_port_set_status(priv, port, 1);
+}
+
 static int
 qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
 {
@@ -914,6 +959,7 @@
 static struct dsa_switch_ops qca8k_switch_ops = {
 	.get_tag_protocol	= qca8k_get_tag_protocol,
 	.setup			= qca8k_setup,
+	.adjust_link            = qca8k_adjust_link,
 	.get_strings		= qca8k_get_strings,
 	.phy_read		= qca8k_phy_read,
 	.phy_write		= qca8k_phy_write,
@@ -946,6 +992,7 @@
 		return -ENOMEM;
 
 	priv->bus = mdiodev->bus;
+	priv->dev = &mdiodev->dev;
 
 	/* read the switches ID register */
 	id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
@@ -1018,6 +1065,7 @@
 			 qca8k_suspend, qca8k_resume);
 
 static const struct of_device_id qca8k_of_match[] = {
+	{ .compatible = "qca,qca8334" },
 	{ .compatible = "qca,qca8337" },
 	{ /* sentinel */ },
 };
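
qca8k_adjust_link() above folds the PHY's speed and duplex into a single port-status word before re-enabling the port. A small sketch of that mapping follows; only the speed values match the qca8k.h definitions shown in this diff, while the duplex bit position and helper names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define PORT_STATUS_SPEED_10	0x0
#define PORT_STATUS_SPEED_100	0x1
#define PORT_STATUS_SPEED_1000	0x2
#define PORT_STATUS_DUPLEX	(1u << 6)	/* illustrative bit position */

/* build a port-status word from link parameters, -1 on unsupported speed */
static int port_status_from_link(int speed, int full_duplex, uint32_t *reg)
{
	switch (speed) {
	case 10:
		*reg = PORT_STATUS_SPEED_10;
		break;
	case 100:
		*reg = PORT_STATUS_SPEED_100;
		break;
	case 1000:
		*reg = PORT_STATUS_SPEED_1000;
		break;
	default:
		return -1;
	}

	if (full_duplex)
		*reg |= PORT_STATUS_DUPLEX;
	return 0;
}

int main(void)
{
	uint32_t reg;

	if (!port_status_from_link(1000, 1, &reg))
		printf("port status word: 0x%x\n", reg);
	return 0;
}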
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 2014647..9c22bc3 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -51,8 +51,10 @@
 #define QCA8K_GOL_MAC_ADDR0				0x60
 #define QCA8K_GOL_MAC_ADDR1				0x64
 #define QCA8K_REG_PORT_STATUS(_i)			(0x07c + (_i) * 4)
-#define   QCA8K_PORT_STATUS_SPEED			GENMASK(2, 0)
-#define   QCA8K_PORT_STATUS_SPEED_S			0
+#define   QCA8K_PORT_STATUS_SPEED			GENMASK(1, 0)
+#define   QCA8K_PORT_STATUS_SPEED_10			0
+#define   QCA8K_PORT_STATUS_SPEED_100			0x1
+#define   QCA8K_PORT_STATUS_SPEED_1000			0x2
 #define   QCA8K_PORT_STATUS_TXMAC			BIT(2)
 #define   QCA8K_PORT_STATUS_RXMAC			BIT(3)
 #define   QCA8K_PORT_STATUS_TXFLOW			BIT(4)
@@ -167,6 +169,7 @@
 	struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
 	struct dsa_switch *ds;
 	struct mutex reg_mutex;
+	struct device *dev;
 };
 
 struct qca8k_mib_desc {
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658b..5c3ef9f 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@
 
 config 3C515
 	tristate "3c515 ISA \"Fast EtherLink\""
-	depends on ISA && ISA_DMA_API
+	depends on ISA && ISA_DMA_API && !PPC32
 	---help---
 	  If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
 	  network card, say Y here.
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index e13c9cd..bcd9931 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -331,6 +331,7 @@
 
 	memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
 
+	io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
 	io_sq->desc_entry_size =
 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
 		sizeof(struct ena_eth_io_tx_desc) :
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0d9ce08..1d92e03 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -422,7 +422,7 @@
 		return -ENOMEM;
 	}
 
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
 			   DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
 		u64_stats_update_begin(&rx_ring->syncp);
@@ -439,7 +439,7 @@
 	rx_info->page_offset = 0;
 	ena_buf = &rx_info->ena_buf;
 	ena_buf->paddr = dma;
-	ena_buf->len = PAGE_SIZE;
+	ena_buf->len = ENA_PAGE_SIZE;
 
 	return 0;
 }
@@ -456,7 +456,7 @@
 		return;
 	}
 
-	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
 		       DMA_FROM_DEVICE);
 
 	__free_page(page);
@@ -849,10 +849,10 @@
 	do {
 		dma_unmap_page(rx_ring->dev,
 			       dma_unmap_addr(&rx_info->ena_buf, paddr),
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-				rx_info->page_offset, len, PAGE_SIZE);
+				rx_info->page_offset, len, ENA_PAGE_SIZE);
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx skb updated. len %d. data_len %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index c5eaf76..008f2d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -321,4 +321,15 @@
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
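
The ENA_PAGE_SIZE cap above exists because the descriptor length field is only 16 bits wide, so a full 64 KiB page would be reported as 0. A sketch of the cap and of the truncation it avoids, with illustrative page sizes:

#include <stdint.h>
#include <stdio.h>

#define SZ_16K	(16 * 1024)

/* mirrors the #if PAGE_SIZE > SZ_16K cap added in ena_netdev.h */
static unsigned int ena_buf_len(unsigned int page_size)
{
	return page_size > SZ_16K ? SZ_16K : page_size;
}

int main(void)
{
	printf("4K pages  -> buf len %u\n", ena_buf_len(4096));
	printf("64K pages -> buf len %u (0x%x truncated to u16: 0x%x)\n",
	       ena_buf_len(64 * 1024), (unsigned)(64 * 1024),
	       (unsigned)(uint16_t)(64 * 1024));
	return 0;
}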
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 0038709..ec59425 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@
 
 config LANCE
 	tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-	depends on ISA && ISA_DMA_API && !ARM
+	depends on ISA && ISA_DMA_API && !ARM && !PPC32
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y here.
 	  Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@
 
 config NI65
 	tristate "NI6510 support"
-	depends on ISA && ISA_DMA_API && !ARM
+	depends on ISA && ISA_DMA_API && !ARM && !PPC32
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y here.
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 84c5d29..6848358 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -877,14 +877,14 @@
 
 		if (pdata->tx_pause != pdata->phy.tx_pause) {
 			new_state = 1;
-			pdata->hw_if.config_tx_flow_control(pdata);
 			pdata->tx_pause = pdata->phy.tx_pause;
+			pdata->hw_if.config_tx_flow_control(pdata);
 		}
 
 		if (pdata->rx_pause != pdata->phy.rx_pause) {
 			new_state = 1;
-			pdata->hw_if.config_rx_flow_control(pdata);
 			pdata->rx_pause = pdata->phy.rx_pause;
+			pdata->hw_if.config_rx_flow_control(pdata);
 		}
 
 		/* Speed support */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a3200ea..85e7177 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1678,6 +1678,7 @@
 	skb = build_skb(page_address(page) + adapter->rx_page_offset,
 			adapter->rx_frag_size);
 	if (likely(skb)) {
+		skb_reserve(skb, NET_SKB_PAD);
 		adapter->rx_page_offset += adapter->rx_frag_size;
 		if (adapter->rx_page_offset >= PAGE_SIZE)
 			adapter->rx_page = NULL;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 08d91ef..c407840 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1063,7 +1063,8 @@
 	val = enet_readl(priv, ENET_CTL_REG);
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
-	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+	if (priv->dma_has_sram)
+		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
 	enet_dmac_writel(priv, priv->dma_chan_en_mask,
 			 ENETDMAC_CHANCFG, priv->rx_chan);
 
@@ -1790,7 +1791,9 @@
 		ret = PTR_ERR(priv->mac_clk);
 		goto out;
 	}
-	clk_prepare_enable(priv->mac_clk);
+	ret = clk_prepare_enable(priv->mac_clk);
+	if (ret)
+		goto out_put_clk_mac;
 
 	/* initialize default and fetch platform data */
 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1822,9 +1825,11 @@
 		if (IS_ERR(priv->phy_clk)) {
 			ret = PTR_ERR(priv->phy_clk);
 			priv->phy_clk = NULL;
-			goto out_put_clk_mac;
+			goto out_disable_clk_mac;
 		}
-		clk_prepare_enable(priv->phy_clk);
+		ret = clk_prepare_enable(priv->phy_clk);
+		if (ret)
+			goto out_put_clk_phy;
 	}
 
 	/* do minimal hardware init to be able to probe mii bus */
@@ -1915,13 +1920,16 @@
 out_uninit_hw:
 	/* turn off mdc clock */
 	enet_writel(priv, 0, ENET_MIISC_REG);
-	if (priv->phy_clk) {
+	if (priv->phy_clk)
 		clk_disable_unprepare(priv->phy_clk);
-		clk_put(priv->phy_clk);
-	}
 
-out_put_clk_mac:
+out_put_clk_phy:
+	if (priv->phy_clk)
+		clk_put(priv->phy_clk);
+
+out_disable_clk_mac:
 	clk_disable_unprepare(priv->mac_clk);
+out_put_clk_mac:
 	clk_put(priv->mac_clk);
 out:
 	free_netdev(dev);
@@ -2766,7 +2774,9 @@
 		ret = PTR_ERR(priv->mac_clk);
 		goto out_unmap;
 	}
-	clk_enable(priv->mac_clk);
+	ret = clk_prepare_enable(priv->mac_clk);
+	if (ret)
+		goto out_put_clk;
 
 	priv->rx_chan = 0;
 	priv->tx_chan = 1;
@@ -2787,7 +2797,7 @@
 
 	ret = register_netdev(dev);
 	if (ret)
-		goto out_put_clk;
+		goto out_disable_clk;
 
 	netif_carrier_off(dev);
 	platform_set_drvdata(pdev, dev);
@@ -2796,6 +2806,9 @@
 
 	return 0;
 
+out_disable_clk:
+	clk_disable_unprepare(priv->mac_clk);
+
 out_put_clk:
 	clk_put(priv->mac_clk);
 
@@ -2827,6 +2840,9 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
+	clk_disable_unprepare(priv->mac_clk);
+	clk_put(priv->mac_clk);
+
 	free_netdev(dev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7dd7490..f3f2d66 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1529,6 +1529,7 @@
 	struct link_vars	link_vars;
 	u32			link_cnt;
 	struct bnx2x_link_report_data last_reported_link;
+	bool			force_link_down;
 
 	struct mdio_if_info	mdio;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 31287ce..2cd1dcd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1265,6 +1265,11 @@
 {
 	struct bnx2x_link_report_data cur_data;
 
+	if (bp->force_link_down) {
+		bp->link_vars.link_up = 0;
+		return;
+	}
+
 	/* reread mf_cfg */
 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
 		bnx2x_read_mf_cfg(bp);
@@ -2822,6 +2827,7 @@
 		bp->pending_max = 0;
 	}
 
+	bp->force_link_down = false;
 	if (bp->port.pmf) {
 		rc = bnx2x_initial_phy_init(bp, load_mode);
 		if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 5f19427..8aecd8e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3367,14 +3367,18 @@
 			DP(BNX2X_MSG_ETHTOOL,
 			   "rss re-configured, UDP 4-tupple %s\n",
 			   udp_rss_requested ? "enabled" : "disabled");
-			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+			if (bp->state == BNX2X_STATE_OPEN)
+				return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+						 true);
 		} else if ((info->flow_type == UDP_V6_FLOW) &&
 			   (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
 			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
 			DP(BNX2X_MSG_ETHTOOL,
 			   "rss re-configured, UDP 4-tupple %s\n",
 			   udp_rss_requested ? "enabled" : "disabled");
-			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+			if (bp->state == BNX2X_STATE_OPEN)
+				return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+						 true);
 		}
 		return 0;
 
@@ -3488,7 +3492,10 @@
 		bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
 	}
 
-	return bnx2x_config_rss_eth(bp, false);
+	if (bp->state == BNX2X_STATE_OPEN)
+		return bnx2x_config_rss_eth(bp, false);
+
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 554c408..54dab4e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10279,6 +10279,12 @@
 		bp->sp_rtnl_state = 0;
 		smp_mb();
 
+		/* Immediately indicate link as down */
+		bp->link_vars.link_up = 0;
+		bp->force_link_down = true;
+		netif_carrier_off(bp->dev);
+		BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
 		bnx2x_nic_load(bp, LOAD_NORMAL);
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ca57eb5..72297b7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5257,6 +5257,9 @@
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
+	if (!BNXT_SINGLE_PF(bp))
+		return 0;
+
 	diff = link_info->support_auto_speeds ^ link_info->advertising;
 	if ((link_info->support_auto_speeds | diff) !=
 	    link_info->support_auto_speeds) {
@@ -5557,7 +5560,7 @@
 		rc = bnxt_request_irq(bp);
 		if (rc) {
 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
-			goto open_err;
+			goto open_err_irq;
 		}
 	}
 
@@ -5590,6 +5593,8 @@
 
 open_err:
 	bnxt_disable_napi(bp);
+
+open_err_irq:
 	bnxt_del_napi(bp);
 
 open_err_free_mem:
@@ -6859,11 +6864,11 @@
 	int rx, tx, cp;
 
 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
+	*max_rx = rx;
+	*max_tx = tx;
 	if (!rx || !tx || !cp)
 		return -ENOMEM;
 
-	*max_rx = rx;
-	*max_tx = tx;
 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
 }
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index db7f289..3f8858d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -185,6 +185,9 @@
 #define UMAC_MAC1			0x010
 #define UMAC_MAX_FRAME_LEN		0x014
 
+#define UMAC_MODE			0x44
+#define  MODE_LINK_STATUS		(1 << 5)
+
 #define UMAC_EEE_CTRL			0x064
 #define  EN_LPI_RX_PAUSE		(1 << 0)
 #define  EN_LPI_TX_PFC			(1 << 1)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 2f92819..3b9e1a5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -167,8 +167,14 @@
 static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
 					  struct fixed_phy_status *status)
 {
-	if (dev && dev->phydev && status)
-		status->link = dev->phydev->link;
+	struct bcmgenet_priv *priv;
+	u32 reg;
+
+	if (dev && dev->phydev && status) {
+		priv = netdev_priv(dev);
+		reg = bcmgenet_umac_readl(priv, UMAC_MODE);
+		status->link = !!(reg & MODE_LINK_STATUS);
+	}
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4ffbe85..6250989 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -9276,6 +9276,15 @@
 
 	tg3_restore_clk(tp);
 
+	/* Increase the core clock speed to fix tx timeout issue for 5762
+	 * with 100Mbps link speed.
+	 */
+	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+		     TG3_CPMU_MAC_ORIDE_ENABLE);
+	}
+
 	/* Reprobe ASF enable state.  */
 	tg3_flag_clear(tp, ENABLE_ASF);
 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ec09fce..2e15856 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -517,7 +517,7 @@
 		if (!(status & MACB_BIT(TGO)))
 			return 0;
 
-		usleep_range(10, 250);
+		udelay(250);
 	} while (time_before(halt_time, timeout));
 
 	return -ETIMEDOUT;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 43da891..dc0efbd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -50,6 +50,7 @@
 #include <linux/stringify.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 #include <asm/uaccess.h>
 
 #include "common.h"
@@ -2259,6 +2260,7 @@
 
 		if (t.qset_idx >= nqsets)
 			return -EINVAL;
+		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
 
 		q = &adapter->params.sge.qset[q1 + t.qset_idx];
 		t.rspq_size = q->rspq_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c395b21..c71a52a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -274,7 +274,7 @@
 				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
 				enable ? "set" : "unset", pi->port_id, i, -err);
 		else
-			txq->dcb_prio = value;
+			txq->dcb_prio = enable ? value : 0;
 	}
 }
 
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab9129..ec0b545 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@
 config CS89x0
 	tristate "CS89x0 support"
 	depends on ISA || EISA || ARM
+	depends on !PPC32
 	---help---
 	  Support for CS89x0 chipset based Ethernet cards. If you have a
 	  network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 3c677ed..4d9014d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -78,7 +78,6 @@
 	enic->rfs_h.max = enic->config.num_arfs;
 	enic->rfs_h.free = enic->rfs_h.max;
 	enic->rfs_h.toclean = 0;
-	enic_rfs_timer_start(enic);
 }
 
 void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -87,7 +86,6 @@
 
 	enic_rfs_timer_stop(enic);
 	spin_lock_bh(&enic->rfs_h.lock);
-	enic->rfs_h.free = 0;
 	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
 		struct hlist_head *hhead;
 		struct hlist_node *tmp;
@@ -98,6 +96,7 @@
 			enic_delfltr(enic, n->fltr_id);
 			hlist_del(&n->node);
 			kfree(n);
+			enic->rfs_h.free++;
 		}
 	}
 	spin_unlock_bh(&enic->rfs_h.lock);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 99f593b..f314be0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1760,7 +1760,7 @@
 		vnic_intr_unmask(&enic->intr[i]);
 
 	enic_notify_timer_start(enic);
-	enic_rfs_flw_tbl_init(enic);
+	enic_rfs_timer_start(enic);
 
 	return 0;
 
@@ -1842,10 +1842,32 @@
 	return 0;
 }
 
+static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	bool running = netif_running(netdev);
+	int err = 0;
+
+	ASSERT_RTNL();
+	if (running) {
+		err = enic_stop(netdev);
+		if (err)
+			return err;
+	}
+
+	netdev->mtu = new_mtu;
+
+	if (running) {
+		err = enic_open(netdev);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int enic_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct enic *enic = netdev_priv(netdev);
-	int running = netif_running(netdev);
 
 	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
 		return -EINVAL;
@@ -1853,20 +1875,12 @@
 	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
 		return -EOPNOTSUPP;
 
-	if (running)
-		enic_stop(netdev);
-
-	netdev->mtu = new_mtu;
-
 	if (netdev->mtu > enic->port_mtu)
 		netdev_warn(netdev,
-			"interface MTU (%d) set higher than port MTU (%d)\n",
-			netdev->mtu, enic->port_mtu);
+			    "interface MTU (%d) set higher than port MTU (%d)\n",
+			    netdev->mtu, enic->port_mtu);
 
-	if (running)
-		enic_open(netdev);
-
-	return 0;
+	return _enic_change_mtu(netdev, new_mtu);
 }
 
 static void enic_change_mtu_work(struct work_struct *work)
@@ -1874,47 +1888,9 @@
 	struct enic *enic = container_of(work, struct enic, change_mtu_work);
 	struct net_device *netdev = enic->netdev;
 	int new_mtu = vnic_dev_mtu(enic->vdev);
-	int err;
-	unsigned int i;
-
-	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
 
 	rtnl_lock();
-
-	/* Stop RQ */
-	del_timer_sync(&enic->notify_timer);
-
-	for (i = 0; i < enic->rq_count; i++)
-		napi_disable(&enic->napi[i]);
-
-	vnic_intr_mask(&enic->intr[0]);
-	enic_synchronize_irqs(enic);
-	err = vnic_rq_disable(&enic->rq[0]);
-	if (err) {
-		rtnl_unlock();
-		netdev_err(netdev, "Unable to disable RQ.\n");
-		return;
-	}
-	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
-	vnic_cq_clean(&enic->cq[0]);
-	vnic_intr_clean(&enic->intr[0]);
-
-	/* Fill RQ with new_mtu-sized buffers */
-	netdev->mtu = new_mtu;
-	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
-	/* Need at least one buffer on ring to get going */
-	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
-		rtnl_unlock();
-		netdev_err(netdev, "Unable to alloc receive buffers.\n");
-		return;
-	}
-
-	/* Start RQ */
-	vnic_rq_enable(&enic->rq[0]);
-	napi_enable(&enic->napi[0]);
-	vnic_intr_unmask(&enic->intr[0]);
-	enic_notify_timer_start(enic);
-
+	(void)_enic_change_mtu(netdev, new_mtu);
 	rtnl_unlock();
 
 	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2692,6 +2668,7 @@
 	enic->notify_timer.function = enic_notify_timer;
 	enic->notify_timer.data = (unsigned long)enic;
 
+	enic_rfs_flw_tbl_init(enic);
 	enic_set_rx_coal_setting(enic);
 	INIT_WORK(&enic->reset, enic_reset);
 	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
@@ -2704,7 +2681,6 @@
 	 */
 
 	enic->port_mtu = enic->config.mtu;
-	(void)enic_change_mtu(netdev, enic->port_mtu);
 
 	err = enic_set_mac_addr(netdev, enic->mac_addr);
 	if (err) {
@@ -2754,6 +2730,7 @@
 		netdev->features |= NETIF_F_HIGHDMA;
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->mtu = enic->port_mtu;
 
 	err = register_netdev(netdev);
 	if (err) {
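
_enic_change_mtu() above consolidates the MTU update into a stop/apply/start sequence that only restarts the interface if it was running. A toy sketch of that pattern with placeholder stop/start hooks:

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool running;
	int mtu;
};

static int dev_stop(struct dev *d)  { d->running = false; return 0; }
static int dev_start(struct dev *d) { d->running = true;  return 0; }

static int change_mtu(struct dev *d, int new_mtu)
{
	bool was_running = d->running;
	int err;

	if (was_running) {
		err = dev_stop(d);	/* quiesce before touching the MTU */
		if (err)
			return err;
	}

	d->mtu = new_mtu;

	if (was_running) {
		err = dev_start(d);	/* restart only if it was up before */
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	struct dev d = { .running = true, .mtu = 1500 };

	change_mtu(&d, 9000);
	printf("mtu=%d running=%d\n", d.mtu, d.running);
	return 0;
}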
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 30e8550..8887dd3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4500,7 +4500,7 @@
 				port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
 			}
 		}
-		return status;
+		goto err;
 	}
 
 	pcie = be_get_pcie_desc(resp->func_param, desc_count,
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e093cbf..f9d6845 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -213,10 +213,10 @@
 
 	/* priv data for the desc, e.g. skb when used with the ip stack */
 	void *priv;
-	u16 page_offset;
-	u16 reuse_flag;
+	u32 page_offset;
+	u32 length;     /* length of the buffer */
 
-	u16 length;     /* length of the buffer */
+	u16 reuse_flag;
 
        /* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 111e1aa..8a2a07e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -529,7 +529,7 @@
 	}
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-			size - pull_len, truesize - pull_len);
+			size - pull_len, truesize);
 
 	 /* avoid re-using remote pages, flag default unreuse */
 	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6be0cae..4cd1633 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -243,7 +243,9 @@
 	}
 
 	if (h->dev->ops->adjust_link) {
+		netif_carrier_off(net_dev);
 		h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+		netif_carrier_on(net_dev);
 		return 0;
 	}
 
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 631dbc7..0988bf1 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2636,7 +2636,7 @@
 		/* Wait for link to drop */
 		time = jiffies + (HZ / 10);
 		do {
-			if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+			if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
 				break;
 			if (!in_interrupt())
 				schedule_timeout_interruptible(1);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 975eeb8..e84574b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -645,14 +645,14 @@
 		adapter->tx_ring = tx_old;
 		e1000_free_all_rx_resources(adapter);
 		e1000_free_all_tx_resources(adapter);
-		kfree(tx_old);
-		kfree(rx_old);
 		adapter->rx_ring = rxdr;
 		adapter->tx_ring = txdr;
 		err = e1000_up(adapter);
 		if (err)
 			goto err_setup;
 	}
+	kfree(tx_old);
+	kfree(rx_old);
 
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return 0;
@@ -665,7 +665,8 @@
 err_alloc_rx:
 	kfree(txdr);
 err_alloc_tx:
-	e1000_up(adapter);
+	if (netif_running(adapter->netdev))
+		e1000_up(adapter);
 err_setup:
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return err;
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index afb7ebe..824fd44 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -400,6 +400,10 @@
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXO           0x00000040 /* Receiver Overrun */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO Access Complete */
+#define E1000_ICR_SRPD          0x00010000 /* Small Receive Packet Detected */
+#define E1000_ICR_ACK           0x00020000 /* Receive ACK Frame Detected */
+#define E1000_ICR_MNG           0x00040000 /* Manageability Event Detected */
 #define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED	0x80000000
@@ -407,7 +411,7 @@
 #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
 #define E1000_ICR_TXQ0          0x00400000 /* Tx Queue 0 Interrupt */
 #define E1000_ICR_TXQ1          0x00800000 /* Tx Queue 1 Interrupt */
-#define E1000_ICR_OTHER         0x01000000 /* Other Interrupts */
+#define E1000_ICR_OTHER         0x01000000 /* Other Interrupt */
 
 /* PBA ECC Register */
 #define E1000_PBA_ECC_COUNTER_MASK  0xFFF00000 /* ECC counter mask */
@@ -431,12 +435,27 @@
 	E1000_IMS_RXSEQ  |    \
 	E1000_IMS_LSC)
 
+/* These are all of the events related to the OTHER interrupt.
+ */
+#define IMS_OTHER_MASK ( \
+	E1000_IMS_LSC  | \
+	E1000_IMS_RXO  | \
+	E1000_IMS_MDAC | \
+	E1000_IMS_SRPD | \
+	E1000_IMS_ACK  | \
+	E1000_IMS_MNG)
+
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
 #define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* Receiver Overrun */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO Access Complete */
+#define E1000_IMS_SRPD      E1000_ICR_SRPD      /* Small Receive Packet */
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive ACK Frame Detected */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability Event */
 #define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 7ddac95..dc7d671b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1364,9 +1364,6 @@
  *  Checks to see of the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
- *
- *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- *  up).
  **/
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
@@ -1382,7 +1379,8 @@
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 1;
+		return 0;
+	mac->get_link_status = false;
 
 	/* First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
@@ -1390,12 +1388,12 @@
 	 */
 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
 	if (ret_val)
-		return ret_val;
+		goto out;
 
 	if (hw->mac.type == e1000_pchlan) {
 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
 		if (ret_val)
-			return ret_val;
+			goto out;
 	}
 
 	/* When connected at 10Mbps half-duplex, some parts are excessively
@@ -1430,7 +1428,7 @@
 
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
-			return ret_val;
+			goto out;
 
 		if (hw->mac.type == e1000_pch2lan)
 			emi_addr = I82579_RX_CONFIG;
@@ -1453,7 +1451,7 @@
 		hw->phy.ops.release(hw);
 
 		if (ret_val)
-			return ret_val;
+			goto out;
 
 		if (hw->mac.type == e1000_pch_spt) {
 			u16 data;
@@ -1462,14 +1460,14 @@
 			if (speed == SPEED_1000) {
 				ret_val = hw->phy.ops.acquire(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 
 				ret_val = e1e_rphy_locked(hw,
 							  PHY_REG(776, 20),
 							  &data);
 				if (ret_val) {
 					hw->phy.ops.release(hw);
-					return ret_val;
+					goto out;
 				}
 
 				ptr_gap = (data & (0x3FF << 2)) >> 2;
@@ -1483,18 +1481,18 @@
 				}
 				hw->phy.ops.release(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 			} else {
 				ret_val = hw->phy.ops.acquire(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 
 				ret_val = e1e_wphy_locked(hw,
 							  PHY_REG(776, 20),
 							  0xC023);
 				hw->phy.ops.release(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 
 			}
 		}
@@ -1521,7 +1519,7 @@
 	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
 		if (ret_val)
-			return ret_val;
+			goto out;
 	}
 	if ((hw->mac.type == e1000_pch_lpt) ||
 	    (hw->mac.type == e1000_pch_spt)) {
@@ -1530,7 +1528,7 @@
 		 */
 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
 		if (ret_val)
-			return ret_val;
+			goto out;
 	}
 
 	/* Clear link partner's EEE ability */
@@ -1550,9 +1548,7 @@
 	}
 
 	if (!link)
-		return 0;	/* No link detected */
-
-	mac->get_link_status = false;
+		goto out;
 
 	switch (hw->mac.type) {
 	case e1000_pch2lan:
@@ -1600,7 +1596,7 @@
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg)
-		return 1;
+		return -E1000_ERR_CONFIG;
 
 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to
@@ -1614,12 +1610,14 @@
 	 * different link partner.
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
-	if (ret_val) {
+	if (ret_val)
 		e_dbg("Error configuring flow control\n");
-		return ret_val;
-	}
 
-	return 1;
+	return ret_val;
+
+out:
+	mac->get_link_status = true;
+	return ret_val;
 }
 
 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index db73564..5bdc3a2d 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,9 +410,6 @@
  *  Checks to see of the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
- *
- *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- *  up).
  **/
 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 {
@@ -426,20 +423,16 @@
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 1;
+		return 0;
+	mac->get_link_status = false;
 
 	/* First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
 	 * of the PHY.
 	 */
 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
-	if (ret_val)
-		return ret_val;
-
-	if (!link)
-		return 0;	/* No link detected */
-
-	mac->get_link_status = false;
+	if (ret_val || !link)
+		goto out;
 
 	/* Check if there was DownShift, must be checked
 	 * immediately after link-up
@@ -450,7 +443,7 @@
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg)
-		return 1;
+		return -E1000_ERR_CONFIG;
 
 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to
@@ -464,12 +457,14 @@
 	 * different link partner.
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
-	if (ret_val) {
+	if (ret_val)
 		e_dbg("Error configuring flow control\n");
-		return ret_val;
-	}
 
-	return 1;
+	return ret_val;
+
+out:
+	mac->get_link_status = true;
+	return ret_val;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9c95222..6855b33 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1911,30 +1911,20 @@
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 icr;
-	bool enable = true;
+	u32 icr = er32(ICR);
 
-	icr = er32(ICR);
-	if (icr & E1000_ICR_RXO) {
-		ew32(ICR, E1000_ICR_RXO);
-		enable = false;
-		/* napi poll will re-enable Other, make sure it runs */
-		if (napi_schedule_prep(&adapter->napi)) {
-			adapter->total_rx_bytes = 0;
-			adapter->total_rx_packets = 0;
-			__napi_schedule(&adapter->napi);
-		}
-	}
+	if (icr & adapter->eiac_mask)
+		ew32(ICS, (icr & adapter->eiac_mask));
+
 	if (icr & E1000_ICR_LSC) {
-		ew32(ICR, E1000_ICR_LSC);
 		hw->mac.get_link_status = true;
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (enable && !test_bit(__E1000_DOWN, &adapter->state))
-		ew32(IMS, E1000_IMS_OTHER);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
 
 	return IRQ_HANDLED;
 }
@@ -2037,7 +2027,6 @@
 		       hw->hw_addr + E1000_EITR_82574(vector));
 	else
 		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
-	adapter->eiac_mask |= E1000_IMS_OTHER;
 
 	/* Cause Tx interrupts on every write back */
 	ivar |= BIT(31);
@@ -2262,7 +2251,8 @@
 
 	if (adapter->msix_entries) {
 		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
-		ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC);
+		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
+		     IMS_OTHER_MASK);
 	} else if ((hw->mac.type == e1000_pch_lpt) ||
 		   (hw->mac.type == e1000_pch_spt)) {
 		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
@@ -2705,8 +2695,7 @@
 		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
 			if (adapter->msix_entries)
-				ew32(IMS, adapter->rx_ring->ims_val |
-				     E1000_IMS_OTHER);
+				ew32(IMS, adapter->rx_ring->ims_val);
 			else
 				e1000_irq_enable(adapter);
 		}
@@ -5085,7 +5074,7 @@
 	case e1000_media_type_copper:
 		if (hw->mac.get_link_status) {
 			ret_val = hw->mac.ops.check_for_link(hw);
-			link_active = ret_val > 0;
+			link_active = !hw->mac.get_link_status;
 		} else {
 			link_active = true;
 		}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index ad33622..0d2baec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1847,7 +1847,12 @@
 	if (enable_addr != 0)
 		rar_high |= IXGBE_RAH_AV;
 
+	/* Record lower 32 bits of MAC address and then make
+	 * sure that write is flushed to hardware before writing
+	 * the upper 16 bits and setting the valid bit.
+	 */
 	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+	IXGBE_WRITE_FLUSH(hw);
 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
 	return 0;
@@ -1879,8 +1884,13 @@
 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
 	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
 
-	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+	/* Clear the address valid bit and upper 16 bits of the address
+	 * before clearing the lower bits. This way we aren't updating
+	 * a live filter.
+	 */
 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+	IXGBE_WRITE_FLUSH(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
 
 	/* clear VMDq pool/queue selection for this RAR */
 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
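
The two ixgbe hunks above enforce a write ordering on the receive-address filters: the valid bit must never be observable while the other half of the entry is stale. A sketch of that ordering rule with stand-in registers and a no-op flush (not the ixgbe API):

#include <stdint.h>
#include <stdio.h>

struct rar {			/* pretend hardware filter entry */
	volatile uint32_t low;	/* lower 32 bits of the MAC */
	volatile uint32_t high;	/* upper 16 bits + valid bit */
};

#define RAR_VALID	(1u << 31)

static void flush(void)
{
	/* on real hardware this is a posted-write flush (a readback) */
}

static void rar_set(struct rar *r, uint32_t low, uint32_t high)
{
	r->low = low;			/* write the lower bits first ... */
	flush();			/* ... make sure they landed ... */
	r->high = high | RAR_VALID;	/* ... then mark the entry valid */
}

static void rar_clear(struct rar *r)
{
	r->high = 0;			/* drop the valid bit first ... */
	flush();
	r->low = 0;			/* ... then clearing the rest is safe */
}

int main(void)
{
	struct rar r = { 0, 0 };

	rar_set(&r, 0xaabbccdd, 0x00001122);
	printf("low=0x%08x high=0x%08x\n", (unsigned)r.low, (unsigned)r.high);
	rar_clear(&r);
	return 0;
}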
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1499ce2b..0295132 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3729,6 +3729,7 @@
 		return -EPERM;
 
 	ether_addr_copy(hw->mac.addr, addr->sa_data);
+	ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
 	return 0;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 17b8178..c92ffdf 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3117,7 +3117,6 @@
 
 	on_each_cpu(mvneta_percpu_enable, pp, true);
 	mvneta_start_dev(pp);
-	mvneta_port_up(pp);
 
 	netdev_update_features(dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d6b06be..9d1a7d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2916,7 +2916,7 @@
 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
 	struct res_srq *srq;
-	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+	int local_qpn = vhcr->in_modifier & 0xffffff;
 
 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 6631fb0..9680c880 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -784,6 +784,7 @@
 	struct semaphore *sem;
 	unsigned long flags;
 	int alloc_ret;
+	int cmd_mode;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
@@ -830,6 +831,7 @@
 	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
 	ent->ts1 = ktime_get_ns();
+	cmd_mode = cmd->mode;
 
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -854,7 +856,7 @@
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 	mmiowb();
 	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING) {
+	if (cmd_mode == CMD_MODE_POLLING) {
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
@@ -1256,7 +1258,7 @@
 {
 	struct mlx5_core_dev *dev = filp->private_data;
 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen_str[8];
+	char outlen_str[8] = {0};
 	int outlen;
 	void *ptr;
 	int err;
@@ -1271,8 +1273,6 @@
 	if (copy_from_user(outlen_str, buf, count))
 		return -EFAULT;
 
-	outlen_str[7] = 0;
-
 	err = sscanf(outlen_str, "%d", &outlen);
 	if (err < 0)
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9dbc28..524fff2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -288,16 +288,17 @@
 		}
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-	return (u16)((dev->pdev->bus->number << 8) |
+	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+		     (dev->pdev->bus->number << 8) |
 		     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-	u16 pci_id = mlx5_gen_pci_id(dev);
+	u32 pci_id = mlx5_gen_pci_id(dev);
 	struct mlx5_core_dev *res = NULL;
 	struct mlx5_core_dev *tmp_dev;
 	struct mlx5_priv *priv;
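
mlx5_gen_pci_id() above is widened to u32 so the PCI domain can be folded in; with the old u16 packing, devices on the same bus/slot in different domains collided. A sketch of the packing with made-up values:

#include <stdint.h>
#include <stdio.h>

/* pack domain/bus/slot the way the widened helper does */
static uint32_t gen_pci_id(uint16_t domain, uint8_t bus, uint8_t slot)
{
	return ((uint32_t)domain << 16) | ((uint32_t)bus << 8) | slot;
}

int main(void)
{
	/* same bus/slot in different domains: distinct ids after the fix */
	printf("domain 0: 0x%x\n", gen_pci_id(0, 0x03, 0x00));
	printf("domain 1: 0x%x\n", gen_pci_id(1, 0x03, 0x00));
	return 0;
}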
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index a8cb387..4a51fc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -383,14 +383,14 @@
 	HLIST_HEAD(del_list);
 	spin_lock_bh(&priv->fs.arfs.arfs_lock);
 	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-			break;
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
 					arfs_rule->rxq, arfs_rule->flow_id,
 					arfs_rule->filter_id)) {
 			hlist_del_init(&arfs_rule->hlist);
 			hlist_add_head(&arfs_rule->hlist, &del_list);
+			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+				break;
 		}
 	}
 	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -715,6 +715,9 @@
 	    skb->protocol != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
+	if (skb->encapsulation)
+		return -EPROTONOSUPPORT;
+
 	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 1612ec0..f8b99d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -233,6 +233,7 @@
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tstamp *tstamp = &priv->tstamp;
+	u64 overflow_cycles;
 	u64 ns;
 	u64 frac = 0;
 	u32 dev_freq;
@@ -257,10 +258,17 @@
 
 	/* Calculate period in seconds to call the overflow watchdog - to make
 	 * sure counter is checked at least once every wrap around.
+	 * The period is calculated as the minimum between the max HW cycles
+	 * count (the clock source mask) and the max number of cycles that can
+	 * be multiplied by the clock multiplier without the result exceeding
+	 * 64 bits.
 	 */
-	ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
+	overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult);
+	overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1);
+
+	ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles,
 				 frac, &frac);
-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
+	do_div(ns, NSEC_PER_SEC / HZ);
 	tstamp->overflow_period = ns;
 
 	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
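
The overflow watchdog period above is bounded by two limits: the HW counter must not wrap and cycles * mult must not overflow 64 bits, so the smaller bound (halved for margin) is used. A sketch of that computation with example mask/mult values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ULL << 41) - 1;	/* example: 41-bit HW counter */
	uint32_t mult = 5 << 20;		/* example cyclecounter multiplier */

	uint64_t by_mult = (~0ULL >> 1) / mult;	/* keep cycles * mult within 64 bits */
	uint64_t by_mask = mask >> 1;		/* stay well inside one counter wrap */
	uint64_t overflow_cycles = by_mult < by_mask ? by_mult : by_mask;

	printf("check the counter at least every %" PRIu64 " cycles\n",
	       overflow_cycles);
	return 0;
}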
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8beecd6..448e71e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -339,9 +339,17 @@
 	add_timer(&health->timer);
 }
 
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
+
+	if (disable_health) {
+		spin_lock_irqsave(&health->wq_lock, flags);
+		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+		spin_unlock_irqrestore(&health->wq_lock, flags);
+	}
 
 	del_timer_sync(&health->timer);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3c183b8..6698a3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -787,8 +787,10 @@
 	priv->numa_node = dev_to_node(&dev->pdev->dev);
 
 	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-	if (!priv->dbg_root)
+	if (!priv->dbg_root) {
+		dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
 		return -ENOMEM;
+	}
 
 	err = mlx5_pci_enable_device(dev);
 	if (err) {
@@ -837,7 +839,7 @@
 	pci_clear_master(dev->pdev);
 	release_bar(dev->pdev);
 	mlx5_pci_disable_device(dev);
-	debugfs_remove(priv->dbg_root);
+	debugfs_remove_recursive(priv->dbg_root);
 }
 
 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1130,7 +1132,7 @@
 		mlx5_cleanup_once(dev);
 
 err_stop_poll:
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, boot);
 	if (mlx5_cmd_teardown_hca(dev)) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
 		goto out_err;
@@ -1187,7 +1189,7 @@
 	mlx5_disable_msix(dev);
 	if (cleanup)
 		mlx5_cleanup_once(dev);
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, cleanup);
 	err = mlx5_cmd_teardown_hca(dev);
 	if (err) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 34e7184..43d7c83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -575,7 +575,7 @@
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 				   int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
 		return -ENOTSUPP;
@@ -587,7 +587,7 @@
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 				     int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
 		return -ENOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index eee6e59..2e8703d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -990,7 +990,7 @@
  * @nn:		NFP Net device
  * @tx_ring:	TX ring structure
  *
- * Assumes that the device is stopped
+ * Assumes that the device is stopped, must be idempotent.
  */
 static void
 nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
@@ -1144,13 +1144,18 @@
  * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
  * @rx_ring:	RX ring structure
  *
- * Warning: Do *not* call if ring buffers were never put on the FW freelist
- *	    (i.e. device was not enabled)!
+ * Assumes that the device is stopped, must be idempotent.
  */
 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
 {
 	unsigned int wr_idx, last_idx;
 
+	/* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
+	 * kept at cnt - 1 FL bufs.
+	 */
+	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+		return;
+
 	/* Move the empty entry to the end of the list */
 	wr_idx = rx_ring->wr_p % rx_ring->cnt;
 	last_idx = rx_ring->cnt - 1;
@@ -1919,6 +1924,8 @@
 /**
  * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
  * @nn:      NFP Net device to reconfigure
+ *
+ * Warning: must be fully idempotent.
  */
 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
 {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 9d59cb8..7b6824e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -677,9 +677,9 @@
 	p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-	       ARRAY_SIZE(p_local->local_chassis_id));
+	       sizeof(p_local->local_chassis_id));
 	memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-	       ARRAY_SIZE(p_local->local_port_id));
+	       sizeof(p_local->local_port_id));
 }
 
 static void
@@ -692,9 +692,9 @@
 	p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-	       ARRAY_SIZE(p_remote->peer_chassis_id));
+	       sizeof(p_remote->peer_chassis_id));
 	memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-	       ARRAY_SIZE(p_remote->peer_port_id));
+	       sizeof(p_remote->peer_port_id));
 }
 
 static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index ddd410a..715776e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -313,7 +313,7 @@
 
 	p_ramrod->common.update_approx_mcast_flg = 1;
 	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-		u32 *p_bins = (u32 *)p_params->bins;
+		u32 *p_bins = p_params->bins;
 
 		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
 	}
@@ -1182,8 +1182,8 @@
 			enum spq_mode comp_mode,
 			struct qed_spq_comp_cb *p_comp_data)
 {
-	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 	struct vport_update_ramrod_data *p_ramrod = NULL;
+	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
 	u8 abs_vport_id = 0;
@@ -1219,26 +1219,25 @@
 	/* explicitly clear out the entire vector */
 	memset(&p_ramrod->approx_mcast.bins, 0,
 	       sizeof(p_ramrod->approx_mcast.bins));
-	memset(bins, 0, sizeof(unsigned long) *
-	       ETH_MULTICAST_MAC_BINS_IN_REGS);
+	memset(bins, 0, sizeof(bins));
 	/* filter ADD op is explicit set op and it removes
 	 *  any existing filters for the vport
 	 */
 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
-			u32 bit;
+			u32 bit, nbits;
 
 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-			__set_bit(bit, bins);
+			nbits = sizeof(u32) * BITS_PER_BYTE;
+			bins[bit / nbits] |= 1 << (bit % nbits);
 		}
 
 		/* Convert to correct endianity */
 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
 			struct vport_update_ramrod_mcast *p_ramrod_bins;
-			u32 *p_bins = (u32 *)bins;
 
 			p_ramrod_bins = &p_ramrod->approx_mcast;
-			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+			p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
 		}
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index e495d62..14d0017 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -156,7 +156,7 @@
 	u8				anti_spoofing_en;
 	u8				update_accept_any_vlan_flg;
 	u8				accept_any_vlan;
-	unsigned long			bins[8];
+	u32				bins[8];
 	struct qed_rss_params		*rss_params;
 	struct qed_filter_accept_flags	accept_flags;
 	struct qed_sge_tpa_params	*sge_tpa_params;
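
The qed_l2.c and qed_l2.h hunks above change the approximate-multicast bins from unsigned long to u32 and replace __set_bit() with an explicit shift and mask, so the bit layout of the array no longer depends on the host word size before the values are byte-swapped into the ramrod. A standalone sketch of the replacement bit-set helper; the constants and names are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	#define BITS_PER_BYTE	8
	#define NUM_BINS_REGS	8	/* 8 x 32 bits = 256 approximate bins */

	static void set_mcast_bin(uint32_t *bins, unsigned int bit)
	{
		unsigned int nbits = sizeof(uint32_t) * BITS_PER_BYTE;

		/* Equivalent of __set_bit(), but on a fixed-width u32 array. */
		bins[bit / nbits] |= 1U << (bit % nbits);
	}

	int main(void)
	{
		uint32_t bins[NUM_BINS_REGS] = { 0 };

		set_mcast_bin(bins, 37);	/* lands in bins[1], bit 5 */
		printf("bins[1] = 0x%08x\n", bins[1]);
		return 0;
	}
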
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 0b949c6..1ed13a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -23,6 +23,7 @@
 #include <linux/vmalloc.h>
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_ll2_if.h>
+#include <linux/crash_dump.h>
 
 #include "qed.h"
 #include "qed_sriov.h"
@@ -501,8 +502,16 @@
 		/* Fastpath interrupts */
 		for (j = 0; j < 64; j++) {
 			if ((0x2ULL << j) & status) {
-				hwfn->simd_proto_handler[j].func(
-					hwfn->simd_proto_handler[j].token);
+				struct qed_simd_fp_handler *p_handler =
+					&hwfn->simd_proto_handler[j];
+
+				if (p_handler->func)
+					p_handler->func(p_handler->token);
+				else
+					DP_NOTICE(hwfn,
+						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+						  j, status);
+
 				status &= ~(0x2ULL << j);
 				rc = IRQ_HANDLED;
 			}
@@ -701,6 +710,14 @@
 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+	if (is_kdump_kernel()) {
+		DP_INFO(cdev,
+			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+			cdev->int_params.in.min_msix_cnt);
+		cdev->int_params.in.num_vectors =
+			cdev->int_params.in.min_msix_cnt;
+	}
+
 	rc = qed_set_int_mode(cdev, false);
 	if (rc)  {
 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
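
The qed_main.c hunks add two defensive checks: the fastpath dispatch skips (and logs) a NULL SIMD handler instead of jumping through it, and interrupt setup caps the requested MSI-X vectors at the minimum when running in a kdump kernel, where memory is tight and one slowpath plus one fastpath vector per function is enough to capture the dump. A kernel-context sketch of the kdump cap (not a standalone program; the struct below is a simplified placeholder for the driver's interrupt parameters):

	#include <linux/crash_dump.h>
	#include <linux/types.h>

	/* Simplified placeholder for the driver's interrupt accounting. */
	struct sketch_int_params {
		u16 min_msix_cnt;
		u16 num_vectors;
	};

	static void sketch_limit_msix_for_kdump(struct sketch_int_params *p)
	{
		/* In a crash-capture kernel, request only the bare minimum. */
		if (is_kdump_kernel())
			p->num_vectors = p->min_msix_cnt;
	}
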
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 8b7d2f9..e175fcd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -97,18 +97,57 @@
 	return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
+#define QED_MCP_SHMEM_RDY_ITER_MS	50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
 	u32 drv_mb_offsize, mfw_mb_offsize;
 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
 	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-	if (!p_info->public_base)
-		return 0;
+	if (!p_info->public_base) {
+		DP_NOTICE(p_hwfn,
+			  "The address of the MCP scratch-pad is not configured\n");
+		return -EINVAL;
+	}
 
 	p_info->public_base |= GRCBASE_MCP;
 
+	/* Get the MFW MB address and number of supported messages */
+	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_info->public_base,
+						     PUBLIC_MFW_MB));
+	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+					    p_info->mfw_mb_addr +
+					    offsetof(struct public_mfw_mb,
+						     sup_msgs));
+
+	/* The driver can notify that there was an MCP reset, and might read the
+	 * SHMEM values before the MFW has completed initializing them.
+	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+	 * data ready indication.
+	 */
+	while (!p_info->mfw_mb_length && --cnt) {
+		msleep(msec);
+		p_info->mfw_mb_length =
+			(u16)qed_rd(p_hwfn, p_ptt,
+				    p_info->mfw_mb_addr +
+				    offsetof(struct public_mfw_mb, sup_msgs));
+	}
+
+	if (!cnt) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to get the SHMEM ready notification after %d msec\n",
+			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+		return -EBUSY;
+	}
+
 	/* Calculate the driver and MFW mailbox address */
 	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
 				SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -118,13 +157,6 @@
 		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-	/* Set the MFW MB address */
-	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
-				SECTION_OFFSIZE_ADDR(p_info->public_base,
-						     PUBLIC_MFW_MB));
-	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-	p_info->mfw_mb_length =	(u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
 	/* Get the current driver mailbox sequence before sending
 	 * the first command
 	 */
@@ -613,6 +645,7 @@
 		break;
 	default:
 		p_link->speed = 0;
+		p_link->link_up = 0;
 	}
 
 	if (p_link->link_up && p_link->speed)
@@ -1197,31 +1230,61 @@
 	return rc;
 }
 
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS		10
+#define QED_MCP_HALT_MAX_RETRIES	10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 resp = 0, param = 0;
+	u32 resp = 0, param = 0, cpu_state, cnt = 0;
 	int rc;
 
 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
 			 &param);
-	if (rc)
+	if (rc) {
 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
 
-	return rc;
+	do {
+		msleep(QED_MCP_HALT_SLEEP_MS);
+		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+			break;
+	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS	10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 value, cpu_mode;
+	u32 cpu_mode, cpu_state;
 
 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+	msleep(QED_MCP_RESUME_SLEEP_MS);
+	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 
-	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+			  cpu_mode, cpu_state);
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
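
The new waits in qed_mcp.c (for the SHMEM ready indication and for the MCP to actually halt) follow the same bounded-poll pattern: sleep, re-read a status word, and give up with a distinct error after a fixed number of iterations instead of assuming the firmware reacted instantly. A standalone sketch of that pattern, mirroring the SHMEM-ready variant, with a stubbed register read; the names and the readiness condition are illustrative:

	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>

	#define POLL_MAX_RETRIES	20
	#define POLL_ITER_MS		50

	/* Stand-in for a device register read. */
	static uint32_t read_status(void)
	{
		static int calls;

		return ++calls >= 3 ? 1 : 0;	/* "ready" after a few polls */
	}

	static int wait_ready(void)
	{
		int cnt = POLL_MAX_RETRIES;
		uint32_t ready = read_status();

		while (!ready && --cnt) {
			usleep(POLL_ITER_MS * 1000);
			ready = read_status();
		}

		return cnt ? 0 : -1;	/* the driver returns -EBUSY here */
	}

	int main(void)
	{
		printf("wait_ready() = %d\n", wait_ready());
		return 0;
	}
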
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index b414a05..56be1d6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -510,6 +510,7 @@
 	0
 #define MCP_REG_CPU_STATE \
 	0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED	(0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
 	0xe05008UL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 48bc5c1..6379bfe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2157,7 +2157,7 @@
 
 	p_data->update_approx_mcast_flg = 1;
 	memcpy(p_data->bins, p_mcast_tlv->bins,
-	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+	       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 0645124..faf8215 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -786,7 +786,7 @@
 		resp_size += sizeof(struct pfvf_def_resp_tlv);
 
 		memcpy(p_mcast_tlv->bins, p_params->bins,
-		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+		       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
 	}
 
 	update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -972,7 +972,7 @@
 			u32 bit;
 
 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-			__set_bit(bit, sp_params.bins);
+			sp_params.bins[bit / 32] |= 1 << (bit % 32);
 		}
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 35db7a28..b962ef8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -336,7 +336,12 @@
 	struct channel_tlv tl;
 	u8 padding[4];
 
-	u64 bins[8];
+	/* There are only 256 approx bins, and in HSI they're divided into
+	 * 32-bit values. As old VFs used to set bits in these values on their side,
+	 * the upper half of the array is never expected to contain any data.
+	 */
+	u64 bins[4];
+	u64 obsolete_bins[4];
 };
 
 struct vfpf_vport_update_accept_param_tlv {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index ccbb045..b53a18e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1128,6 +1128,8 @@
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
 	ret = kstrtoul(buf, 16, &data);
+	if (ret)
+		return ret;
 
 	switch (data) {
 	case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
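
The qlcnic sysfs store handler now returns the kstrtoul() error instead of falling through and switching on a value that was never written when the input is not valid hex. The same shape as a standalone parser; parse_hex() is only an illustration of the pattern, not the driver's code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <errno.h>

	/* Userspace analogue of the kstrtoul(buf, 16, &data) pattern. */
	static int parse_hex(const char *buf, unsigned long *data)
	{
		char *end;

		errno = 0;
		*data = strtoul(buf, &end, 16);
		if (errno || end == buf || *end != '\0')
			return -EINVAL;	/* reject, don't fall through */

		return 0;
	}

	int main(void)
	{
		unsigned long v;
		int ret = parse_hex("not-a-number", &v);

		printf("parse_hex() = %d\n", ret);	/* caller must check this */
		return 0;
	}
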
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index fd4a8e4..6a50754 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2387,26 +2387,20 @@
 	return status;
 }
 
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
-	netdev_features_t features)
-{
-	int err;
-
-	/* Update the behavior of vlan accel in the adapter */
-	err = qlge_update_hw_vlan_features(ndev, features);
-	if (err)
-		return err;
-
-	return features;
-}
-
 static int qlge_set_features(struct net_device *ndev,
 	netdev_features_t features)
 {
 	netdev_features_t changed = ndev->features ^ features;
+	int err;
 
-	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+		/* Update the behavior of vlan accel in the adapter */
+		err = qlge_update_hw_vlan_features(ndev, features);
+		if (err)
+			return err;
+
 		qlge_vlan_mode(ndev, features);
+	}
 
 	return 0;
 }
@@ -4719,7 +4713,6 @@
 	.ndo_set_mac_address	= qlge_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= qlge_tx_timeout,
-	.ndo_fix_features	= qlge_fix_features,
 	.ndo_set_features	= qlge_set_features,
 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 8bbb55f..21f5465 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -635,7 +635,7 @@
 		return ret;
 	}
 
-	netif_start_queue(qca->net_dev);
+	/* SPI thread takes care of TX queue */
 
 	return 0;
 }
@@ -739,6 +739,9 @@
 	qca->net_dev->stats.tx_errors++;
 	/* Trigger tx queue flush and QCA7000 reset */
 	qca->sync = QCASPI_SYNC_UNKNOWN;
+
+	if (qca->spi_thread)
+		wake_up_process(qca->spi_thread);
 }
 
 static int
@@ -865,22 +868,22 @@
 
 	if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
 	    (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
-		dev_info(&spi->dev, "Invalid clkspeed: %d\n",
-			 qcaspi_clkspeed);
+		dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+			qcaspi_clkspeed);
 		return -EINVAL;
 	}
 
 	if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
 	    (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
-		dev_info(&spi->dev, "Invalid burst len: %d\n",
-			 qcaspi_burst_len);
+		dev_err(&spi->dev, "Invalid burst len: %d\n",
+			qcaspi_burst_len);
 		return -EINVAL;
 	}
 
 	if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
 	    (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
-		dev_info(&spi->dev, "Invalid pluggable: %d\n",
-			 qcaspi_pluggable);
+		dev_err(&spi->dev, "Invalid pluggable: %d\n",
+			qcaspi_pluggable);
 		return -EINVAL;
 	}
 
@@ -941,8 +944,8 @@
 	}
 
 	if (register_netdev(qcaspi_devs)) {
-		dev_info(&spi->dev, "Unable to register net device %s\n",
-			 qcaspi_devs->name);
+		dev_err(&spi->dev, "Unable to register net device %s\n",
+			qcaspi_devs->name);
 		free_netdev(qcaspi_devs);
 		return -EFAULT;
 	}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 59b932d..20f5c0c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -329,6 +329,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_NCUBE,	0x8168), 0, 0, RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
 	{ PCI_VENDOR_ID_DLINK,			0x4300,
 		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
@@ -759,7 +760,7 @@
 };
 
 enum rtl_flag {
-	RTL_FLAG_TASK_ENABLED,
+	RTL_FLAG_TASK_ENABLED = 0,
 	RTL_FLAG_TASK_SLOW_PENDING,
 	RTL_FLAG_TASK_RESET_PENDING,
 	RTL_FLAG_TASK_PHY_PENDING,
@@ -7636,7 +7637,8 @@
 	rtl8169_update_counters(dev);
 
 	rtl_lock_work(tp);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
 	rtl8169_down(dev);
 	rtl_unlock_work(tp);
@@ -7819,7 +7821,9 @@
 
 	rtl_lock_work(tp);
 	napi_disable(&tp->napi);
-	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+	/* Clear all task flags */
+	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
 	rtl_unlock_work(tp);
 
 	rtl_pll_power_down(tp);
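
Both r8169 hunks replace clear_bit(RTL_FLAG_TASK_ENABLED, ...) with bitmap_zero(), so close and suspend wipe every pending task flag rather than just the enable bit, and the enum now spells out that RTL_FLAG_TASK_ENABLED is bit 0. A kernel-context sketch contrasting the two; the enum below only mirrors the shape of the driver's flags, the names are placeholders:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	enum sketch_flag {
		SKETCH_TASK_ENABLED = 0,
		SKETCH_TASK_SLOW_PENDING,
		SKETCH_TASK_RESET_PENDING,
		SKETCH_FLAG_MAX
	};

	static void sketch_disable_tasks(unsigned long *flags)
	{
		/* Old approach: only one bit goes away, pending bits leak. */
		clear_bit(SKETCH_TASK_ENABLED, flags);

		/* New approach: wipe the whole flag bitmap in one go. */
		bitmap_zero(flags, SKETCH_FLAG_MAX);
	}
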
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 10d3a9f..307ecd5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -955,6 +955,13 @@
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct phy_device *phydev = ndev->phydev;
 	bool new_state = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Disable TX and RX right over here, if E-MAC change is ignored */
+	if (priv->no_avb_link)
+		ravb_rcv_snd_disable(ndev);
 
 	if (phydev->link) {
 		if (phydev->duplex != priv->duplex) {
@@ -972,18 +979,21 @@
 			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
 			new_state = true;
 			priv->link = phydev->link;
-			if (priv->no_avb_link)
-				ravb_rcv_snd_enable(ndev);
 		}
 	} else if (priv->link) {
 		new_state = true;
 		priv->link = 0;
 		priv->speed = 0;
 		priv->duplex = -1;
-		if (priv->no_avb_link)
-			ravb_rcv_snd_disable(ndev);
 	}
 
+	/* Enable TX and RX right over here, if E-MAC change is ignored */
+	if (priv->no_avb_link && phydev->link)
+		ravb_rcv_snd_enable(ndev);
+
+	mmiowb();
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 }
@@ -1085,52 +1095,18 @@
 static int ravb_set_link_ksettings(struct net_device *ndev,
 				   const struct ethtool_link_ksettings *cmd)
 {
-	struct ravb_private *priv = netdev_priv(ndev);
-	unsigned long flags;
-	int error;
-
 	if (!ndev->phydev)
 		return -ENODEV;
 
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Disable TX and RX */
-	ravb_rcv_snd_disable(ndev);
-
-	error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-	if (error)
-		goto error_exit;
-
-	if (cmd->base.duplex == DUPLEX_FULL)
-		priv->duplex = 1;
-	else
-		priv->duplex = 0;
-
-	ravb_set_duplex(ndev);
-
-error_exit:
-	mdelay(1);
-
-	/* Enable TX and RX */
-	ravb_rcv_snd_enable(ndev);
-
-	mmiowb();
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return error;
+	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
 }
 
 static int ravb_nway_reset(struct net_device *ndev)
 {
-	struct ravb_private *priv = netdev_priv(ndev);
 	int error = -ENODEV;
-	unsigned long flags;
 
-	if (ndev->phydev) {
-		spin_lock_irqsave(&priv->lock, flags);
+	if (ndev->phydev)
 		error = phy_start_aneg(ndev->phydev);
-		spin_unlock_irqrestore(&priv->lock, flags);
-	}
 
 	return error;
 }
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c8fd99b..c59e8fe 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1743,8 +1743,15 @@
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct phy_device *phydev = ndev->phydev;
+	unsigned long flags;
 	int new_state = 0;
 
+	spin_lock_irqsave(&mdp->lock, flags);
+
+	/* Disable TX and RX right over here, if E-MAC change is ignored */
+	if (mdp->cd->no_psr || mdp->no_ether_link)
+		sh_eth_rcv_snd_disable(ndev);
+
 	if (phydev->link) {
 		if (phydev->duplex != mdp->duplex) {
 			new_state = 1;
@@ -1763,18 +1770,21 @@
 			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
 			new_state = 1;
 			mdp->link = phydev->link;
-			if (mdp->cd->no_psr || mdp->no_ether_link)
-				sh_eth_rcv_snd_enable(ndev);
 		}
 	} else if (mdp->link) {
 		new_state = 1;
 		mdp->link = 0;
 		mdp->speed = 0;
 		mdp->duplex = -1;
-		if (mdp->cd->no_psr || mdp->no_ether_link)
-			sh_eth_rcv_snd_disable(ndev);
 	}
 
+	/* Enable TX and RX right over here, if E-MAC change is ignored */
+	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
+		sh_eth_rcv_snd_enable(ndev);
+
+	mmiowb();
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
 	if (new_state && netif_msg_link(mdp))
 		phy_print_status(phydev);
 }
@@ -1856,39 +1866,10 @@
 static int sh_eth_set_link_ksettings(struct net_device *ndev,
 				     const struct ethtool_link_ksettings *cmd)
 {
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	unsigned long flags;
-	int ret;
-
 	if (!ndev->phydev)
 		return -ENODEV;
 
-	spin_lock_irqsave(&mdp->lock, flags);
-
-	/* disable tx and rx */
-	sh_eth_rcv_snd_disable(ndev);
-
-	ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-	if (ret)
-		goto error_exit;
-
-	if (cmd->base.duplex == DUPLEX_FULL)
-		mdp->duplex = 1;
-	else
-		mdp->duplex = 0;
-
-	if (mdp->cd->set_duplex)
-		mdp->cd->set_duplex(ndev);
-
-error_exit:
-	mdelay(1);
-
-	/* enable tx and rx */
-	sh_eth_rcv_snd_enable(ndev);
-
-	spin_unlock_irqrestore(&mdp->lock, flags);
-
-	return ret;
+	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
 }
 
 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
@@ -2079,18 +2060,10 @@
 
 static int sh_eth_nway_reset(struct net_device *ndev)
 {
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	unsigned long flags;
-	int ret;
-
 	if (!ndev->phydev)
 		return -ENODEV;
 
-	spin_lock_irqsave(&mdp->lock, flags);
-	ret = phy_start_aneg(ndev->phydev);
-	spin_unlock_irqrestore(&mdp->lock, flags);
-
-	return ret;
+	return phy_start_aneg(ndev->phydev);
 }
 
 static u32 sh_eth_get_msglevel(struct net_device *ndev)
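
The ravb and sh_eth changes move the receive/transmit disable and re-enable for the "E-MAC link change is ignored" case into the PHY adjust_link callback, under the driver spinlock, and let the ethtool ksettings/nway paths simply call into phylib without touching the MAC. A kernel-context sketch of the resulting adjust_link shape; the priv struct and the rcv_snd helpers are placeholders for the drivers' register accessors:

	#include <linux/spinlock.h>
	#include <linux/phy.h>
	#include <linux/types.h>

	struct sketch_priv {
		spinlock_t lock;
		bool no_ether_link;	/* "ignore E-MAC link change" knob */
	};

	static void sketch_rcv_snd_disable(struct sketch_priv *p) { /* reg write */ }
	static void sketch_rcv_snd_enable(struct sketch_priv *p)  { /* reg write */ }

	static void sketch_adjust_link(struct sketch_priv *priv,
				       struct phy_device *phydev)
	{
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);

		/* Disable first, reconfigure duplex/speed, then re-enable only
		 * if the PHY reports link up - all under one lock, so the
		 * ethtool paths cannot interleave their own enable/disable.
		 */
		if (priv->no_ether_link)
			sketch_rcv_snd_disable(priv);

		/* ... speed/duplex register updates would go here ... */

		if (priv->no_ether_link && phydev->link)
			sketch_rcv_snd_enable(priv);

		spin_unlock_irqrestore(&priv->lock, flags);
	}
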
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4b78168..0d03682 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -83,7 +83,7 @@
 config DWMAC_SOCFPGA
 	tristate "SOCFPGA dwmac support"
 	default ARCH_SOCFPGA
-	depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+	depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for ethernet controller on Altera SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 0c420e9..c3a78c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -55,6 +55,7 @@
 	struct	device *dev;
 	struct regmap *sys_mgr_base_addr;
 	struct reset_control *stmmac_rst;
+	struct reset_control *stmmac_ocp_rst;
 	void __iomem *splitter_base;
 	bool f2h_ptp_ref_clk;
 	struct tse_pcs pcs;
@@ -262,8 +263,8 @@
 		val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
 	/* Assert reset to the enet controller before changing the phy mode */
-	if (dwmac->stmmac_rst)
-		reset_control_assert(dwmac->stmmac_rst);
+	reset_control_assert(dwmac->stmmac_ocp_rst);
+	reset_control_assert(dwmac->stmmac_rst);
 
 	regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
 	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
@@ -285,8 +286,8 @@
 	/* Deassert reset for the phy configuration to be sampled by
 	 * the enet controller, and operation to start in requested mode
 	 */
-	if (dwmac->stmmac_rst)
-		reset_control_deassert(dwmac->stmmac_rst);
+	reset_control_deassert(dwmac->stmmac_ocp_rst);
+	reset_control_deassert(dwmac->stmmac_rst);
 	if (phymode == PHY_INTERFACE_MODE_SGMII) {
 		if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
 			dev_err(dwmac->dev, "Unable to initialize TSE PCS");
@@ -321,6 +322,15 @@
 		goto err_remove_config_dt;
 	}
 
+	dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+	if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+		ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+		dev_err(dev, "error getting reset control of ocp %d\n", ret);
+		goto err_remove_config_dt;
+	}
+
+	reset_control_deassert(dwmac->stmmac_ocp_rst);
+
 	ret = socfpga_dwmac_parse_data(dwmac, dev);
 	if (ret) {
 		dev_err(dev, "Unable to parse OF data\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b3bc128..0df7186 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -55,7 +55,7 @@
 #include <linux/of_mdio.h>
 #include "dwmac1000.h"
 
-#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
+#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
 
 /* Module parameters */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 56c8a23..eafc281 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -183,7 +183,7 @@
 		return -ENOMEM;
 
 	/* Enable pci device */
-	ret = pcim_enable_device(pdev);
+	ret = pci_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
 			__func__);
@@ -232,9 +232,45 @@
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
 	stmmac_dvr_remove(&pdev->dev);
+	pci_disable_device(pdev);
 }
 
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
+static int stmmac_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+
+	ret = stmmac_suspend(dev);
+	if (ret)
+		return ret;
+
+	ret = pci_save_state(pdev);
+	if (ret)
+		return ret;
+
+	pci_disable_device(pdev);
+	pci_wake_from_d3(pdev, true);
+	return 0;
+}
+
+static int stmmac_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+
+	pci_restore_state(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
 
 #define STMMAC_VENDOR_ID 0x700
 #define STMMAC_QUARK_ID  0x0937
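
stmmac_pci.c moves from the managed pcim_enable_device() to plain pci_enable_device() with an explicit disable on remove, and adds PCI-aware suspend/resume callbacks. The ordering is the point: quiesce the driver before saving PCI state and powering down, and restore state and bus mastering before running the driver's resume. A kernel-context sketch of the suspend side with that ordering called out; stmmac_suspend()/stmmac_resume() are the driver's existing entry points, the rest is standard PCI PM boilerplate:

	#include <linux/pci.h>

	static int sketch_pci_suspend(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);
		int ret;

		ret = stmmac_suspend(dev);	/* quiesce the MAC first */
		if (ret)
			return ret;

		ret = pci_save_state(pdev);	/* then park the PCI function */
		if (ret)
			return ret;

		pci_disable_device(pdev);
		pci_wake_from_d3(pdev, true);
		return 0;
	}

	/* Resume mirrors this in reverse: pci_restore_state(), power to D0,
	 * pci_enable_device(), pci_set_master(), and only then stmmac_resume().
	 */
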
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index d6ad0fb..920321b 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -59,8 +59,7 @@
 #include <linux/sungem_phy.h>
 #include "sungem.h"
 
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
 
 #define DEFAULT_MSG	(NETIF_MSG_DRV		| \
 			 NETIF_MSG_PROBE	| \
@@ -434,7 +433,7 @@
 	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
 	writel(val, gp->regs + RXDMA_CFG);
 	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
 		writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -759,7 +758,6 @@
 	struct net_device *dev = gp->dev;
 	int entry, drops, work_done = 0;
 	u32 done;
-	__sum16 csum;
 
 	if (netif_msg_rx_status(gp))
 		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -854,9 +852,13 @@
 			skb = copy_skb;
 		}
 
-		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-		skb->csum = csum_unfold(csum);
-		skb->ip_summed = CHECKSUM_COMPLETE;
+		if (likely(dev->features & NETIF_F_RXCSUM)) {
+			__sum16 csum;
+
+			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+			skb->csum = csum_unfold(csum);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
 		skb->protocol = eth_type_trans(skb, gp->dev);
 
 		napi_gro_receive(&gp->napi, skb);
@@ -1754,7 +1756,7 @@
 	writel(0, gp->regs + TXDMA_KICK);
 
 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
 	writel(val, gp->regs + RXDMA_CFG);
 
 	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2972,8 +2974,8 @@
 	pci_set_drvdata(pdev, dev);
 
 	/* We can do scatter/gather and HW checksum */
-	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	dev->features = dev->hw_features;
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 1801364..0c1adad 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -177,12 +177,18 @@
 	}
 
 	dev = bus_find_device(&platform_bus_type, NULL, node, match);
-	of_node_put(node);
+	if (!dev) {
+		dev_err(dev, "unable to find platform device for %pOF\n", node);
+		goto out;
+	}
+
 	priv = dev_get_drvdata(dev);
 
 	priv->cpsw_phy_sel(priv, phy_mode, slave);
 
 	put_device(dev);
+out:
+	of_node_put(node);
 }
 EXPORT_SYMBOL_GPL(cpsw_phy_sel);
 
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 481c7bf..413cf14 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1387,6 +1387,10 @@
 
 static int match_first_device(struct device *dev, void *data)
 {
+	if (dev->parent && dev->parent->of_node)
+		return of_device_is_compatible(dev->parent->of_node,
+					       "ti,davinci_mdio");
+
 	return !strncmp(dev_name(dev), "davinci_mdio", 12);
 }
 
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 63307ea..9beea13 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -217,6 +217,7 @@
 	ret = of_mdiobus_register(bus, np1);
 	if (ret) {
 		mdiobus_free(bus);
+		lp->mii_bus = NULL;
 		return ret;
 	}
 	return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 622ab3a..f5e0983 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -89,10 +89,6 @@
 static const char banner[] __initconst = KERN_INFO \
 	"AX.25: bpqether driver version 004\n";
 
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
 static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
 static int bpq_device_event(struct notifier_block *, unsigned long, void *);
 
@@ -515,8 +511,8 @@
 	bpq->ethdev = edev;
 	bpq->axdev = ndev;
 
-	memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
-	memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+	eth_broadcast_addr(bpq->dest_addr);
+	eth_broadcast_addr(bpq->acpt_addr);
 
 	err = register_netdevice(ndev);
 	if (err)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 36a04e1..53602fd 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -1228,11 +1229,15 @@
 {
 	struct net_device *ndev;
 	struct net_device_context *net_device_ctx;
+	struct device *pdev = vf_netdev->dev.parent;
 	struct netvsc_device *netvsc_dev;
 
 	if (vf_netdev->addr_len != ETH_ALEN)
 		return NOTIFY_DONE;
 
+	if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
+		return NOTIFY_DONE;
+
 	/*
 	 * We will use the MAC address to locate the synthetic interface to
 	 * associate with the VF interface. If we don't find a matching
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 9f10da6..ce3b7fb 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -941,7 +941,7 @@
 static int
 at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
 {
-	BUG_ON(!level);
+	WARN_ON(!level);
 	*level = 0xbe;
 	return 0;
 }
@@ -1117,8 +1117,7 @@
 	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
 		u16 addr = le16_to_cpu(filt->short_addr);
 
-		dev_vdbg(&lp->spi->dev,
-			 "at86rf230_set_hw_addr_filt called for saddr\n");
+		dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
 		__at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
 		__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
 	}
@@ -1126,8 +1125,7 @@
 	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
 		u16 pan = le16_to_cpu(filt->pan_id);
 
-		dev_vdbg(&lp->spi->dev,
-			 "at86rf230_set_hw_addr_filt called for pan id\n");
+		dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
 		__at86rf230_write(lp, RG_PAN_ID_0, pan);
 		__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
 	}
@@ -1136,15 +1134,13 @@
 		u8 i, addr[8];
 
 		memcpy(addr, &filt->ieee_addr, 8);
-		dev_vdbg(&lp->spi->dev,
-			 "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+		dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
 		for (i = 0; i < 8; i++)
 			__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
 	}
 
 	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
-		dev_vdbg(&lp->spi->dev,
-			 "at86rf230_set_hw_addr_filt called for panc change\n");
+		dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
 		if (filt->pan_coord)
 			at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
 		else
@@ -1248,7 +1244,6 @@
 	return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
-
 static int
 at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
 {
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index ec387ef..6853981 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -49,7 +49,7 @@
 
 static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
 {
-	BUG_ON(!level);
+	WARN_ON(!level);
 	*level = 0xbe;
 
 	return 0;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index dfbc4ef..b299277 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -63,10 +63,23 @@
 {
 	struct ipvl_dev *ipvlan;
 	struct net_device *mdev = port->dev;
-	int err = 0;
+	unsigned int flags;
+	int err;
 
 	ASSERT_RTNL();
 	if (port->mode != nval) {
+		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+			flags = ipvlan->dev->flags;
+			if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+				err = dev_change_flags(ipvlan->dev,
+						       flags | IFF_NOARP);
+			} else {
+				err = dev_change_flags(ipvlan->dev,
+						       flags & ~IFF_NOARP);
+			}
+			if (unlikely(err))
+				goto fail;
+		}
 		if (nval == IPVLAN_MODE_L3S) {
 			/* New mode is L3S */
 			err = ipvlan_register_nf_hook();
@@ -74,21 +87,28 @@
 				mdev->l3mdev_ops = &ipvl_l3mdev_ops;
 				mdev->priv_flags |= IFF_L3MDEV_MASTER;
 			} else
-				return err;
+				goto fail;
 		} else if (port->mode == IPVLAN_MODE_L3S) {
 			/* Old mode was L3S */
 			mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
 			ipvlan_unregister_nf_hook();
 			mdev->l3mdev_ops = NULL;
 		}
-		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-			if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
-				ipvlan->dev->flags |= IFF_NOARP;
-			else
-				ipvlan->dev->flags &= ~IFF_NOARP;
-		}
 		port->mode = nval;
 	}
+	return 0;
+
+fail:
+	/* Undo the flags changes that have been done so far. */
+	list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+		flags = ipvlan->dev->flags;
+		if (port->mode == IPVLAN_MODE_L3 ||
+		    port->mode == IPVLAN_MODE_L3S)
+			dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+		else
+			dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+	}
+
 	return err;
 }
 
@@ -525,7 +545,8 @@
 	ipvlan->dev = dev;
 	ipvlan->port = port;
 	ipvlan->sfeatures = IPVLAN_FEATURES;
-	ipvlan_adjust_mtu(ipvlan, phy_dev);
+	if (!tb[IFLA_MTU])
+		ipvlan_adjust_mtu(ipvlan, phy_dev);
 	INIT_LIST_HEAD(&ipvlan->addrs);
 
 	/* TODO Probably put random address here to be presented to the
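
The ipvlan mode change now flips IFF_NOARP through dev_change_flags(), which can fail, instead of poking dev->flags directly, and on failure walks back over the devices already changed with list_for_each_entry_continue_reverse() to restore their previous state. A kernel-context sketch of that unwind pattern; the struct and list names are placeholders:

	#include <linux/list.h>
	#include <linux/netdevice.h>

	struct sketch_slave {
		struct list_head node;
		struct net_device *dev;
	};

	static int sketch_set_noarp(struct list_head *slaves, bool noarp)
	{
		struct sketch_slave *s;
		unsigned int flags;
		int err;

		list_for_each_entry(s, slaves, node) {
			flags = s->dev->flags;
			err = dev_change_flags(s->dev, noarp ? flags | IFF_NOARP
							     : flags & ~IFF_NOARP);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		/* Walk back only over the entries already modified. */
		list_for_each_entry_continue_reverse(s, slaves, node) {
			flags = s->dev->flags;
			dev_change_flags(s->dev, noarp ? flags & ~IFF_NOARP
						       : flags | IFF_NOARP);
		}
		return err;
	}
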
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0a5f62e..bd9f9b9 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -22,7 +22,7 @@
 #include <linux/mdio-mux.h>
 #include <linux/delay.h>
 
-#define MDIO_PARAM_OFFSET		0x00
+#define MDIO_PARAM_OFFSET		0x23c
 #define MDIO_PARAM_MIIM_CYCLE		29
 #define MDIO_PARAM_INTERNAL_SEL		25
 #define MDIO_PARAM_BUS_ID		22
@@ -30,20 +30,22 @@
 #define MDIO_PARAM_PHY_ID		16
 #define MDIO_PARAM_PHY_DATA		0
 
-#define MDIO_READ_OFFSET		0x04
+#define MDIO_READ_OFFSET		0x240
 #define MDIO_READ_DATA_MASK		0xffff
-#define MDIO_ADDR_OFFSET		0x08
+#define MDIO_ADDR_OFFSET		0x244
 
-#define MDIO_CTRL_OFFSET		0x0C
+#define MDIO_CTRL_OFFSET		0x248
 #define MDIO_CTRL_WRITE_OP		0x1
 #define MDIO_CTRL_READ_OP		0x2
 
-#define MDIO_STAT_OFFSET		0x10
+#define MDIO_STAT_OFFSET		0x24c
 #define MDIO_STAT_DONE			1
 
 #define BUS_MAX_ADDR			32
 #define EXT_BUS_START_ADDR		16
 
+#define MDIO_REG_ADDR_SPACE_SIZE	0x250
+
 struct iproc_mdiomux_desc {
 	void *mux_handle;
 	void __iomem *base;
@@ -169,6 +171,14 @@
 	md->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res->start & 0xfff) {
+		/* For backward compatibility in case the
+		 * base address is specified with an offset.
+		 */
+		dev_info(&pdev->dev, "fix base address in dt-blob\n");
+		res->start &= ~0xfff;
+		res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
+	}
 	md->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(md->base)) {
 		dev_err(&pdev->dev, "failed to ioremap register\n");
@@ -218,7 +228,7 @@
 
 static int mdio_mux_iproc_remove(struct platform_device *pdev)
 {
-	struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
+	struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
 
 	mdio_mux_uninit(md->mux_handle);
 	mdiobus_unregister(md->mii_bus);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 4d21764..5fde8e3 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -598,7 +598,7 @@
 	 * negotiation may already be done and aneg interrupt may not be
 	 * generated.
 	 */
-	if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+	if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
 		err = phy_aneg_done(phydev);
 		if (err > 0) {
 			trigger = true;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bf02f8e..b131e55 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1579,11 +1579,8 @@
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-	/* The default values for phydev->supported are provided by the PHY
-	 * driver "features" member, we want to reset to sane defaults first
-	 * before supporting higher speeds.
-	 */
-	phydev->supported &= PHY_DEFAULT_FEATURES;
+	phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
+			       PHY_10BT_FEATURES);
 
 	switch (max_speed) {
 	default:
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 2e5150b..7a14e81 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -40,8 +40,11 @@
 {
 	struct gmii2rgmii *priv = phydev->priv;
 	u16 val = 0;
+	int err;
 
-	priv->phy_drv->read_status(phydev);
+	err = priv->phy_drv->read_status(phydev);
+	if (err < 0)
+		return err;
 
 	val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
 	val &= ~XILINX_GMII2RGMII_SPEED_MASK;
@@ -81,6 +84,11 @@
 		return -EPROBE_DEFER;
 	}
 
+	if (!priv->phy_dev->drv) {
+		dev_info(dev, "Attached phy not ready\n");
+		return -EPROBE_DEFER;
+	}
+
 	priv->addr = mdiodev->addr;
 	priv->phy_drv = priv->phy_dev->drv;
 	memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
index 3a45cf8..8ed8091 100644
--- a/drivers/net/ppp/pppolac.c
+++ b/drivers/net/ppp/pppolac.c
@@ -83,7 +83,7 @@
 
 	/* Put it back if it is a control packet. */
 	if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
-		return opt->backlog_rcv(sk_udp, skb);
+		return 2;
 
 	/* Skip UDP header. */
 	skb_pull(skb, sizeof(struct udphdr));
@@ -190,9 +190,10 @@
 
 static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
 {
+	int retval;
 	sock_hold(sk_udp);
-	sk_receive_skb(sk_udp, skb, 0);
-	return 0;
+	retval =  sk_receive_skb(sk_udp, skb, 0);
+	return (retval >> 1);
 }
 
 static struct sk_buff_head delivery_queue;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 32e9ec8..5be6b67 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -640,10 +640,12 @@
 				     priv->presvd_phy_advertise);
 
 		/* Restore BMCR */
+		if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
+			priv->presvd_phy_bmcr |= BMCR_ANRESTART;
+
 		asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
 				     priv->presvd_phy_bmcr);
 
-		mii_nway_restart(&dev->mii);
 		priv->presvd_phy_advertise = 0;
 		priv->presvd_phy_bmcr = 0;
 	}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3e66e40..88f16f9 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1170,6 +1170,8 @@
 			mod_timer(&dev->stat_monitor,
 				  jiffies + STAT_UPDATE_TIMER);
 		}
+
+		tasklet_schedule(&dev->bh);
 	}
 
 	return ret;
@@ -2969,6 +2971,7 @@
 	pkt_cnt = 0;
 	count = 0;
 	length = 0;
+	spin_lock_irqsave(&tqp->lock, flags);
 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
 		if (skb_is_gso(skb)) {
 			if (pkt_cnt) {
@@ -2977,7 +2980,8 @@
 			}
 			count = 1;
 			length = skb->len - TX_OVERHEAD;
-			skb2 = skb_dequeue(tqp);
+			__skb_unlink(skb, tqp);
+			spin_unlock_irqrestore(&tqp->lock, flags);
 			goto gso_skb;
 		}
 
@@ -2986,6 +2990,7 @@
 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
 		pkt_cnt++;
 	}
+	spin_unlock_irqrestore(&tqp->lock, flags);
 
 	/* copy to a single skb */
 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
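
The lan78xx TX path previously walked the transmit queue and called skb_dequeue() without holding the queue lock, racing against producers. The fix takes tqp->lock around the walk and uses __skb_unlink(), the caller-locked variant. A kernel-context sketch of that idiom; the helper name and the GSO-only selection are illustrative:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* Pull the first GSO skb off the queue, or return NULL.
	 * Sketch of the "hold the queue lock, use the __-prefixed helpers" rule.
	 */
	static struct sk_buff *sketch_pop_gso(struct sk_buff_head *tqp)
	{
		struct sk_buff *skb, *found = NULL;
		unsigned long flags;

		spin_lock_irqsave(&tqp->lock, flags);
		skb_queue_walk(tqp, skb) {
			if (skb_is_gso(skb)) {
				/* Lock already held: __skb_unlink(), not skb_dequeue(). */
				__skb_unlink(skb, tqp);
				found = skb;
				break;
			}
		}
		spin_unlock_irqrestore(&tqp->lock, flags);

		return found;
	}
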
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 85bc0ca..0d4440f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -946,12 +946,14 @@
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81d7, 0)},	/* Dell Wireless 5821e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
+	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)},	/* Quectel EP06 Mini PCIe */
 
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d3d89b0..5988674 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3327,7 +3327,8 @@
 #ifdef CONFIG_PM_SLEEP
 	unregister_pm_notifier(&tp->pm_notifier);
 #endif
-	napi_disable(&tp->napi);
+	if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+		napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index dc4f7ea..9504800 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -681,7 +681,7 @@
 		   (netdev->flags & IFF_ALLMULTI)) {
 		rx_creg &= 0xfffe;
 		rx_creg |= 0x0002;
-		dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+		dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
 	} else {
 		/* ~RX_MULTICAST, ~RX_PROMISCUOUS */
 		rx_creg &= 0x00fc;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 2cc0f28..03d0401 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -82,6 +82,9 @@
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
 static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
 					    u32 *data, int in_pm)
 {
@@ -852,6 +855,9 @@
 		return -EIO;
 	}
 
+	/* phy workaround for gig link */
+	smsc75xx_phy_gig_workaround(dev);
+
 	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
 		ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
 		ADVERTISE_PAUSE_ASYM);
@@ -990,6 +996,62 @@
 	return -EIO;
 }
 
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+	struct mii_if_info *mii = &dev->mii;
+	int ret = 0, timeout = 0;
+	u32 buf, link_up = 0;
+
+	/* Set the phy in Gig loopback */
+	smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+	/* Wait for the link up */
+	do {
+		link_up = smsc75xx_link_ok_nopm(dev);
+		usleep_range(10000, 20000);
+		timeout++;
+	} while ((!link_up) && (timeout < 1000));
+
+	if (timeout >= 1000) {
+		netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+		return -EIO;
+	}
+
+	/* phy reset */
+	ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+	if (ret < 0) {
+		netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+		return ret;
+	}
+
+	buf |= PMT_CTL_PHY_RST;
+
+	ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+	if (ret < 0) {
+		netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+		return ret;
+	}
+
+	timeout = 0;
+	do {
+		usleep_range(10000, 20000);
+		ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+		if (ret < 0) {
+			netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+				    ret);
+			return ret;
+		}
+		timeout++;
+	} while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+	if (timeout >= 100) {
+		netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static int smsc75xx_reset(struct usbnet *dev)
 {
 	struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index a8bd68f..7a62316 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -161,7 +161,7 @@
 	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
 				ALIGNMENT_OF_UCC_HDLC_PRAM);
 
-	if (priv->ucc_pram_offset < 0) {
+	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
 		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
 		ret = -ENOMEM;
 		goto free_tx_bd;
@@ -197,14 +197,14 @@
 
 	/* Alloc riptr, tiptr */
 	riptr = qe_muram_alloc(32, 32);
-	if (riptr < 0) {
+	if (IS_ERR_VALUE(riptr)) {
 		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
 		ret = -ENOMEM;
 		goto free_tx_skbuff;
 	}
 
 	tiptr = qe_muram_alloc(32, 32);
-	if (tiptr < 0) {
+	if (IS_ERR_VALUE(tiptr)) {
 		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
 		ret = -ENOMEM;
 		goto free_riptr;
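
qe_muram_alloc() returns an unsigned offset, so "offset < 0" can never be true and allocation failures were silently ignored; IS_ERR_VALUE() checks for the error-encoding range instead. A standalone sketch of why the signed comparison is dead code; the allocator here is a stub and the macro is a simplified rendition of the kernel's:

	#include <stdio.h>

	#define MAX_ERRNO	4095
	/* Same idea as the kernel's IS_ERR_VALUE(): the top 4095 values are errors. */
	#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	static unsigned long fake_muram_alloc(void)
	{
		return (unsigned long)-12;	/* -ENOMEM encoded as an offset */
	}

	int main(void)
	{
		unsigned long offset = fake_muram_alloc();

		if (offset < 0)			/* never true: offset is unsigned */
			printf("signed check caught it\n");

		if (IS_ERR_VALUE(offset))	/* this is the check that works */
			printf("IS_ERR_VALUE caught it\n");

		return 0;
	}
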
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 299140c..04b60ed 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1372,7 +1372,7 @@
             case 0x001:
                 printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                 break;
-            case 0x010:
+            case 0x002:
                 printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                 break;
             default:
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0dadc60..b106a06 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -161,6 +162,8 @@
 void ath10k_debug_print_board_info(struct ath10k *ar)
 {
 	char boardinfo[100];
+	const struct firmware *board;
+	u32 crc;
 
 	if (ar->id.bmi_ids_valid)
 		scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
@@ -168,11 +171,16 @@
 	else
 		scnprintf(boardinfo, sizeof(boardinfo), "N/A");
 
+	board = ar->normal_mode_fw.board;
+	if (!IS_ERR_OR_NULL(board))
+		crc = crc32_le(0, board->data, board->size);
+	else
+		crc = 0;
+
 	ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
 		    ar->bd_api,
 		    boardinfo,
-		    crc32_le(0, ar->normal_mode_fw.board->data,
-			     ar->normal_mode_fw.board->size));
+		    crc);
 }
 
 void ath10k_debug_print_boot_info(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ba1fe61..a3c2180 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -214,11 +214,12 @@
 	spin_lock_bh(&htt->rx_ring.lock);
 	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
 					      htt->rx_ring.fill_cnt));
-	spin_unlock_bh(&htt->rx_ring.lock);
 
 	if (ret)
 		ath10k_htt_rx_ring_free(htt);
 
+	spin_unlock_bh(&htt->rx_ring.lock);
+
 	return ret;
 }
 
@@ -230,7 +231,9 @@
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
 	skb_queue_purge(&htt->tx_fetch_ind_q);
 
+	spin_lock_bh(&htt->rx_ring.lock);
 	ath10k_htt_rx_ring_free(htt);
+	spin_unlock_bh(&htt->rx_ring.lock);
 
 	dma_free_coherent(htt->ar->dev,
 			  (htt->rx_ring.size *
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index d68f4f2..5fe6841 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3003,6 +3003,13 @@
 			passive = channel->flags & IEEE80211_CHAN_NO_IR;
 			ch->passive = passive;
 
+			/* the firmware is ignoring the "radar" flag of the
+			 * channel and is scanning actively using Probe Requests
+			 * on "Radar detection"/DFS channels which are not
+			 * marked as "available"
+			 */
+			ch->passive |= ch->chan_radar;
+
 			ch->freq = channel->center_freq;
 			ch->band_center_freq1 = channel->center_freq;
 			ch->min_power = 0;
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index e0d00ce..5b974bb 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -152,10 +152,9 @@
 );
 
 TRACE_EVENT(ath10k_wmi_cmd,
-	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
-		 int ret),
+	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
 
-	TP_ARGS(ar, id, buf, buf_len, ret),
+	TP_ARGS(ar, id, buf, buf_len),
 
 	TP_STRUCT__entry(
 		__string(device, dev_name(ar->dev))
@@ -163,7 +162,6 @@
 		__field(unsigned int, id)
 		__field(size_t, buf_len)
 		__dynamic_array(u8, buf, buf_len)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
@@ -171,17 +169,15 @@
 		__assign_str(driver, dev_driver_string(ar->dev));
 		__entry->id = id;
 		__entry->buf_len = buf_len;
-		__entry->ret = ret;
 		memcpy(__get_dynamic_array(buf), buf, buf_len);
 	),
 
 	TP_printk(
-		"%s %s id %d len %zu ret %d",
+		"%s %s id %d len %zu",
 		__get_str(driver),
 		__get_str(device),
 		__entry->id,
-		__entry->buf_len,
-		__entry->ret
+		__entry->buf_len
 	)
 );
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 0e4d49a..642a441 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1451,6 +1451,11 @@
 	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
 	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
 	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+	cfg->wmi_send_separate = __cpu_to_le32(0);
+	cfg->num_ocb_vdevs = __cpu_to_le32(0);
+	cfg->num_ocb_channels = __cpu_to_le32(0);
+	cfg->num_ocb_schedules = __cpu_to_le32(0);
+	cfg->host_capab = __cpu_to_le32(0);
 
 	ath10k_wmi_put_host_mem_chunks(ar, chunks);
 
@@ -1481,10 +1486,10 @@
 	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
 	ie_len = roundup(arg->ie_len, 4);
 	len = (sizeof(*tlv) + sizeof(*cmd)) +
-	      (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
-	      (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
-	      (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
-	      (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+	      sizeof(*tlv) + chan_len +
+	      sizeof(*tlv) + ssid_len +
+	      sizeof(*tlv) + bssid_len +
+	      sizeof(*tlv) + ie_len;
 
 	skb = ath10k_wmi_alloc_skb(ar, len);
 	if (!skb)
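
The wmi-tlv.c scan-command change stops making the per-section TLV headers conditional on the section having any payload: the firmware parses a fixed sequence of sections, so every header must be present even when its array is empty. A standalone sketch of the corrected length calculation; the tlv_hdr layout here is illustrative, not the ath10k wire format:

	#include <stdio.h>
	#include <stdint.h>

	struct tlv_hdr {
		uint16_t tag;
		uint16_t len;
	};

	/* Every section header is sized unconditionally, even with no payload. */
	static size_t scan_cmd_len(size_t chan_len, size_t ssid_len,
				   size_t bssid_len, size_t ie_len)
	{
		return sizeof(struct tlv_hdr) + chan_len +
		       sizeof(struct tlv_hdr) + ssid_len +
		       sizeof(struct tlv_hdr) + bssid_len +
		       sizeof(struct tlv_hdr) + ie_len;
	}

	int main(void)
	{
		/* Empty optional sections still contribute their headers. */
		printf("empty scan cmd payload: %zu bytes\n",
		       scan_cmd_len(0, 0, 0, 0));
		return 0;
	}
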
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index b8aa600..2c94fe3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1227,6 +1227,11 @@
 	__le32 keep_alive_pattern_size;
 	__le32 max_tdls_concurrent_sleep_sta;
 	__le32 max_tdls_concurrent_buffer_sta;
+	__le32 wmi_send_separate;
+	__le32 num_ocb_vdevs;
+	__le32 num_ocb_channels;
+	__le32 num_ocb_schedules;
+	__le32 host_capab;
 } __packed;
 
 struct wmi_tlv_init_cmd {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index e518b64..75f7a7b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1711,8 +1711,8 @@
 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
 
 	memset(skb_cb, 0, sizeof(*skb_cb));
+	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
-	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
 
 	if (ret)
 		goto err_pull;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index acef4ec9..951bac2 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2915,16 +2915,19 @@
 	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
 	struct ieee80211_channel *channel;
 	int chan_pwr, new_pwr;
+	u16 ctl = NO_CTL;
 
 	if (!chan)
 		return;
 
+	if (!test)
+		ctl = ath9k_regd_get_ctl(reg, chan);
+
 	channel = chan->chan;
 	chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
 	new_pwr = min_t(int, chan_pwr, reg->power_limit);
 
-	ah->eep_ops->set_txpower(ah, chan,
-				 ath9k_regd_get_ctl(reg, chan),
+	ah->eep_ops->set_txpower(ah, chan, ctl,
 				 get_antenna_gain(ah, chan), new_pwr, test);
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index e47286b..8a504af 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -84,7 +84,8 @@
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_sta *sta = info->status.status_driver_data[0];
 
-	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+	if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+			   IEEE80211_TX_STATUS_EOSP)) {
 		ieee80211_tx_status(hw, skb);
 		return;
 	}
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 565d307..8553ab4 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -68,12 +68,14 @@
 	CTRY_AUSTRALIA = 36,
 	CTRY_AUSTRIA = 40,
 	CTRY_AZERBAIJAN = 31,
+	CTRY_BAHAMAS = 44,
 	CTRY_BAHRAIN = 48,
 	CTRY_BANGLADESH = 50,
 	CTRY_BARBADOS = 52,
 	CTRY_BELARUS = 112,
 	CTRY_BELGIUM = 56,
 	CTRY_BELIZE = 84,
+	CTRY_BERMUDA = 60,
 	CTRY_BOLIVIA = 68,
 	CTRY_BOSNIA_HERZ = 70,
 	CTRY_BRAZIL = 76,
@@ -159,6 +161,7 @@
 	CTRY_ROMANIA = 642,
 	CTRY_RUSSIA = 643,
 	CTRY_SAUDI_ARABIA = 682,
+	CTRY_SERBIA = 688,
 	CTRY_SERBIA_MONTENEGRO = 891,
 	CTRY_SINGAPORE = 702,
 	CTRY_SLOVAKIA = 703,
@@ -170,11 +173,13 @@
 	CTRY_SWITZERLAND = 756,
 	CTRY_SYRIA = 760,
 	CTRY_TAIWAN = 158,
+	CTRY_TANZANIA = 834,
 	CTRY_THAILAND = 764,
 	CTRY_TRINIDAD_Y_TOBAGO = 780,
 	CTRY_TUNISIA = 788,
 	CTRY_TURKEY = 792,
 	CTRY_UAE = 784,
+	CTRY_UGANDA = 800,
 	CTRY_UKRAINE = 804,
 	CTRY_UNITED_KINGDOM = 826,
 	CTRY_UNITED_STATES = 840,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index bdd2b4d..15bbd1e 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -35,6 +35,7 @@
 	FRANCE_RES = 0x31,
 	FCC3_FCCA = 0x3A,
 	FCC3_WORLD = 0x3B,
+	FCC3_ETSIC = 0x3F,
 
 	ETSI1_WORLD = 0x37,
 	ETSI3_ETSIA = 0x32,
@@ -44,6 +45,7 @@
 	ETSI4_ETSIC = 0x38,
 	ETSI5_WORLD = 0x39,
 	ETSI6_WORLD = 0x34,
+	ETSI8_WORLD = 0x3D,
 	ETSI_RESERVED = 0x33,
 
 	MKK1_MKKA = 0x40,
@@ -59,6 +61,7 @@
 	MKK1_MKKA1 = 0x4A,
 	MKK1_MKKA2 = 0x4B,
 	MKK1_MKKC = 0x4C,
+	APL2_FCCA = 0x4D,
 
 	APL3_FCCA = 0x50,
 	APL1_WORLD = 0x52,
@@ -67,6 +70,7 @@
 	APL1_ETSIC = 0x55,
 	APL2_ETSIC = 0x56,
 	APL5_WORLD = 0x58,
+	APL13_WORLD = 0x5A,
 	APL6_WORLD = 0x5B,
 	APL7_FCCA = 0x5C,
 	APL8_WORLD = 0x5D,
@@ -168,6 +172,7 @@
 	{FCC2_ETSIC, CTL_FCC, CTL_ETSI},
 	{FCC3_FCCA, CTL_FCC, CTL_FCC},
 	{FCC3_WORLD, CTL_FCC, CTL_ETSI},
+	{FCC3_ETSIC, CTL_FCC, CTL_ETSI},
 	{FCC4_FCCA, CTL_FCC, CTL_FCC},
 	{FCC5_FCCA, CTL_FCC, CTL_FCC},
 	{FCC6_FCCA, CTL_FCC, CTL_FCC},
@@ -179,6 +184,7 @@
 	{ETSI4_WORLD, CTL_ETSI, CTL_ETSI},
 	{ETSI5_WORLD, CTL_ETSI, CTL_ETSI},
 	{ETSI6_WORLD, CTL_ETSI, CTL_ETSI},
+	{ETSI8_WORLD, CTL_ETSI, CTL_ETSI},
 
 	/* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */
 	{ETSI3_ETSIA, CTL_ETSI, CTL_ETSI},
@@ -188,9 +194,11 @@
 	{FCC1_FCCA, CTL_FCC, CTL_FCC},
 	{APL1_WORLD, CTL_FCC, CTL_ETSI},
 	{APL2_WORLD, CTL_FCC, CTL_ETSI},
+	{APL2_FCCA, CTL_FCC, CTL_FCC},
 	{APL3_WORLD, CTL_FCC, CTL_ETSI},
 	{APL4_WORLD, CTL_FCC, CTL_ETSI},
 	{APL5_WORLD, CTL_FCC, CTL_ETSI},
+	{APL13_WORLD, CTL_ETSI, CTL_ETSI},
 	{APL6_WORLD, CTL_ETSI, CTL_ETSI},
 	{APL8_WORLD, CTL_ETSI, CTL_ETSI},
 	{APL9_WORLD, CTL_ETSI, CTL_ETSI},
@@ -298,6 +306,7 @@
 	{CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
 	{CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
 	{CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
+	{CTRY_BAHAMAS, FCC3_WORLD, "BS"},
 	{CTRY_BAHRAIN, APL6_WORLD, "BH"},
 	{CTRY_BANGLADESH, NULL1_WORLD, "BD"},
 	{CTRY_BARBADOS, FCC2_WORLD, "BB"},
@@ -305,6 +314,7 @@
 	{CTRY_BELGIUM, ETSI1_WORLD, "BE"},
 	{CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
 	{CTRY_BELIZE, APL1_ETSIC, "BZ"},
+	{CTRY_BERMUDA, FCC3_FCCA, "BM"},
 	{CTRY_BOLIVIA, APL1_ETSIC, "BO"},
 	{CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"},
 	{CTRY_BRAZIL, FCC3_WORLD, "BR"},
@@ -444,6 +454,7 @@
 	{CTRY_ROMANIA, NULL1_WORLD, "RO"},
 	{CTRY_RUSSIA, NULL1_WORLD, "RU"},
 	{CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
+	{CTRY_SERBIA, ETSI1_WORLD, "RS"},
 	{CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"},
 	{CTRY_SINGAPORE, APL6_WORLD, "SG"},
 	{CTRY_SLOVAKIA, ETSI1_WORLD, "SK"},
@@ -455,10 +466,12 @@
 	{CTRY_SWITZERLAND, ETSI1_WORLD, "CH"},
 	{CTRY_SYRIA, NULL1_WORLD, "SY"},
 	{CTRY_TAIWAN, APL3_FCCA, "TW"},
+	{CTRY_TANZANIA, APL1_WORLD, "TZ"},
 	{CTRY_THAILAND, FCC3_WORLD, "TH"},
 	{CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
 	{CTRY_TUNISIA, ETSI3_WORLD, "TN"},
 	{CTRY_TURKEY, ETSI3_WORLD, "TR"},
+	{CTRY_UGANDA, FCC3_WORLD, "UG"},
 	{CTRY_UKRAINE, NULL1_WORLD, "UA"},
 	{CTRY_UAE, NULL1_WORLD, "AE"},
 	{CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"},
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f70420f..846e47a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -21,7 +21,8 @@
 #include "ftm.h"
 
 #define WIL_MAX_ROC_DURATION_MS 5000
-#define CTRY_CHINA "CN"
+#define WIL_BRD_SUFFIX_CN "CN"
+#define WIL_BRD_SUFFIX_FCC "FCC"
 
 bool disable_ap_sme;
 module_param(disable_ap_sme, bool, 0444);
@@ -60,6 +61,25 @@
 };
 #endif
 
+struct wil_regd_2_brd_suffix {
+	const char regdomain[3]; /* alpha2 */
+	const char *brd_suffix;
+};
+
+static struct wil_regd_2_brd_suffix wil_regd_2_brd_suffix_map[] = {
+	{"BO", WIL_BRD_SUFFIX_FCC},
+	{"CN", WIL_BRD_SUFFIX_CN},
+	{"EC", WIL_BRD_SUFFIX_FCC},
+	{"GU", WIL_BRD_SUFFIX_FCC},
+	{"HN", WIL_BRD_SUFFIX_FCC},
+	{"JM", WIL_BRD_SUFFIX_FCC},
+	{"MX", WIL_BRD_SUFFIX_FCC},
+	{"NI", WIL_BRD_SUFFIX_FCC},
+	{"PY", WIL_BRD_SUFFIX_FCC},
+	{"TT", WIL_BRD_SUFFIX_FCC},
+	{"US", WIL_BRD_SUFFIX_FCC},
+};
+
 enum wil_nl_60g_cmd_type {
 	NL_60G_CMD_FW_WMI,
 	NL_60G_CMD_DEBUG,
@@ -1989,24 +2009,43 @@
 	return 0;
 }
 
+static void wil_get_brd_reg_suffix(struct wil6210_priv *wil,
+				   const u8 *new_regdomain,
+				   char *brd_reg_suffix, size_t len)
+{
+	int i;
+	struct wil_regd_2_brd_suffix *entry;
+
+	for (i = 0; i < ARRAY_SIZE(wil_regd_2_brd_suffix_map); i++) {
+		entry = &wil_regd_2_brd_suffix_map[i];
+		if (!memcmp(entry->regdomain, new_regdomain, 2)) {
+			strlcpy(brd_reg_suffix, entry->brd_suffix, len);
+			return;
+		}
+	}
+
+	/* regdomain not found in our map, set suffix to none */
+	brd_reg_suffix[0] = '\0';
+}
+
 static int wil_switch_board_file(struct wil6210_priv *wil,
 				 const u8 *new_regdomain)
 {
 	int rc = 0;
+	char brd_reg_suffix[WIL_BRD_SUFFIX_LEN];
 
 	if (!country_specific_board_file)
 		return 0;
 
-	if (memcmp(wil->regdomain, CTRY_CHINA, 2) == 0) {
-		wil_info(wil, "moving out of China reg domain, use default board file\n");
-		wil->board_file_country[0] = '\0';
-	} else if (memcmp(new_regdomain, CTRY_CHINA, 2) == 0) {
-		wil_info(wil, "moving into China reg domain, use country specific board file\n");
-		strlcpy(wil->board_file_country, CTRY_CHINA,
-			sizeof(wil->board_file_country));
-	} else {
+	wil_get_brd_reg_suffix(wil, new_regdomain, brd_reg_suffix,
+			       sizeof(brd_reg_suffix));
+	if (!strcmp(wil->board_file_reg_suffix, brd_reg_suffix))
 		return 0;
-	}
+
+	wil_info(wil, "switch board file suffix '%s' => '%s'\n",
+		 wil->board_file_reg_suffix, brd_reg_suffix);
+	strlcpy(wil->board_file_reg_suffix, brd_reg_suffix,
+		sizeof(wil->board_file_reg_suffix));
 
 	/* need to switch board file - reset the device */
 
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index f37254d..4cd9411 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -954,19 +954,19 @@
 	const char *ext;
 	int prefix_len;
 
-	if (wil->board_file_country[0] == '\0') {
+	if (wil->board_file_reg_suffix[0] == '\0') {
 		strlcpy(buf, board_file, len);
 		return;
 	}
 
 	/* use country specific board file */
-	if (len < strlen(board_file) + 4 /* for _XX and terminating null */)
+	if (len < strlen(board_file) + 1 + WIL_BRD_SUFFIX_LEN) /* 1 for '_' */
 		return;
 
 	ext = strrchr(board_file, '.');
 	prefix_len = (ext ? ext - board_file : strlen(board_file));
-	snprintf(buf, len, "%.*s_%.2s",
-		 prefix_len, board_file, wil->board_file_country);
+	snprintf(buf, len, "%.*s_%.3s",
+		 prefix_len, board_file, wil->board_file_reg_suffix);
 	if (ext)
 		strlcat(buf, ext, len);
 }
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 2b71deb..633e30f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -50,6 +50,8 @@
 #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
+#define WIL_BRD_SUFFIX_LEN 4 /* max 3 letters + terminating null */
+
 /**
  * extract bits [@b0:@b1] (inclusive) from the value @x
  * it should be @b0 <= @b1, or result is incorrect
@@ -683,7 +685,7 @@
 	const char *hw_name;
 	const char *wil_fw_name;
 	char *board_file;
-	char board_file_country[3]; /* alpha2 */
+	char board_file_reg_suffix[WIL_BRD_SUFFIX_LEN]; /* empty or CN or FCC */
 	u32 brd_file_addr;
 	u32 brd_file_max_size;
 	DECLARE_BITMAP(hw_capa, hw_capa_last);
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index cb987c2..87131f6 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -131,7 +131,7 @@
 	led->wl = dev->wl;
 	led->index = led_index;
 	led->activelow = activelow;
-	strncpy(led->name, name, sizeof(led->name));
+	strlcpy(led->name, name, sizeof(led->name));
 	atomic_set(&led->state, 0);
 
 	led->led_dev.name = led->name;
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
index fd45653..bc92211 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
@@ -101,7 +101,7 @@
 	led->dev = dev;
 	led->index = led_index;
 	led->activelow = activelow;
-	strncpy(led->name, name, sizeof(led->name));
+	strlcpy(led->name, name, sizeof(led->name));
 
 	led->led_dev.name = led->name;
 	led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 746f8c9..e69cf0e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1099,6 +1099,7 @@
 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index d46f086..de52d82 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4229,6 +4229,13 @@
 	brcmf_dbg(TRACE, "Enter\n");
 
 	if (bus) {
+		/* Stop watchdog task */
+		if (bus->watchdog_tsk) {
+			send_sig(SIGTERM, bus->watchdog_tsk, 1);
+			kthread_stop(bus->watchdog_tsk);
+			bus->watchdog_tsk = NULL;
+		}
+
 		/* De-register interrupt handler */
 		brcmf_sdiod_intr_unregister(bus->sdiodev);
 
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index 6a8e67c..accd50a 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -332,3 +332,19 @@
 		return -EINVAL;
 	}
 }
+
+int cnss_bus_update_status(struct cnss_plat_data *plat_priv,
+			   enum cnss_driver_status status)
+{
+	if (!plat_priv)
+		return -ENODEV;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_update_status(plat_priv->bus_priv, status);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return -EINVAL;
+	}
+}
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index 91356e9..cfa3524 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -51,5 +51,7 @@
 int cnss_bus_unregister_driver_hdlr(struct cnss_plat_data *plat_priv);
 int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
 				      int modem_current_status);
+int cnss_bus_update_status(struct cnss_plat_data *plat_priv,
+			   enum cnss_driver_status status);
 
 #endif /* _CNSS_BUS_H */
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 53a4620..30e37a0 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -428,6 +428,82 @@
 	.llseek		= seq_lseek,
 };
 
+static ssize_t cnss_runtime_pm_debug_write(struct file *fp,
+					   const char __user *user_buf,
+					   size_t count, loff_t *off)
+{
+	struct cnss_plat_data *plat_priv =
+		((struct seq_file *)fp->private_data)->private;
+	struct cnss_pci_data *pci_priv;
+	char buf[64];
+	char *cmd;
+	unsigned int len = 0;
+	int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	pci_priv = plat_priv->bus_priv;
+	if (!pci_priv)
+		return -ENODEV;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	cmd = buf;
+
+	if (sysfs_streq(cmd, "usage_count")) {
+		cnss_pci_pm_runtime_show_usage_count(pci_priv);
+	} else if (sysfs_streq(cmd, "get")) {
+		ret = cnss_pci_pm_runtime_get(pci_priv);
+	} else if (sysfs_streq(cmd, "get_noresume")) {
+		cnss_pci_pm_runtime_get_noresume(pci_priv);
+	} else if (sysfs_streq(cmd, "put_autosuspend")) {
+		ret = cnss_pci_pm_runtime_put_autosuspend(pci_priv);
+	} else if (sysfs_streq(cmd, "put_noidle")) {
+		cnss_pci_pm_runtime_put_noidle(pci_priv);
+	} else if (sysfs_streq(cmd, "mark_last_busy")) {
+		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+	} else {
+		cnss_pr_err("Runtime PM debugfs command is invalid\n");
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static int cnss_runtime_pm_debug_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "\nUsage: echo <action> > <debugfs_path>/cnss/runtime_pm\n");
+	seq_puts(s, "<action> can be one of below:\n");
+	seq_puts(s, "usage_count: get runtime PM usage count\n");
+	seq_puts(s, "get: do runtime PM get\n");
+	seq_puts(s, "get_noresume: do runtime PM get noresume\n");
+	seq_puts(s, "put_noidle: do runtime PM put noidle\n");
+	seq_puts(s, "put_autosuspend: do runtime PM put autosuspend\n");
+	seq_puts(s, "mark_last_busy: do runtime PM mark last busy\n");
+
+	return 0;
+}
+
+static int cnss_runtime_pm_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cnss_runtime_pm_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_runtime_pm_debug_fops = {
+	.read		= seq_read,
+	.write		= cnss_runtime_pm_debug_write,
+	.open		= cnss_runtime_pm_debug_open,
+	.owner		= THIS_MODULE,
+	.llseek		= seq_lseek,
+};
+
 #ifdef CONFIG_CNSS2_DEBUG
 static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
 {
@@ -439,6 +515,8 @@
 			    &cnss_reg_read_debug_fops);
 	debugfs_create_file("reg_write", 0600, root_dentry, plat_priv,
 			    &cnss_reg_write_debug_fops);
+	debugfs_create_file("runtime_pm", 0600, root_dentry, plat_priv,
+			    &cnss_runtime_pm_debug_fops);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 4433ae0..e711ab4 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -426,6 +426,7 @@
 
 	del_timer(&plat_priv->fw_boot_timer);
 	set_bit(CNSS_FW_READY, &plat_priv->driver_state);
+	clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
 
 	if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
 		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
@@ -901,7 +902,6 @@
 		break;
 	case CNSS_REASON_RDDM:
 		cnss_bus_collect_dump_info(plat_priv, false);
-		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
 		break;
 	case CNSS_REASON_DEFAULT:
 	case CNSS_REASON_TIMEOUT:
@@ -994,6 +994,8 @@
 	struct cnss_recovery_data *data;
 	int gfp = GFP_KERNEL;
 
+	cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
+
 	if (in_interrupt() || irqs_disabled())
 		gfp = GFP_ATOMIC;
 
@@ -1039,19 +1041,34 @@
 {
 	int ret = 0;
 
-	set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
-	ret = cnss_bus_dev_powerup(plat_priv);
-	if (ret)
-		clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+	if (test_bit(CNSS_FW_READY, &plat_priv->driver_state) ||
+	    test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Device is already active, ignore calibration\n");
+		goto out;
+	}
 
+	set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+	reinit_completion(&plat_priv->cal_complete);
+	ret = cnss_bus_dev_powerup(plat_priv);
+	if (ret) {
+		complete(&plat_priv->cal_complete);
+		clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+	}
+
+out:
 	return ret;
 }
 
 static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
 {
+	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
+		return 0;
+
 	plat_priv->cal_done = true;
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
 	cnss_bus_dev_shutdown(plat_priv);
+	complete(&plat_priv->cal_complete);
 	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
 
 	return 0;
@@ -1537,6 +1554,36 @@
 	destroy_workqueue(plat_priv->event_wq);
 }
 
+static int cnss_misc_init(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+
+	setup_timer(&plat_priv->fw_boot_timer, cnss_bus_fw_boot_timeout_hdlr,
+		    (unsigned long)plat_priv);
+
+	register_pm_notifier(&cnss_pm_notifier);
+
+	ret = device_init_wakeup(&plat_priv->plat_dev->dev, true);
+	if (ret)
+		cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+			    ret);
+
+	init_completion(&plat_priv->power_up_complete);
+	init_completion(&plat_priv->cal_complete);
+	mutex_init(&plat_priv->dev_lock);
+
+	return 0;
+}
+
+static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
+{
+	complete_all(&plat_priv->cal_complete);
+	complete_all(&plat_priv->power_up_complete);
+	device_init_wakeup(&plat_priv->plat_dev->dev, false);
+	unregister_pm_notifier(&cnss_pm_notifier);
+	del_timer(&plat_priv->fw_boot_timer);
+}
+
 static const struct platform_device_id cnss_platform_id_table[] = {
 	{ .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
 	{ .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
@@ -1630,23 +1677,16 @@
 	if (ret)
 		goto deinit_qmi;
 
-	setup_timer(&plat_priv->fw_boot_timer, cnss_bus_fw_boot_timeout_hdlr,
-		    (unsigned long)plat_priv);
-
-	register_pm_notifier(&cnss_pm_notifier);
-
-	ret = device_init_wakeup(&plat_dev->dev, true);
+	ret = cnss_misc_init(plat_priv);
 	if (ret)
-		cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
-			    ret);
-
-	init_completion(&plat_priv->power_up_complete);
-	mutex_init(&plat_priv->dev_lock);
+		goto destroy_debugfs;
 
 	cnss_pr_info("Platform driver probed successfully.\n");
 
 	return 0;
 
+destroy_debugfs:
+	cnss_debugfs_destroy(plat_priv);
 deinit_qmi:
 	cnss_qmi_deinit(plat_priv);
 deinit_event_work:
@@ -1676,10 +1716,7 @@
 {
 	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
 
-	complete_all(&plat_priv->power_up_complete);
-	device_init_wakeup(&plat_dev->dev, false);
-	unregister_pm_notifier(&cnss_pm_notifier);
-	del_timer(&plat_priv->fw_boot_timer);
+	cnss_misc_deinit(plat_priv);
 	cnss_debugfs_destroy(plat_priv);
 	cnss_qmi_deinit(plat_priv);
 	cnss_event_work_deinit(plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index a2f346a..dd14bbe 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -243,6 +243,7 @@
 	atomic_t pm_count;
 	struct timer_list fw_boot_timer;
 	struct completion power_up_complete;
+	struct completion cal_complete;
 	struct mutex dev_lock; /* mutex for register access through debugfs */
 	u32 diag_reg_read_addr;
 	u32 diag_reg_read_mem_type;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 0c07bef..c974a1bf 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -350,6 +350,25 @@
 	return 0;
 }
 
+int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
+			   enum cnss_driver_status status)
+{
+	struct cnss_wlan_driver *driver_ops;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	driver_ops = pci_priv->driver_ops;
+	if (!driver_ops || !driver_ops->update_status)
+		return -EINVAL;
+
+	cnss_pr_dbg("Update driver status: %d\n", status);
+
+	driver_ops->update_status(pci_priv->pci_dev, status);
+
+	return 0;
+}
+
 static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -693,6 +712,7 @@
 	int ret = 0;
 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
 	struct cnss_pci_data *pci_priv;
+	unsigned int timeout;
 
 	if (!plat_priv) {
 		cnss_pr_err("plat_priv is NULL\n");
@@ -710,10 +730,27 @@
 		return -EEXIST;
 	}
 
+	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
+		goto register_driver;
+
+	cnss_pr_dbg("Start to wait for calibration to complete\n");
+
+	timeout = cnss_get_boot_timeout(&pci_priv->pci_dev->dev);
+	ret = wait_for_completion_timeout(&plat_priv->cal_complete,
+					  msecs_to_jiffies(timeout) << 2);
+	if (!ret) {
+		cnss_pr_err("Timeout waiting for calibration to complete\n");
+		ret = -EAGAIN;
+		goto out;
+	}
+
+register_driver:
 	ret = cnss_driver_event_post(plat_priv,
 				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
 				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
 				     driver_ops);
+
+out:
 	return ret;
 }
 EXPORT_SYMBOL(cnss_wlan_register_driver);
@@ -1124,6 +1161,59 @@
 }
 EXPORT_SYMBOL(cnss_wlan_pm_control);
 
+void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
+{
+	struct device *dev;
+
+	if (!pci_priv)
+		return;
+
+	dev = &pci_priv->pci_dev->dev;
+
+	cnss_pr_dbg("Runtime PM usage count: %d\n",
+		    atomic_read(&dev->power.usage_count));
+}
+
+int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv)
+{
+	if (!pci_priv)
+		return -ENODEV;
+
+	return pm_runtime_get(&pci_priv->pci_dev->dev);
+}
+
+void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv)
+{
+	if (!pci_priv)
+		return;
+
+	return pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
+}
+
+int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv)
+{
+	if (!pci_priv)
+		return -ENODEV;
+
+	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
+}
+
+void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv)
+{
+	if (!pci_priv)
+		return;
+
+	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
+}
+
+void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
+{
+	if (!pci_priv)
+		return;
+
+	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
+}
+
 int cnss_auto_suspend(struct device *dev)
 {
 	int ret = 0;
@@ -1238,6 +1328,94 @@
 	return pm_request_resume(&pci_dev->dev);
 }
 
+#ifdef CONFIG_CNSS_QCA6390
+int cnss_pci_force_wake_request(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct mhi_controller *mhi_ctrl;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	if (pci_priv->device_id != QCA6390_DEVICE_ID)
+		return 0;
+
+	mhi_ctrl = pci_priv->mhi_ctrl;
+	if (!mhi_ctrl)
+		return -EINVAL;
+
+	read_lock_bh(&mhi_ctrl->pm_lock);
+	mhi_ctrl->wake_get(mhi_ctrl, true);
+	read_unlock_bh(&mhi_ctrl->pm_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request);
+
+int cnss_pci_is_device_awake(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct mhi_controller *mhi_ctrl;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	if (pci_priv->device_id != QCA6390_DEVICE_ID)
+		return true;
+
+	mhi_ctrl = pci_priv->mhi_ctrl;
+	if (!mhi_ctrl)
+		return -EINVAL;
+
+	return mhi_ctrl->dev_state == MHI_STATE_M0 ? true : false;
+}
+EXPORT_SYMBOL(cnss_pci_is_device_awake);
+
+int cnss_pci_force_wake_release(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+	struct mhi_controller *mhi_ctrl;
+
+	if (!pci_priv)
+		return -ENODEV;
+
+	if (pci_priv->device_id != QCA6390_DEVICE_ID)
+		return 0;
+
+	mhi_ctrl = pci_priv->mhi_ctrl;
+	if (!mhi_ctrl)
+		return -EINVAL;
+
+	read_lock_bh(&mhi_ctrl->pm_lock);
+	mhi_ctrl->wake_put(mhi_ctrl, false);
+	read_unlock_bh(&mhi_ctrl->pm_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_release);
+#else
+int cnss_pci_force_wake_request(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request);
+
+int cnss_pci_is_device_awake(struct device *dev)
+{
+	return true;
+}
+EXPORT_SYMBOL(cnss_pci_is_device_awake);
+
+int cnss_pci_force_wake_release(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_release);
+#endif
+
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -1851,9 +2029,10 @@
 
 	cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
 
-	if (pci_priv->driver_ops && pci_priv->driver_ops->update_status)
-		pci_priv->driver_ops->update_status(pci_priv->pci_dev,
-						    CNSS_FW_DOWN);
+	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+		cnss_pr_dbg("Driver unload is in progress, ignore device error\n");
+		return;
+	}
 
 	switch (reason) {
 	case MHI_CB_EE_RDDM:
@@ -1867,8 +2046,7 @@
 	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
 	del_timer(&plat_priv->fw_boot_timer);
 
-	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
-			       cnss_reason);
+	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
 }
 
 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index c8de4d7..6476ce1 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -143,5 +143,13 @@
 int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv);
 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
 				      int modem_current_status);
+void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv);
+int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv);
+void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv);
+int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv);
+void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv);
+void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv);
+int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
+			   enum cnss_driver_status status);
 
 #endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 2c375bb..d857b9f 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -242,6 +242,8 @@
 	req.fw_init_done_enable = 1;
 	req.pin_connect_result_enable_valid = 1;
 	req.pin_connect_result_enable = 1;
+	req.cal_done_enable_valid = 1;
+	req.cal_done_enable = 1;
 
 	req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
 	req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
@@ -1054,6 +1056,11 @@
 	case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
 		cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len);
 		break;
+	case QMI_WLFW_CAL_DONE_IND_V01:
+		cnss_driver_event_post(plat_priv,
+				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+				       0, NULL);
+		break;
 	default:
 		cnss_pr_err("Invalid QMI WLFW indication, msg_id: 0x%x\n",
 			    msg_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 6fe5546..996a928 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -898,6 +898,8 @@
 						WQ_HIGHPRI | WQ_UNBOUND, 1);
 	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
 
+	cancel_work_sync(&rba->rx_alloc);
+
 	spin_lock(&rba->lock);
 	atomic_set(&rba->req_pending, 0);
 	atomic_set(&rba->req_ready, 0);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 95e9641..4bb36dc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2569,9 +2569,6 @@
 				IEEE80211_VHT_CAP_SHORT_GI_80 |
 				IEEE80211_VHT_CAP_SHORT_GI_160 |
 				IEEE80211_VHT_CAP_TXSTBC |
-				IEEE80211_VHT_CAP_RXSTBC_1 |
-				IEEE80211_VHT_CAP_RXSTBC_2 |
-				IEEE80211_VHT_CAP_RXSTBC_3 |
 				IEEE80211_VHT_CAP_RXSTBC_4 |
 				IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 			sband->vht_cap.vht_mcs.rx_mcs_map =
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index edf710b..3de1457 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -103,6 +103,7 @@
 	u8 fw_ready;
 	u8 surpriseremoved;
 	u8 setup_fw_on_resume;
+	u8 power_up_on_resume;
 	int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
 	void (*reset_card) (struct lbs_private *priv);
 	int (*power_save) (struct lbs_private *priv);
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 47f4a14..a0ae8d8 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1341,15 +1341,23 @@
 static int if_sdio_suspend(struct device *dev)
 {
 	struct sdio_func *func = dev_to_sdio_func(dev);
-	int ret;
 	struct if_sdio_card *card = sdio_get_drvdata(func);
+	struct lbs_private *priv = card->priv;
+	int ret;
 
 	mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
+	priv->power_up_on_resume = false;
 
 	/* If we're powered off anyway, just let the mmc layer remove the
 	 * card. */
-	if (!lbs_iface_active(card->priv))
-		return -ENOSYS;
+	if (!lbs_iface_active(priv)) {
+		if (priv->fw_ready) {
+			priv->power_up_on_resume = true;
+			if_sdio_power_off(card);
+		}
+
+		return 0;
+	}
 
 	dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
 		 sdio_func_id(func), flags);
@@ -1357,9 +1365,14 @@
 	/* If we aren't being asked to wake on anything, we should bail out
 	 * and let the SD stack power down the card.
 	 */
-	if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+	if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
 		dev_info(dev, "Suspend without wake params -- powering down card\n");
-		return -ENOSYS;
+		if (priv->fw_ready) {
+			priv->power_up_on_resume = true;
+			if_sdio_power_off(card);
+		}
+
+		return 0;
 	}
 
 	if (!(flags & MMC_PM_KEEP_POWER)) {
@@ -1372,7 +1385,7 @@
 	if (ret)
 		return ret;
 
-	ret = lbs_suspend(card->priv);
+	ret = lbs_suspend(priv);
 	if (ret)
 		return ret;
 
@@ -1387,6 +1400,11 @@
 
 	dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
 
+	if (card->priv->power_up_on_resume) {
+		if_sdio_power_on(card);
+		wait_event(card->pwron_waitq, card->priv->fw_ready);
+	}
+
 	ret = lbs_resume(card->priv);
 
 	return ret;
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 73eb084..09185a1 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -624,6 +624,9 @@
 					 MWIFIEX_FUNC_SHUTDOWN);
 	}
 
+	if (adapter->workqueue)
+		flush_workqueue(adapter->workqueue);
+
 	mwifiex_usb_free(card);
 
 	mwifiex_dbg(adapter, FATAL,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 18fbb96..d75756c 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -723,12 +723,14 @@
 			   s8 nflr)
 {
 	struct mwifiex_histogram_data *phist_data = priv->hist_data;
+	s8 nf   = -nflr;
+	s8 rssi = snr - nflr;
 
 	atomic_inc(&phist_data->num_samples);
 	atomic_inc(&phist_data->rx_rate[rx_rate]);
-	atomic_inc(&phist_data->snr[snr]);
-	atomic_inc(&phist_data->noise_flr[128 + nflr]);
-	atomic_inc(&phist_data->sig_str[nflr - snr]);
+	atomic_inc(&phist_data->snr[snr + 128]);
+	atomic_inc(&phist_data->noise_flr[nf + 128]);
+	atomic_inc(&phist_data->sig_str[rssi + 128]);
 }
 
 /* function to reset histogram data during init/reset */
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 4da4e45..9526643 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -131,7 +131,6 @@
 		       firmware->size);
 		rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
 	}
-	rtlpriv->rtlhal.fwsize = firmware->size;
 	release_firmware(firmware);
 }
 
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index ae87b39..2e92872 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2928,6 +2928,8 @@
 
 	while (buflen >= sizeof(*auth_req)) {
 		auth_req = (void *)buf;
+		if (buflen < le32_to_cpu(auth_req->length))
+			return;
 		type = "unknown";
 		flags = le32_to_cpu(auth_req->flags);
 		pairwise_error = false;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 8428858..fc895b4 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -155,7 +155,6 @@
 	int err;
 	struct mmc_card *card = pfunction->card;
 	struct mmc_host *host = card->host;
-	s32 bit = (fls(host->ocr_avail) - 1);
 	u8 cmd52_resp;
 	u32 clock, resp, i;
 	u16 rca;
@@ -175,7 +174,6 @@
 	msleep(20);
 
 	/* Initialize the SDIO card */
-	host->ios.vdd = bit;
 	host->ios.chip_select = MMC_CS_DONTCARE;
 	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
 	host->ios.power_mode = MMC_POWER_UP;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 7f4da72..96f83f0 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -35,6 +35,7 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
+#include "ps.h"
 #include "tx.h"
 #include "hw_ops.h"
 
@@ -191,6 +192,10 @@
 
 	timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
 
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		return ret;
+
 	do {
 		if (time_after(jiffies, timeout_time)) {
 			wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +227,7 @@
 	} while (!event);
 
 out:
+	wl1271_ps_elp_sleep(wl);
 	kfree(events_vector);
 	return ret;
 }
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index b9e1404..7367f09 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@
 static void wl1271_rx_status(struct wl1271 *wl,
 			     struct wl1271_rx_descriptor *desc,
 			     struct ieee80211_rx_status *status,
-			     u8 beacon)
+			     u8 beacon, u8 probe_rsp)
 {
 	memset(status, 0, sizeof(struct ieee80211_rx_status));
 
@@ -106,6 +106,9 @@
 		}
 	}
 
+	if (beacon || probe_rsp)
+		status->boottime_ns = ktime_get_boot_ns();
+
 	if (beacon)
 		wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
 						status->band);
@@ -194,7 +197,8 @@
 	if (ieee80211_is_data_present(hdr->frame_control))
 		is_data = 1;
 
-	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+	wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+			 ieee80211_is_probe_resp(hdr->frame_control));
 	wlcore_hw_set_rx_csum(wl, desc, skb);
 
 	seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 47fe7f9..6921cb0 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -404,6 +404,11 @@
 	mmc_pm_flag_t sdio_flags;
 	int ret = 0;
 
+	if (!wl) {
+		dev_err(dev, "no wilink module was probed\n");
+		goto out;
+	}
+
 	dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
 		wl->wow_enabled);
 
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b..3b6fb5b 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -332,20 +332,22 @@
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 			    u32 off)
 {
-	u32 *mapping = &vif->hash.mapping[off];
+	u32 *mapping = vif->hash.mapping;
 	struct gnttab_copy copy_op = {
 		.source.u.ref = gref,
 		.source.domid = vif->domid,
-		.dest.u.gmfn = virt_to_gfn(mapping),
 		.dest.domid = DOMID_SELF,
-		.dest.offset = xen_offset_in_page(mapping),
-		.len = len * sizeof(u32),
+		.len = len * sizeof(*mapping),
 		.flags = GNTCOPY_source_gref
 	};
 
-	if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+	if ((off + len < off) || (off + len > vif->hash.size) ||
+	    len > XEN_PAGE_SIZE / sizeof(*mapping))
 		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
+	copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
+	copy_op.dest.offset = xen_offset_in_page(mapping + off);
+
 	while (len-- != 0)
 		if (mapping[off++] >= vif->num_queues)
 			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 520050e..3c1adb3 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -86,7 +86,7 @@
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 struct netfront_stats {
 	u64			packets;
@@ -238,7 +238,7 @@
 static int netfront_tx_slot_available(struct netfront_queue *queue)
 {
 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
-		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
+		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 }
 
 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -789,7 +789,7 @@
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
-	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 	int slots = 1;
 	int err = 0;
 	unsigned long ret;
@@ -892,7 +892,6 @@
 				  struct sk_buff *skb,
 				  struct sk_buff_head *list)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -901,15 +900,20 @@
 			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
 			BUG_ON(pull_to <= skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+			queue->rx.rsp_cons = ++cons;
+			kfree_skb(nskb);
+			return ~0U;
+		}
 
-		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				skb_frag_page(nfrag),
 				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
@@ -1043,6 +1047,8 @@
 		skb->len += rx->status;
 
 		i = xennet_fill_frags(queue, skb, &tmpq);
+		if (unlikely(i == ~0U))
+			goto err;
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1349,6 +1355,11 @@
 	netif_carrier_off(netdev);
 
 	xenbus_switch_state(dev, XenbusStateInitialising);
+	wait_event(module_wq,
+		   xenbus_read_driver_state(dev->otherend) !=
+		   XenbusStateClosed &&
+		   xenbus_read_driver_state(dev->otherend) !=
+		   XenbusStateUnknown);
 	return netdev;
 
  exit:
@@ -1616,6 +1627,7 @@
 {
 	unsigned short i;
 	int err = 0;
+	char *devid;
 
 	spin_lock_init(&queue->tx_lock);
 	spin_lock_init(&queue->rx_lock);
@@ -1623,8 +1635,9 @@
 	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
 		    (unsigned long)queue);
 
-	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
-		 queue->info->netdev->name, queue->id);
+	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
+	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
+		 devid, queue->id);
 
 	/* Initialise tx_skbs as a free chain containing every entry. */
 	queue->tx_skb_freelist = 0;
@@ -2031,15 +2044,14 @@
 
 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
 
+	wake_up_all(&module_wq);
+
 	switch (backend_state) {
 	case XenbusStateInitialising:
 	case XenbusStateInitialised:
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
-		break;
-
 	case XenbusStateUnknown:
-		wake_up_all(&module_unload_q);
 		break;
 
 	case XenbusStateInitWait:
@@ -2055,12 +2067,10 @@
 		break;
 
 	case XenbusStateClosed:
-		wake_up_all(&module_unload_q);
 		if (dev->state == XenbusStateClosed)
 			break;
 		/* Missed the backend's CLOSING state -- fallthrough */
 	case XenbusStateClosing:
-		wake_up_all(&module_unload_q);
 		xenbus_frontend_closed(dev);
 		break;
 	}
@@ -2168,14 +2178,14 @@
 
 	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
 		xenbus_switch_state(dev, XenbusStateClosing);
-		wait_event(module_unload_q,
+		wait_event(module_wq,
 			   xenbus_read_driver_state(dev->otherend) ==
 			   XenbusStateClosing ||
 			   xenbus_read_driver_state(dev->otherend) ==
 			   XenbusStateUnknown);
 
 		xenbus_switch_state(dev, XenbusStateClosed);
-		wait_event(module_unload_q,
+		wait_event(module_wq,
 			   xenbus_read_driver_state(dev->otherend) ==
 			   XenbusStateClosed ||
 			   xenbus_read_driver_state(dev->otherend) ==
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 0280d42..9e00509 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -534,6 +534,32 @@
 		usleep_range(10000, 10100);
 		gpio_set_value(nqx_dev->en_gpio, 1);
 		usleep_range(10000, 10100);
+	} else if (arg == 4) {
+		/*
+		 * Setting firmware download gpio to HIGH for SN100U
+		 * before FW download start
+		 */
+		dev_dbg(&nqx_dev->client->dev, "SN100 fw gpio HIGH\n");
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+			usleep_range(10000, 10100);
+		} else {
+			dev_err(&nqx_dev->client->dev,
+				"firm_gpio is invalid\n");
+		}
+	} else if (arg == 6) {
+		/*
+		 * Setting firmware download gpio to LOW for SN100U
+		 * FW download finished
+		 */
+		dev_dbg(&nqx_dev->client->dev, "SN100 fw gpio LOW\n");
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		} else {
+			dev_err(&nqx_dev->client->dev,
+				"firm_gpio is invalid\n");
+		}
 	} else {
 		r = -ENOIOCTLCMD;
 	}
@@ -653,6 +679,7 @@
 {
 	int ret = 0;
 
+	int gpio_retry_count = 0;
 	unsigned char raw_nci_reset_cmd[] =  {0x20, 0x00, 0x01, 0x00};
 	unsigned char raw_nci_init_cmd[] =   {0x20, 0x01, 0x00};
 	unsigned char nci_init_rsp[28];
@@ -660,6 +687,7 @@
 	unsigned char init_rsp_len = 0;
 	unsigned int enable_gpio = nqx_dev->en_gpio;
 
+reset_enable_gpio:
 	/* making sure that the NFCC starts in a clean state. */
 	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
 	/* hardware dependent delay */
@@ -685,6 +713,9 @@
 	if (ret < 0) {
 		dev_err(&client->dev,
 		"%s: - i2c_master_recv Error\n", __func__);
+		gpio_retry_count = gpio_retry_count + 1;
+		if (gpio_retry_count < MAX_RETRY_COUNT)
+			goto reset_enable_gpio;
 		goto err_nfcc_hw_check;
 	}
 	ret = nqx_standby_write(nqx_dev, raw_nci_init_cmd,
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 33ed78b..3a897f5 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -71,7 +71,7 @@
 	struct sk_buff *skb = NULL;
 
 	if (!urb->status) {
-		skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+		skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
 		if (!skb) {
 			nfc_err(&phy->udev->dev, "failed to alloc memory\n");
 		} else {
@@ -180,7 +180,7 @@
 
 	if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
 		/* request for response for sent packet directly */
-		rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+		rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
 		if (rc)
 			goto error;
 	} else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index c1a65ce..de6d3b7 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -748,9 +748,9 @@
 		 * overshoots the remainder by 4 bytes, assume it was
 		 * including 'status'.
 		 */
-		if (out_field[1] - 8 == remainder)
+		if (out_field[1] - 4 == remainder)
 			return remainder;
-		return out_field[1] - 4;
+		return out_field[1] - 8;
 	} else if (cmd == ND_CMD_CALL) {
 		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c823e93..979c6ec 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2042,7 +2042,8 @@
 	mutex_lock(&ctrl->namespaces_mutex);
 
 	/* Forcibly start all queues to avoid having stuck requests */
-	blk_mq_start_hw_queues(ctrl->admin_q);
+	if (ctrl->admin_q)
+		blk_mq_start_hw_queues(ctrl->admin_q);
 
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a55d112..fadf151 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1034,17 +1034,15 @@
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				int qid, int depth)
 {
-	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
-		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-						      dev->ctrl.page_size);
-		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-		nvmeq->sq_cmds_io = dev->cmb + offset;
-	} else {
-		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-		if (!nvmeq->sq_cmds)
-			return -ENOMEM;
-	}
+
+	/* CMB SQEs will be mapped before creation */
+	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz))
+		return 0;
+
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+					    &nvmeq->sq_dma_addr, GFP_KERNEL);
+	if (!nvmeq->sq_cmds)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -1117,6 +1115,13 @@
 	struct nvme_dev *dev = nvmeq->dev;
 	int result;
 
+	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+						      dev->ctrl.page_size);
+		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+		nvmeq->sq_cmds_io = dev->cmb + offset;
+	}
+
 	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 64b40a1..f12753e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -578,6 +578,14 @@
 	}
 
 	ctrl->csts = NVME_CSTS_RDY;
+
+	/*
+	 * Controllers that are not yet enabled should not really enforce the
+	 * keep alive timeout, but we still want to track a timeout and cleanup
+	 * in case a host died before it enabled the controller.  Hence, simply
+	 * reset the keep alive timer when the controller is enabled.
+	 */
+	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 53bd325..2dfd877 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -65,6 +65,7 @@
 
 	struct nvmet_req	req;
 
+	bool			allocated;
 	u8			n_rdma;
 	u32			flags;
 	u32			invalidate_rkey;
@@ -167,11 +168,19 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry(&queue->free_rsps,
+	rsp = list_first_entry_or_null(&queue->free_rsps,
 				struct nvmet_rdma_rsp, free_list);
-	list_del(&rsp->free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
 	return rsp;
 }
 
@@ -180,6 +189,11 @@
 {
 	unsigned long flags;
 
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -755,6 +769,15 @@
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * we get here only under memory pressure,
+		 * silently drop and have the host retry
+		 * as we can't even fail it.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
 	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 1b4d93e..824e282 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1031,6 +1031,8 @@
 
 		/* setup the first byte with lsb bits from nvmem */
 		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
+		if (rc)
+			goto err;
 		*b++ |= GENMASK(bit_offset - 1, 0) & v;
 
 		/* setup rest of the byte if any */
@@ -1049,11 +1051,16 @@
 		/* setup the last byte with msb bits from nvmem */
 		rc = nvmem_reg_read(nvmem,
 				    cell->offset + cell->bytes - 1, &v, 1);
+		if (rc)
+			goto err;
 		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
 
 	}
 
 	return buf;
+err:
+	kfree(buf);
+	return ERR_PTR(rc);
 }
 
 /**
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 90b5a89..0a1ebbb 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -548,6 +548,9 @@
 	struct of_phandle_args args;
 	int i, rc;
 
+	if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+		return;
+
 	np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
 	if (!np) {
 		pr_err("missing testcase data\n");
@@ -622,6 +625,9 @@
 	struct of_phandle_args args;
 	int i, rc;
 
+	if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+		return;
+
 	np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
 	if (!np) {
 		pr_err("missing testcase data\n");
@@ -778,15 +784,19 @@
 	pdev = of_find_device_by_node(np);
 	unittest(pdev, "device 1 creation failed\n");
 
-	irq = platform_get_irq(pdev, 0);
-	unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+	if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+		irq = platform_get_irq(pdev, 0);
+		unittest(irq == -EPROBE_DEFER,
+			 "device deferred probe failed - %d\n", irq);
 
-	/* Test that a parsing failure does not return -EPROBE_DEFER */
-	np = of_find_node_by_path("/testcase-data/testcase-device2");
-	pdev = of_find_device_by_node(np);
-	unittest(pdev, "device 2 creation failed\n");
-	irq = platform_get_irq(pdev, 0);
-	unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+		/* Test that a parsing failure does not return -EPROBE_DEFER */
+		np = of_find_node_by_path("/testcase-data/testcase-device2");
+		pdev = of_find_device_by_node(np);
+		unittest(pdev, "device 2 creation failed\n");
+		irq = platform_get_irq(pdev, 0);
+		unittest(irq < 0 && irq != -EPROBE_DEFER,
+			 "device parsing error failed - %d\n", irq);
+	}
 
 	np = of_find_node_by_path("/testcase-data/platform-tests");
 	unittest(np, "No testcase data in device tree\n");
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 01cf1c1..8de3295 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -286,12 +286,16 @@
 
 	ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations),
 		      GFP_KERNEL);
-        if (!ops)
+	if (!ops) {
+		err = -ENOMEM;
 		goto out_unmap;
+	}
 
 	dprintk(("register_port\n"));
-	if (!(p = parport_register_port((unsigned long)base, irq, dma, ops)))
+	if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) {
+		err = -ENOMEM;
 		goto out_free_ops;
+	}
 
 	p->size = size;
 	p->dev = &op->dev;
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 11bad82..1dbd09c 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -976,6 +976,7 @@
 		return -ENOMEM;
 	}
 
+	pci_bus_size_bridges(bus);
 	pci_bus_assign_resources(bus);
 
 	list_for_each_entry(child, &bus->children, node)
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index e3c48b5..5c90d7b 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -45,7 +45,7 @@
 
 		switch (resource_type(res)) {
 		case IORESOURCE_IO:
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index d392a55..b4d8ccf 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -52,6 +52,8 @@
 #include <linux/pci.h>
 #include <linux/semaphore.h>
 #include <linux/irqdomain.h>
+#include <linux/irq.h>
+
 #include <asm/irqdomain.h>
 #include <asm/apic.h>
 #include <linux/msi.h>
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 90e0b6f..23d7f73 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1236,7 +1236,7 @@
 		pcie->realio.start = PCIBIOS_MIN_IO;
 		pcie->realio.end = min_t(resource_size_t,
 					 IO_SPACE_LIMIT,
-					 resource_size(&pcie->io));
+					 resource_size(&pcie->io) - 1);
 	} else
 		pcie->realio = pcie->io;
 
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index b7dc070..4096cce 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -89,7 +89,7 @@
 
 		switch (resource_type(res)) {
 		case IORESOURCE_IO:
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 62700d1..d6196f7 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1102,7 +1102,7 @@
 		struct resource *res = win->res;
 
 		if (resource_type(res) == IORESOURCE_IO) {
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 43eaa4a..94fdd29 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -532,7 +532,7 @@
 							INTX_NUM,
 							&legacy_domain_ops,
 							pcie);
-
+	of_node_put(legacy_intc_node);
 	if (!pcie->legacy_irq_domain) {
 		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index c8616fa..61332f4 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -527,6 +527,7 @@
 	port->leg_domain = irq_domain_add_linear(pcie_intc_node, 4,
 						 &intx_domain_ops,
 						 port);
+	of_node_put(pcie_intc_node);
 	if (!port->leg_domain) {
 		dev_err(dev, "Failed to get a INTx IRQ domain\n");
 		return -ENODEV;
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index fea0b8b..0a3b3f7 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -455,8 +455,17 @@
 	list_add(&slot->slot_list, &pci_hotplug_slot_list);
 
 	result = fs_add_slot(pci_slot);
+	if (result)
+		goto err_list_del;
+
 	kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
 	dbg("Added slot %s to the list\n", name);
+	goto out;
+
+err_list_del:
+	list_del(&slot->slot_list);
+	pci_slot->hotplug = NULL;
+	pci_destroy_slot(pci_slot);
 out:
 	mutex_unlock(&pci_hp_mutex);
 	return result;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 2bba848..56c0b60 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -132,6 +132,7 @@
 void pciehp_queue_pushbutton_work(struct work_struct *work);
 struct controller *pcie_init(struct pcie_device *dev);
 int pcie_init_notification(struct controller *ctrl);
+void pcie_shutdown_notification(struct controller *ctrl);
 int pciehp_enable_slot(struct slot *p_slot);
 int pciehp_disable_slot(struct slot *p_slot);
 void pcie_reenable_notification(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 6620b10..a7485bc1 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -76,6 +76,12 @@
  */
 static void release_slot(struct hotplug_slot *hotplug_slot)
 {
+	struct slot *slot = hotplug_slot->private;
+
+	/* queued work needs hotplug_slot name */
+	cancel_delayed_work(&slot->work);
+	drain_workqueue(slot->wq);
+
 	kfree(hotplug_slot->ops);
 	kfree(hotplug_slot->info);
 	kfree(hotplug_slot);
@@ -278,6 +284,7 @@
 {
 	struct controller *ctrl = get_service_data(dev);
 
+	pcie_shutdown_notification(ctrl);
 	cleanup_slot(ctrl);
 	pciehp_release_ctrl(ctrl);
 }
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8d811ea..8b8b096 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -562,8 +562,6 @@
 {
 	struct controller *ctrl = (struct controller *)dev_id;
 	struct pci_dev *pdev = ctrl_dev(ctrl);
-	struct pci_bus *subordinate = pdev->subordinate;
-	struct pci_dev *dev;
 	struct slot *slot = ctrl->slot;
 	u16 status, events;
 	u8 present;
@@ -611,14 +609,9 @@
 		wake_up(&ctrl->queue);
 	}
 
-	if (subordinate) {
-		list_for_each_entry(dev, &subordinate->devices, bus_list) {
-			if (dev->ignore_hotplug) {
-				ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
-					 events, pci_name(dev));
-				return IRQ_HANDLED;
-			}
-		}
+	if (pdev->ignore_hotplug) {
+		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
+		return IRQ_HANDLED;
 	}
 
 	/* Check Attention Button Pressed */
@@ -786,7 +779,7 @@
 	return 0;
 }
 
-static void pcie_shutdown_notification(struct controller *ctrl)
+void pcie_shutdown_notification(struct controller *ctrl)
 {
 	if (ctrl->notification_enabled) {
 		pcie_disable_notification(ctrl);
@@ -821,7 +814,7 @@
 static void pcie_cleanup_slot(struct controller *ctrl)
 {
 	struct slot *slot = ctrl->slot;
-	cancel_delayed_work(&slot->work);
+
 	destroy_workqueue(slot->wq);
 	kfree(slot);
 }
@@ -902,7 +895,6 @@
 
 void pciehp_release_ctrl(struct controller *ctrl)
 {
-	pcie_shutdown_notification(ctrl);
 	pcie_cleanup_slot(ctrl);
 	kfree(ctrl);
 }
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d966d47..d38d379 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -567,7 +567,7 @@
 	union acpi_object *obj;
 	struct pci_host_bridge *bridge;
 
-	if (acpi_pci_disabled || !bus->bridge)
+	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
 		return;
 
 	acpi_pci_slot_enumerate(bus);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index f9f4d1c..e5d8e2e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -180,13 +180,16 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!val) {
-		if (pci_is_enabled(pdev))
-			pci_disable_device(pdev);
-		else
-			result = -EIO;
-	} else
+	device_lock(dev);
+	if (dev->driver)
+		result = -EBUSY;
+	else if (val)
 		result = pci_enable_device(pdev);
+	else if (pci_is_enabled(pdev))
+		pci_disable_device(pdev);
+	else
+		result = -EIO;
+	device_unlock(dev);
 
 	return result < 0 ? result : count;
 }
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9c13aee..ccbbd4c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1114,12 +1114,12 @@
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-				     u32 saved_val, int retry)
+				     u32 saved_val, int retry, bool force)
 {
 	u32 val;
 
 	pci_read_config_dword(pdev, offset, &val);
-	if (val == saved_val)
+	if (!force && val == saved_val)
 		return;
 
 	for (;;) {
@@ -1138,25 +1138,36 @@
 }
 
 static void pci_restore_config_space_range(struct pci_dev *pdev,
-					   int start, int end, int retry)
+					   int start, int end, int retry,
+					   bool force)
 {
 	int index;
 
 	for (index = end; index >= start; index--)
 		pci_restore_config_dword(pdev, 4 * index,
 					 pdev->saved_config_space[index],
-					 retry);
+					 retry, force);
 }
 
 static void pci_restore_config_space(struct pci_dev *pdev)
 {
 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-		pci_restore_config_space_range(pdev, 10, 15, 0);
+		pci_restore_config_space_range(pdev, 10, 15, 0, false);
 		/* Restore BARs before the command register. */
-		pci_restore_config_space_range(pdev, 4, 9, 10);
-		pci_restore_config_space_range(pdev, 0, 3, 0);
+		pci_restore_config_space_range(pdev, 4, 9, 10, false);
+		pci_restore_config_space_range(pdev, 0, 3, 0, false);
+	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+		pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+		/*
+		 * Force rewriting of prefetch registers to avoid S3 resume
+		 * issues on Intel PCI bridges that occur when these
+		 * registers are not explicitly written.
+		 */
+		pci_restore_config_space_range(pdev, 9, 11, 0, true);
+		pci_restore_config_space_range(pdev, 0, 8, 0, false);
 	} else {
-		pci_restore_config_space_range(pdev, 0, 15, 0);
+		pci_restore_config_space_range(pdev, 0, 15, 0, false);
 	}
 }
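
The new 'force' flag changes exactly one thing: without it the restore skips a config dword whose current value already matches the saved one, with it the dword is rewritten unconditionally, which is what the bridge prefetch window registers (dwords 9-11) need after S3 resume. A small self-contained model of the bridge restore order, with the config space reduced to an array:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy 16-dword config space standing in for a PCI function's header. */
static uint32_t hw_config[16];
static uint32_t saved_config[16];

static void restore_dword(int index, bool force)
{
	/*
	 * Without 'force' the write is skipped when the register already
	 * holds the saved value; 'force' rewrites it unconditionally.
	 */
	if (!force && hw_config[index] == saved_config[index])
		return;
	hw_config[index] = saved_config[index];
	printf("wrote dword %d\n", index);
}

static void restore_range(int start, int end, bool force)
{
	for (int index = end; index >= start; index--)
		restore_dword(index, force);
}

int main(void)
{
	for (int i = 0; i < 16; i++)
		hw_config[i] = saved_config[i] = 0x1000 + i;

	/* Bridge-style restore: dwords 9-11 are rewritten even if unchanged. */
	restore_range(12, 15, false);
	restore_range(9, 11, true);
	restore_range(0, 8, false);
	return 0;
}
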
 
@@ -3407,6 +3418,44 @@
 #endif
 }
 
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+	struct resource **res = ptr;
+
+	pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace().  Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+			   phys_addr_t phys_addr)
+{
+	const struct resource **ptr;
+	int error;
+
+	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	error = pci_remap_iospace(res, phys_addr);
+	if (error) {
+		devres_free(ptr);
+	} else	{
+		*ptr = res;
+		devres_add(dev, ptr);
+	}
+
+	return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
 static void __pci_set_master(struct pci_dev *dev, bool enable)
 {
 	u16 old_cmd, cmd;
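
devm_pci_remap_iospace() follows the standard devres pattern: allocate a release record, attempt the operation, drop the record on failure, otherwise remember the argument and attach the record to the device so the unmap runs automatically on driver detach. A simplified, self-contained model of that pattern (not the kernel's devres implementation):

#include <stdio.h>
#include <stdlib.h>

/* Minimal model of a devres-style cleanup record attached to a device. */
struct devres {
	void (*release)(void *data);
	void *data;
	struct devres *next;
};

struct device { struct devres *res_list; };

static void *devres_alloc(void (*release)(void *), size_t size)
{
	struct devres *dr = calloc(1, sizeof(*dr) + size);

	if (!dr)
		return NULL;
	dr->release = release;
	dr->data = dr + 1;
	return dr->data;
}

static void devres_add(struct device *dev, void *data)
{
	struct devres *dr = (struct devres *)data - 1;

	dr->next = dev->res_list;
	dev->res_list = dr;
}

static void device_release_all(struct device *dev)
{
	for (struct devres *dr = dev->res_list; dr; ) {
		struct devres *next = dr->next;

		dr->release(dr->data);   /* runs automatically on detach */
		free(dr);
		dr = next;
	}
	dev->res_list = NULL;
}

/* The managed operation: map on success, remember how to unmap. */
static void unmap_iospace(void *data) { printf("unmapped %p\n", *(void **)data); }

static int devm_remap_iospace(struct device *dev, void *res)
{
	void **ptr = devres_alloc(unmap_iospace, sizeof(*ptr));

	if (!ptr)
		return -1;
	/* Pretend the real pci_remap_iospace() succeeded; on failure the
	 * record would be freed instead of added. */
	*ptr = res;
	devres_add(dev, ptr);
	return 0;
}

int main(void)
{
	struct device dev = { 0 };
	int dummy;

	devm_remap_iospace(&dev, &dummy);
	device_release_all(&dev);
	return 0;
}
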
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 56340ab..16611cf 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1363,6 +1363,10 @@
 	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
 		return;
 
+	/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
+	if (dev->is_virtfn)
+		return;
+
 	mps = pcie_get_mps(dev);
 	p_mps = pcie_get_mps(bridge);
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a05d143..c7a695c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4236,11 +4236,6 @@
  *
  * 0x9d10-0x9d1b PCI Express Root port #{1-12}
  *
- * The 300 series chipset suffers from the same bug so include those root
- * ports here as well.
- *
- * 0xa32c-0xa343 PCI Express Root port #{0-24}
- *
  * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
  * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
  * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4258,7 +4253,6 @@
 	case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
 	case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
 	case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
-	case 0xa32c ... 0xa343:				/* 300 series */
 		return true;
 	}
 
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 35c1765..87618a4 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -460,8 +460,8 @@
 	const struct nsp_pin_function *func;
 	const struct nsp_pin_group *grp;
 
-	if (grp_select > pinctrl->num_groups ||
-		func_select > pinctrl->num_functions)
+	if (grp_select >= pinctrl->num_groups ||
+	    func_select >= pinctrl->num_functions)
 		return -EINVAL;
 
 	func = &pinctrl->functions[func_select];
@@ -577,6 +577,8 @@
 		return PTR_ERR(pinctrl->base0);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -EINVAL;
 	pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
 					      resource_size(res));
 	if (!pinctrl->base1) {
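
The nsp-mux and imx1 hunks both tighten '>' to '>=': group and function selectors are zero-based indices, so a selector equal to the count is already one past the end of the array. The nsp-mux hunk additionally refuses to dereference a missing second MEM resource. A tiny illustration of why '>=' is the right comparison:

#include <stdio.h>

#define NUM_GROUPS 4

static const char *groups[NUM_GROUPS] = { "g0", "g1", "g2", "g3" };

/* Returns the group name or NULL for an out-of-range selector. */
static const char *lookup(unsigned int grp_select)
{
	/* '>' would let grp_select == NUM_GROUPS through and read past the end. */
	if (grp_select >= NUM_GROUPS)
		return NULL;
	return groups[grp_select];
}

int main(void)
{
	printf("%s\n", lookup(3) ? lookup(3) : "invalid");
	printf("%s\n", lookup(4) ? lookup(4) : "invalid");   /* rejected */
	return 0;
}
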
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index a4e9f43..e2cca91 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -433,7 +433,7 @@
 	const char *name;
 	int i, ret;
 
-	if (group > info->ngroups)
+	if (group >= info->ngroups)
 		return;
 
 	seq_puts(s, "\n");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index b40a074..15aeeb2 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -604,12 +604,17 @@
 {
 	struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
 	void __iomem *reg;
+	u32 padcfg0;
 
 	reg = intel_get_padcfg(pctrl, offset, PADCFG0);
 	if (!reg)
 		return -EINVAL;
 
-	return !!(readl(reg) & PADCFG0_GPIORXSTATE);
+	padcfg0 = readl(reg);
+	if (!(padcfg0 & PADCFG0_GPIOTXDIS))
+		return !!(padcfg0 & PADCFG0_GPIOTXSTATE);
+
+	return !!(padcfg0 & PADCFG0_GPIORXSTATE);
 }
 
 static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
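
intel_gpio_get() now reads PADCFG0 once and, when the TX buffer is enabled (GPIOTXDIS clear), reports the level being driven rather than the RX sample. A standalone sketch of that selection; the bit positions below are placeholders standing in for the PADCFG0 fields:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions modelling the PADCFG0 fields. */
#define GPIOTXSTATE (1u << 0)
#define GPIORXSTATE (1u << 1)
#define GPIOTXDIS   (1u << 8)

static int gpio_get(uint32_t padcfg0)
{
	/* TX buffer enabled (not disabled): report what we are driving. */
	if (!(padcfg0 & GPIOTXDIS))
		return !!(padcfg0 & GPIOTXSTATE);
	/* Otherwise the pad is an input: report the received level. */
	return !!(padcfg0 & GPIORXSTATE);
}

int main(void)
{
	printf("output driving 1 -> %d\n", gpio_get(GPIOTXSTATE));
	printf("input reading 1  -> %d\n", gpio_get(GPIOTXDIS | GPIORXSTATE));
	return 0;
}
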
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 28bbc1b..88ba9c5 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -573,8 +573,10 @@
 		for_each_child_of_node(np_config, np) {
 			ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
 						    &reserved_maps, num_maps);
-			if (ret < 0)
+			if (ret < 0) {
+				of_node_put(np);
 				break;
+			}
 		}
 	}
 
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 9413da3..56ea1f4 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -139,6 +139,16 @@
 	  Technologies Inc MSM8917 platform.
 	  If unsure say N.
 
+config PINCTRL_MSM8940
+	tristate "Qualcomm Technologies Inc MSM8940 pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
+	  Technologies Inc MSM8940 platform.
+	  If unsure say N.
+
 config PINCTRL_SDM845
 	tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
 	depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index dcfe105..4cf3aba 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -629,6 +629,7 @@
 static void msm_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	uint32_t irqtype = irqd_get_trigger_type(d);
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
 	const struct msm_pingroup *g;
 	unsigned long flags;
@@ -638,6 +639,12 @@
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
+	if (irqtype & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
+		val = readl_relaxed(pctrl->regs + g->intr_status_reg);
+		val &= ~BIT(g->intr_status_bit);
+		writel_relaxed(val, pctrl->regs + g->intr_status_reg);
+	}
+
 	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_enable_bit);
 	writel(val, pctrl->regs + g->intr_cfg_reg);
@@ -1307,7 +1314,7 @@
 		pctrl = gpiochip_get_data(gc);
 		if (pctrl->spi_cfg_regs) {
 			spi_cfg_reg = pctrl->spi_cfg_regs +
-					(dir_conn_data->hwirq / 32) * 4;
+					((dir_conn_data->hwirq - 32) / 32) * 4;
 			if (spi_cfg_reg < pctrl->spi_cfg_end) {
 				spin_lock_irqsave(&pctrl->lock, flags);
 				val = scm_io_read(spi_cfg_reg);
@@ -1315,7 +1322,8 @@
 				 * Clear the respective bit for edge type
 				 * interrupt
 				 */
-				val &= ~(1 << (dir_conn_data->hwirq % 32));
+				val &= ~(1 << ((dir_conn_data->hwirq - 32)
+									% 32));
 				WARN_ON(scm_io_write(spi_cfg_reg, val));
 				spin_unlock_irqrestore(&pctrl->lock, flags);
 			} else
@@ -1392,13 +1400,13 @@
 
 	if (pctrl->spi_cfg_regs && type != IRQ_TYPE_NONE) {
 		spi_cfg_reg = pctrl->spi_cfg_regs +
-				(parent_data->hwirq / 32) * 4;
+				((parent_data->hwirq - 32) / 32) * 4;
 		if (spi_cfg_reg < pctrl->spi_cfg_end) {
 			spin_lock_irqsave(&pctrl->lock, flags);
 			val = scm_io_read(spi_cfg_reg);
-			val &= ~(1 << (parent_data->hwirq % 32));
+			val &= ~(1 << ((parent_data->hwirq - 32) % 32));
 			if (config_val)
-				val |= (1 << (parent_data->hwirq % 32));
+				val |= (1 << ((parent_data->hwirq - 32) % 32));
 			WARN_ON(scm_io_write(spi_cfg_reg, val));
 			spin_unlock_irqrestore(&pctrl->lock, flags);
 		} else
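
In these pinctrl-msm hunks, the hardware IRQ numbers that index the SPI configuration registers start at 32, so both the register offset and the bit position must be derived from (hwirq - 32) rather than hwirq itself; using hwirq directly selected a register one slot too far. A short arithmetic check, assuming one 32-bit register per 32 interrupts:

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed layout: one 32-bit config register per 32 interrupts,
 * with hwirq 32 mapping to bit 0 of the first register.
 */
static void locate(uint32_t hwirq, uint32_t *reg_offset, uint32_t *bit)
{
	uint32_t idx = hwirq - 32;          /* SPI numbering starts at 32 */

	*reg_offset = (idx / 32) * 4;       /* byte offset of the register */
	*bit = idx % 32;                    /* bit within that register */
}

int main(void)
{
	uint32_t off, bit;

	locate(32, &off, &bit);
	printf("hwirq 32 -> reg +0x%x bit %u\n", off, bit);   /* +0x0 bit 0 */
	locate(100, &off, &bit);
	printf("hwirq 100 -> reg +0x%x bit %u\n", off, bit);  /* +0x8 bit 4 */
	return 0;
}
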
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index bdce49b..ac83345 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -411,31 +411,47 @@
 
 	switch (param) {
 	case PIN_CONFIG_DRIVE_PUSH_PULL:
-		arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_CMOS;
+		if (pad->buffer_type != PMIC_GPIO_OUT_BUF_CMOS)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
-		arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
+		if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_DRIVE_OPEN_SOURCE:
-		arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
+		if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_BIAS_PULL_DOWN:
-		arg = pad->pullup == PMIC_GPIO_PULL_DOWN;
+		if (pad->pullup != PMIC_GPIO_PULL_DOWN)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_BIAS_DISABLE:
-		arg = pad->pullup = PMIC_GPIO_PULL_DISABLE;
+		if (pad->pullup != PMIC_GPIO_PULL_DISABLE)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_BIAS_PULL_UP:
-		arg = pad->pullup == PMIC_GPIO_PULL_UP_30;
+		if (pad->pullup != PMIC_GPIO_PULL_UP_30)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
-		arg = !pad->is_enabled;
+		if (pad->is_enabled)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_POWER_SOURCE:
 		arg = pad->power_source;
 		break;
 	case PIN_CONFIG_INPUT_ENABLE:
-		arg = pad->input_enabled;
+		if (!pad->input_enabled)
+			return -EINVAL;
+		arg = 1;
 		break;
 	case PIN_CONFIG_OUTPUT_ENABLE:
 		arg = pad->output_enabled;
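
The spmi-gpio hunk switches the boolean pinconf parameters to the usual convention: return -EINVAL when the queried property is not in effect, otherwise report arg = 1, so only active settings get listed. A minimal sketch of that convention for one parameter (names and types here are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>

enum pull { PULL_DISABLE, PULL_DOWN, PULL_UP };

struct pad { enum pull pullup; };

/*
 * Boolean pinconf parameters follow a "present or -EINVAL" convention:
 * -EINVAL when the property is not set, otherwise arg = 1.
 */
static int get_bias_pull_down(const struct pad *pad, unsigned int *arg)
{
	if (pad->pullup != PULL_DOWN)
		return -EINVAL;
	*arg = 1;
	return 0;
}

int main(void)
{
	struct pad a = { .pullup = PULL_DOWN }, b = { .pullup = PULL_UP };
	unsigned int arg;

	printf("pad a: %d\n", get_bias_pull_down(&a, &arg));  /* 0, arg = 1 */
	printf("pad b: %d\n", get_bias_pull_down(&b, &arg));  /* -EINVAL */
	return 0;
}
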
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
index 36d49e4..ebc3b8f 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
@@ -55,6 +55,9 @@
 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE_HI     0x35C
 #define PCIE20_PARF_ATU_BASE_ADDR      0x634
 #define PCIE20_PARF_ATU_BASE_ADDR_HI   0x638
+#define PCIE20_PARF_BUS_DISCONNECT_CTRL          0x648
+#define PCIE20_PARF_BUS_DISCONNECT_STATUS        0x64c
+
 #define PCIE20_PARF_DEVICE_TYPE        0x1000
 
 #define PCIE20_ELBI_VERSION            0x00
@@ -63,6 +66,8 @@
 #define PCIE20_ELBI_CS2_ENABLE         0xA4
 
 #define PCIE20_DEVICE_ID_VENDOR_ID     0x00
+#define PCIE20_MASK_DEVICE_ID          GENMASK(31, 16)
+#define PCIE20_MASK_VENDOR_ID          GENMASK(15, 0)
 #define PCIE20_COMMAND_STATUS          0x04
 #define PCIE20_CLASS_CODE_REVISION_ID  0x08
 #define PCIE20_BIST_HDR_TYPE           0x0C
@@ -323,6 +328,8 @@
 
 	struct msm_bus_scale_pdata   *bus_scale_table;
 	u32                          bus_client;
+	u16                          vendor_id;
+	u16                          device_id;
 	u32                          link_speed;
 	bool                         active_config;
 	bool                         aggregated_irq;
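
The new device/vendor ID masks are consumed later in ep_pcie_core.c by masked field writes into the combined 32-bit ID register. The helper that takes them is not part of this hunk, so the body below is only an assumed shape of such a read-modify-write, shown as a self-contained example with arbitrary example IDs:

#include <stdint.h>
#include <stdio.h>

/* Same construction as the kernel's GENMASK() for 32-bit values. */
#define GENMASK32(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

#define MASK_DEVICE_ID   GENMASK32(31, 16)
#define MASK_VENDOR_ID   GENMASK32(15, 0)

/* Assumed shape of a masked field write: clear the field, then set it. */
static void write_reg_field(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t shift = __builtin_ctz(mask);   /* GCC/Clang builtin: lowest set bit */

	*reg = (*reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t id_reg = 0x0000ffff;            /* device:vendor */

	write_reg_field(&id_reg, MASK_VENDOR_ID, 0x17cb);
	write_reg_field(&id_reg, MASK_DEVICE_ID, 0x0306);
	printf("ID register: 0x%08x\n", id_reg); /* 0x030617cb */
	return 0;
}
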
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
index 0ada0bf..8e21e85 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -133,6 +133,63 @@
 	return true;
 }
 
+static int ep_pcie_reset_init(struct ep_pcie_dev_t *dev)
+{
+	int i, rc = 0;
+	struct ep_pcie_reset_info_t *reset_info;
+
+	for (i = 0; i < EP_PCIE_MAX_RESET; i++) {
+		reset_info = &dev->reset[i];
+		if (!reset_info->hdl)
+			continue;
+
+		rc = reset_control_assert(reset_info->hdl);
+		if (rc) {
+			if (!reset_info->required) {
+				EP_PCIE_ERR(dev,
+				"PCIe V%d: Optional reset: %s assert failed\n",
+					dev->rev, reset_info->name);
+				continue;
+			} else {
+				EP_PCIE_ERR(dev,
+				"PCIe V%d: failed to assert reset for %s\n",
+					dev->rev, reset_info->name);
+				return rc;
+			}
+		} else {
+			EP_PCIE_DBG(dev,
+			"PCIe V%d: successfully asserted reset for %s\n",
+				dev->rev, reset_info->name);
+		}
+		EP_PCIE_ERR(dev, "After Reset assert %s\n",
+						reset_info->name);
+		/* add a 1ms delay to ensure the reset is asserted */
+		usleep_range(1000, 1005);
+
+		rc = reset_control_deassert(reset_info->hdl);
+		if (rc) {
+			if (!reset_info->required) {
+				EP_PCIE_ERR(dev,
+				"PCIe V%d: Optional reset: %s deassert failed\n",
+					dev->rev, reset_info->name);
+				continue;
+			} else {
+				EP_PCIE_ERR(dev,
+				"PCIe V%d: failed to deassert reset for %s\n",
+					dev->rev, reset_info->name);
+				return rc;
+			}
+		} else {
+			EP_PCIE_DBG(dev,
+			"PCIe V%d: successfully deasserted reset for %s\n",
+				dev->rev, reset_info->name);
+		}
+		EP_PCIE_ERR(dev, "After Reset de-assert %s\n",
+						reset_info->name);
+	}
+	return 0;
+}
+
 static int ep_pcie_gpio_init(struct ep_pcie_dev_t *dev)
 {
 	int i, rc = 0;
@@ -277,7 +334,6 @@
 {
 	int i, rc = 0;
 	struct ep_pcie_clk_info_t *info;
-	struct ep_pcie_reset_info_t *reset_info;
 
 	EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
 
@@ -349,34 +405,6 @@
 		regulator_disable(dev->gdsc);
 	}
 
-	for (i = 0; i < EP_PCIE_MAX_RESET; i++) {
-		reset_info = &dev->reset[i];
-		if (reset_info->hdl) {
-			rc = reset_control_assert(reset_info->hdl);
-			if (rc)
-				EP_PCIE_ERR(dev,
-					"PCIe V%d: failed to assert reset for %s.\n",
-					dev->rev, reset_info->name);
-			else
-				EP_PCIE_DBG(dev,
-					"PCIe V%d: successfully asserted reset for %s.\n",
-					dev->rev, reset_info->name);
-
-			/* add a 1ms delay to ensure the reset is asserted */
-			usleep_range(1000, 1005);
-
-			rc = reset_control_deassert(reset_info->hdl);
-			if (rc)
-				EP_PCIE_ERR(dev,
-					"PCIe V%d: failed to deassert reset for %s.\n",
-					dev->rev, reset_info->name);
-			else
-				EP_PCIE_DBG(dev,
-					"PCIe V%d: successfully deasserted reset for %s.\n",
-					dev->rev, reset_info->name);
-		}
-	}
-
 	return rc;
 }
 
@@ -524,11 +552,21 @@
 
 static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured)
 {
+	uint32_t val = 0;
 	EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev);
 
 	/* enable debug IRQ */
 	ep_pcie_write_mask(dev->parf + PCIE20_PARF_DEBUG_INT_EN,
 			0, BIT(3) | BIT(2) | BIT(1));
+	/* Reconnect AXI master port */
+	val = readl_relaxed(dev->parf + PCIE20_PARF_BUS_DISCONNECT_STATUS);
+	if (val & BIT(0)) {
+		EP_PCIE_DBG(dev,
+		"PCIe V%d: AXI Master port was disconnected, reconnecting...\n",
+			dev->rev);
+		ep_pcie_write_mask(dev->parf + PCIE20_PARF_BUS_DISCONNECT_CTRL,
+								0, BIT(0));
+	}
 
 	if (!configured) {
 		/* Configure PCIe to endpoint mode */
@@ -635,6 +673,17 @@
 		ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, 0,
 			BIT(0));
 
+		/* Set Vendor ID and Device ID */
+		if (ep_pcie_dev.device_id != 0xFFFF)
+			ep_pcie_write_reg_field(dev->dm_core,
+						PCIE20_DEVICE_ID_VENDOR_ID,
+						PCIE20_MASK_DEVICE_ID,
+						ep_pcie_dev.device_id);
+		if (ep_pcie_dev.vendor_id != 0xFFFF)
+			ep_pcie_write_reg_field(dev->dm_core,
+						PCIE20_DEVICE_ID_VENDOR_ID,
+						PCIE20_MASK_VENDOR_ID,
+						ep_pcie_dev.vendor_id);
 		/* Set class code and revision ID */
 		ep_pcie_write_reg(dev->dm_core, PCIE20_CLASS_CODE_REVISION_ID,
 			0xff000000);
@@ -722,8 +771,8 @@
 		ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
 	}
 
-	/* Configure MMIO */
-	ep_pcie_config_mmio(dev);
+	if (!configured)
+		ep_pcie_config_mmio(dev);
 }
 
 static void ep_pcie_config_inbound_iatu(struct ep_pcie_dev_t *dev)
@@ -1293,43 +1342,46 @@
 		}
 
 		dev->power_on = true;
+		/* check link status during initial bootup */
+		if (!dev->enumerated) {
+			val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS);
+			val = val & PARF_XMLH_LINK_UP;
+			EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x.\n",
+					dev->rev, val);
+			if (val) {
+				EP_PCIE_INFO(dev,
+					"PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n",
+					dev->rev);
+				ep_pcie_core_init(dev, true);
+				dev->link_status = EP_PCIE_LINK_UP;
+				dev->l23_ready = false;
+				goto checkbme;
+			} else {
+				ltssm_en = readl_relaxed(dev->parf
+					+ PCIE20_PARF_LTSSM) & BIT(8);
+
+				if (ltssm_en) {
+					EP_PCIE_ERR(dev,
+						"PCIe V%d: link is not up when LTSSM has already enabled by bootloader.\n",
+						dev->rev);
+					ret = EP_PCIE_ERROR;
+					goto link_fail;
+				} else {
+					EP_PCIE_DBG(dev,
+						"PCIe V%d: Proceed with regular link training.\n",
+						dev->rev);
+				}
+			}
+		}
+
+		ret = ep_pcie_reset_init(dev);
+		if (ret)
+			goto link_fail;
 	}
 
 	if (!(opt & EP_PCIE_OPT_ENUM))
 		goto out;
 
-	/* check link status during initial bootup */
-	if (!dev->enumerated) {
-		val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS);
-		val = val & PARF_XMLH_LINK_UP;
-		EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x.\n", dev->rev,
-				val);
-		if (val) {
-			EP_PCIE_INFO(dev,
-				"PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n",
-				dev->rev);
-			ep_pcie_core_init(dev, true);
-			dev->link_status = EP_PCIE_LINK_UP;
-			dev->l23_ready = false;
-			goto checkbme;
-		} else {
-			ltssm_en = readl_relaxed(dev->parf
-					+ PCIE20_PARF_LTSSM) & BIT(8);
-
-			if (ltssm_en) {
-				EP_PCIE_ERR(dev,
-					"PCIe V%d: link is not up when LTSSM has already enabled by bootloader.\n",
-					dev->rev);
-				ret = EP_PCIE_ERROR;
-				goto link_fail;
-			} else {
-				EP_PCIE_DBG(dev,
-					"PCIe V%d: Proceed with regular link training.\n",
-					dev->rev);
-			}
-		}
-	}
-
 	if (opt & EP_PCIE_OPT_AST_WAKE) {
 		/* assert PCIe WAKE# */
 		EP_PCIE_INFO(dev, "PCIe V%d: assert PCIe WAKE#.\n",
@@ -2477,6 +2529,30 @@
 		EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: pcie-link-speed:%d.\n",
 			ep_pcie_dev.rev, ep_pcie_dev.link_speed);
 
+	ep_pcie_dev.vendor_id = 0xFFFF;
+	ret = of_property_read_u16((&pdev->dev)->of_node,
+				"qcom,pcie-vendor-id",
+				&ep_pcie_dev.vendor_id);
+	if (ret)
+		EP_PCIE_DBG(&ep_pcie_dev,
+				"PCIe V%d: pcie-vendor-id does not exist.\n",
+				ep_pcie_dev.rev);
+	else
+		EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: pcie-vendor-id:%d.\n",
+				ep_pcie_dev.rev, ep_pcie_dev.vendor_id);
+
+	ep_pcie_dev.device_id = 0xFFFF;
+	ret = of_property_read_u16((&pdev->dev)->of_node,
+				"qcom,pcie-device-id",
+				&ep_pcie_dev.device_id);
+	if (ret)
+		EP_PCIE_DBG(&ep_pcie_dev,
+				"PCIe V%d: pcie-device-id does not exist.\n",
+				ep_pcie_dev.rev);
+	else
+		EP_PCIE_DBG(&ep_pcie_dev, "PCIe V%d: pcie-device-id:%d.\n",
+				ep_pcie_dev.rev, ep_pcie_dev.device_id);
+
 	ret = of_property_read_u32((&pdev->dev)->of_node,
 				"qcom,dbi-base-reg",
 				&ep_pcie_dev.dbi_base_reg);
@@ -2727,7 +2803,7 @@
 	platform_driver_unregister(&ep_pcie_driver);
 }
 
-module_init(ep_pcie_init);
+subsys_initcall(ep_pcie_init);
 module_exit(ep_pcie_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("MSM PCIe Endpoint Driver");
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 3a75bdd..0ab6e90 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -182,7 +182,12 @@
 	__stringify(IPA_CLIENT_TEST3_CONS),
 	__stringify(IPA_CLIENT_TEST4_PROD),
 	__stringify(IPA_CLIENT_TEST4_CONS),
+	__stringify(RESERVERD_PROD_72),
 	__stringify(IPA_CLIENT_DUMMY_CONS),
+	__stringify(RESERVERD_PROD_74),
+	__stringify(IPA_CLIENT_MHI_DPL_CONS),
+	__stringify(RESERVERD_PROD_76),
+	__stringify(IPA_CLIENT_DUMMY_CONS1)
 };
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
index 4e3a565..9693014 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
@@ -160,6 +160,7 @@
 	struct mutex iface_lock[MAX_SUPPORTED_IFACE];
 	spinlock_t iface_spinlock[MAX_SUPPORTED_IFACE];
 	u32 pm_hdl;
+	atomic_t disconnect_in_progress;
 };
 
 static struct ipa_gsb_context *ipa_gsb_ctx;
@@ -676,7 +677,7 @@
 
 	skb = (struct sk_buff *)data;
 
-	while (skb->len) {
+	while (skb && skb->len) {
 		mux_hdr = (struct ipa_gsb_mux_hdr *)skb->data;
 		pkt_size = mux_hdr->pkt_size;
 		/* 4-byte padding */
@@ -743,7 +744,8 @@
 	/* change to host order */
 	*(u32 *)mux_hdr = ntohl(*(u32 *)mux_hdr);
 	hdl = mux_hdr->iface_hdl;
-	if (!ipa_gsb_ctx->iface[hdl]) {
+	if ((hdl < 0) || (hdl >= MAX_SUPPORTED_IFACE) ||
+		!ipa_gsb_ctx->iface[hdl]) {
 		IPA_GSB_ERR("invalid hdl: %d and cb, drop the skb\n", hdl);
 		dev_kfree_skb_any(skb);
 		return;
@@ -851,16 +853,19 @@
 		return 0;
 	}
 
+	mutex_lock(&ipa_gsb_ctx->lock);
 	if (ipa_gsb_ctx->num_connected_iface == 0) {
 		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
 		if (ret) {
 			IPA_GSB_ERR("failed to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
 			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 			return ret;
 		}
 		ret = ipa_gsb_connect_sys_pipe();
 		if (ret) {
 			IPA_GSB_ERR("fail to connect pipe\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
 			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 			return ret;
 		}
@@ -876,7 +881,7 @@
 	ipa_gsb_ctx->num_resumed_iface++;
 	IPA_GSB_DBG("num resumed iface: %d\n",
 		ipa_gsb_ctx->num_resumed_iface);
-
+	mutex_unlock(&ipa_gsb_ctx->lock);
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return 0;
 }
@@ -908,7 +913,7 @@
 
 int ipa_bridge_disconnect(u32 hdl)
 {
-	int ret;
+	int ret = 0;
 
 	if (!ipa_gsb_ctx) {
 		IPA_GSB_ERR("ipa_gsb_ctx was not initialized\n");
@@ -923,31 +928,34 @@
 	IPA_GSB_DBG("client hdl: %d\n", hdl);
 
 	mutex_lock(&ipa_gsb_ctx->iface_lock[hdl]);
+	atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 1);
+
 	if (!ipa_gsb_ctx->iface[hdl]) {
 		IPA_GSB_ERR("fail to find interface, hdl: %d\n", hdl);
-		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto fail;
 	}
 
 	if (!ipa_gsb_ctx->iface[hdl]->is_connected) {
 		IPA_GSB_DBG("iface was not connected\n");
-		mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
-		return 0;
+		ret = 0;
+		goto fail;
 	}
 
+	mutex_lock(&ipa_gsb_ctx->lock);
 	if (ipa_gsb_ctx->num_connected_iface == 1) {
 		ret = ipa_gsb_disconnect_sys_pipe();
 		if (ret) {
 			IPA_GSB_ERR("fail to discon pipes\n");
-			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto fail;
 		}
 
 		ret = ipa_pm_deactivate_sync(ipa_gsb_ctx->pm_hdl);
 		if (ret) {
 			IPA_GSB_ERR("failed to deactivate ipa pm\n");
-			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto fail;
 		}
 	}
 
@@ -964,8 +972,11 @@
 			ipa_gsb_ctx->num_resumed_iface);
 	}
 
+fail:
+	mutex_unlock(&ipa_gsb_ctx->lock);
+	atomic_set(&ipa_gsb_ctx->disconnect_in_progress, 0);
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(ipa_bridge_disconnect);
 
@@ -1004,10 +1015,12 @@
 		return 0;
 	}
 
+	mutex_lock(&ipa_gsb_ctx->lock);
 	if (ipa_gsb_ctx->num_resumed_iface == 0) {
 		ret = ipa_pm_activate_sync(ipa_gsb_ctx->pm_hdl);
 		if (ret) {
 			IPA_GSB_ERR("fail to activate ipa pm\n");
+			mutex_unlock(&ipa_gsb_ctx->lock);
 			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 			return ret;
 		}
@@ -1018,6 +1031,7 @@
 			IPA_GSB_ERR(
 				"fail to start con ep %d\n",
 				ret);
+			mutex_unlock(&ipa_gsb_ctx->lock);
 			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 			return ret;
 		}
@@ -1028,6 +1042,7 @@
 	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
 		ipa_gsb_ctx->num_resumed_iface);
 
+	mutex_unlock(&ipa_gsb_ctx->lock);
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return 0;
 }
@@ -1068,6 +1083,7 @@
 		return 0;
 	}
 
+	mutex_lock(&ipa_gsb_ctx->lock);
 	if (ipa_gsb_ctx->num_resumed_iface == 1) {
 		ret = ipa_stop_gsi_channel(
 			ipa_gsb_ctx->cons_hdl);
@@ -1075,6 +1091,7 @@
 			IPA_GSB_ERR(
 				"fail to stop cons ep %d\n",
 				ret);
+			mutex_unlock(&ipa_gsb_ctx->lock);
 			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 			return ret;
 		}
@@ -1083,6 +1100,7 @@
 		if (ret) {
 			IPA_GSB_ERR("fail to deactivate ipa pm\n");
 			ipa_start_gsi_channel(ipa_gsb_ctx->cons_hdl);
+			mutex_unlock(&ipa_gsb_ctx->lock);
 			mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 			return ret;
 		}
@@ -1093,6 +1111,7 @@
 	IPA_GSB_DBG_LOW("num resumed iface: %d\n",
 		ipa_gsb_ctx->num_resumed_iface);
 
+	mutex_unlock(&ipa_gsb_ctx->lock);
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return 0;
 }
@@ -1140,6 +1159,11 @@
 		return -EFAULT;
 	}
 
+	if (unlikely(atomic_read(&ipa_gsb_ctx->disconnect_in_progress))) {
+		IPA_GSB_ERR("ipa bridge disconnect_in_progress\n");
+		return -EFAULT;
+	}
+
 	/* make sure skb has enough headroom */
 	if (unlikely(skb_headroom(skb) < sizeof(struct ipa_gsb_mux_hdr))) {
 		IPA_GSB_DBG_LOW("skb doesn't have enough headroom\n");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 5085d75..6c66600 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -67,7 +67,7 @@
 #define IPA_MHI_SUSPEND_SLEEP_MAX 1100
 
 #define IPA_MHI_MAX_UL_CHANNELS 1
-#define IPA_MHI_MAX_DL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 2
 
 /* bit #40 in address should be asserted for MHI transfers over pcie */
 #define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index 2b58121..286e90e 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -485,6 +485,11 @@
 	.zeroes = {0},
 };
 
+static void rndis_ipa_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
 /**
  * rndis_ipa_init() - create network device and initialize internal
  *  data structures
@@ -709,6 +714,8 @@
 	int result;
 	int ret;
 	unsigned long flags;
+	struct ipa_ecm_msg *rndis_msg;
+	struct ipa_msg_meta msg_meta;
 
 	RNDIS_IPA_LOG_ENTRY();
 
@@ -796,6 +803,26 @@
 	}
 	RNDIS_IPA_DEBUG("netif_carrier_on() was called\n");
 
+	rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
+	if (!rndis_msg) {
+		result = -ENOMEM;
+		goto fail;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_CONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
+
+	result = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
+	if (result) {
+		RNDIS_IPA_ERROR("fail to send ECM_CONNECT for rndis\n");
+		kfree(rndis_msg);
+		goto fail;
+	}
+
 	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
 	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
 					  RNDIS_IPA_CONNECT);
@@ -1260,6 +1287,8 @@
 	int retval;
 	int ret;
 	unsigned long flags;
+	struct ipa_ecm_msg *rndis_msg;
+	struct ipa_msg_meta msg_meta;
 
 	RNDIS_IPA_LOG_ENTRY();
 
@@ -1291,6 +1320,24 @@
 	netif_carrier_off(rndis_ipa_ctx->net);
 	RNDIS_IPA_DEBUG("carrier_off notification was sent\n");
 
+	rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
+	if (!rndis_msg)
+		return -ENOMEM;
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_DISCONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
+
+	retval = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
+	if (retval) {
+		RNDIS_IPA_ERROR("fail to send ECM_DISCONNECT for rndis\n");
+		kfree(rndis_msg);
+		return -EPERM;
+	}
+
 	netif_stop_queue(rndis_ipa_ctx->net);
 	RNDIS_IPA_DEBUG("queue stopped\n");
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index 0351442..3f4f0e8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -35,6 +35,13 @@
 #define NAT_TABLE_ENTRY_SIZE_BYTE 32
 #define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
 
+/*
+ * Max NAT table entries is limited 1000 entries.
+ * Limit the memory size required by user to prevent kernel memory starvation
+ */
+#define IPA_TABLE_MAX_ENTRIES 1000
+#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE)
+
 static int ipa_nat_vma_fault_remap(
 	 struct vm_area_struct *vma, struct vm_fault *vmf)
 {
@@ -270,6 +277,13 @@
 		goto bail;
 	}
 
+	if (mem->size > MAX_ALLOC_NAT_SIZE) {
+		IPAERR("Trying allocate more size = %zu, Max allowed = %d\n",
+				mem->size, MAX_ALLOC_NAT_SIZE);
+		result = -EPERM;
+		goto bail;
+	}
+
 	if (mem->size <= 0 ||
 			nat_ctx->is_dev_init == true) {
 		IPAERR_RL("Invalid Parameters or device is already init\n");
@@ -327,14 +341,18 @@
 	size_t tmp;
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
+	mutex_lock(&ipa_ctx->nat_mem.lock);
+
 	if (!ipa_ctx->nat_mem.is_dev_init) {
 		IPAERR_RL("Nat table not initialized\n");
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 
 	IPADBG("\n");
 	if (init->table_entries == 0) {
 		IPADBG("Table entries is zero\n");
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 
@@ -342,6 +360,7 @@
 	if (init->ipv4_rules_offset >
 		(UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) {
 		IPAERR_RL("Detected overflow\n");
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 	/* Check Table Entry offset is not
@@ -354,6 +373,7 @@
 		IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
 			init->ipv4_rules_offset, (init->table_entries + 1),
 			tmp, ipa_ctx->nat_mem.size);
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 
@@ -361,6 +381,7 @@
 	if (init->expn_rules_offset >
 		UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
 		IPAERR_RL("Detected overflow\n");
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 	/* Check Expn Table Entry offset is not
@@ -373,6 +394,7 @@
 		IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
 			init->expn_rules_offset, init->expn_table_entries,
 			tmp, ipa_ctx->nat_mem.size);
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 
@@ -380,6 +402,7 @@
 	if (init->index_offset >
 		UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
 		IPAERR_RL("Detected overflow\n");
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 	/* Check Indx Table Entry offset is not
@@ -392,6 +415,7 @@
 		IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
 			init->index_offset, (init->table_entries + 1),
 			tmp, ipa_ctx->nat_mem.size);
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 
@@ -399,6 +423,7 @@
 	if (init->index_expn_offset >
 		(UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) {
 		IPAERR_RL("Detected overflow\n");
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 	/* Check Expn Table entry offset is not
@@ -411,6 +436,7 @@
 		IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
 			init->index_expn_offset, init->expn_table_entries,
 			tmp, ipa_ctx->nat_mem.size);
+		mutex_unlock(&ipa_ctx->nat_mem.lock);
 		return -EPERM;
 	}
 
@@ -559,6 +585,7 @@
 free_nop:
 	kfree(reg_write_nop);
 bail:
+	mutex_unlock(&ipa_ctx->nat_mem.lock);
 	return result;
 }
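
The validation pattern repeated through the NAT init path guards the offset arithmetic against unsigned wrap-around by comparing the offset against UINT_MAX minus the table size before ever forming offset + size, and the whole path now runs under nat_mem.lock with every early return unlocking. A compact model of the overflow-safe size check:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define TBL_ENTRY_SIZE 32u

/*
 * Validate that 'offset + TBL_ENTRY_SIZE * (entries + 1)' fits in the
 * mapped memory without the sum wrapping.  The overflow test must come
 * first, otherwise a wrapped sum could pass the size comparison.
 */
static bool table_fits(unsigned int offset, unsigned int entries, size_t mem_size)
{
	if (offset > UINT_MAX - TBL_ENTRY_SIZE * (entries + 1))
		return false;                         /* would overflow */
	return (size_t)offset + TBL_ENTRY_SIZE * (entries + 1) <= mem_size;
}

int main(void)
{
	printf("%d\n", table_fits(0, 100, 65536));             /* 1: fits */
	printf("%d\n", table_fits(UINT_MAX - 16, 100, 65536)); /* 0: overflow */
	return 0;
}
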
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
index 98f5574..f60a4c7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
@@ -55,6 +55,16 @@
 			DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
+#define IPAWANERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited_ipa(DEV_NAME " %s:%d " fmt, __func__,\
+			__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
 #define IPAWANINFO(fmt, args...) \
 	do { \
 		pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 073409b..be52968 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1344,6 +1344,8 @@
 	struct ipa_rt_entry *rule_next;
 	struct ipa_rt_tbl_set *rset;
 	u32 apps_start_idx;
+	struct ipa_hdr_entry *hdr_entry;
+	struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;
 	int id;
 	bool tbl_user = false;
 
@@ -1397,6 +1399,27 @@
 			if (!user_only ||
 				rule->ipacm_installed) {
 				list_del(&rule->link);
+				if (rule->hdr) {
+					hdr_entry = ipa_id_find(
+						rule->rule.hdr_hdl);
+					if (!hdr_entry ||
+					hdr_entry->cookie != IPA_HDR_COOKIE) {
+						IPAERR_RL(
+						"Header already deleted\n");
+						return -EINVAL;
+					}
+				} else if (rule->proc_ctx) {
+					hdr_proc_entry =
+						ipa_id_find(
+						rule->rule.hdr_proc_ctx_hdl);
+					if (!hdr_proc_entry ||
+						hdr_proc_entry->cookie !=
+						IPA_PROC_HDR_COOKIE) {
+					IPAERR_RL(
+						"Proc entry already deleted\n");
+						return -EINVAL;
+					}
+				}
 				tbl->rule_cnt--;
 				if (rule->hdr)
 					__ipa_release_hdr(rule->hdr->id);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 681b009..d68fe31 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -903,7 +903,7 @@
 		ipa_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
 			wdi_sap_stats);
 	} else {
-		IPAERR("uc_wdi_ctx.stats_notify not registered\n");
+		IPAERR_RL("uc_wdi_ctx.stats_notify not registered\n");
 		return -EFAULT;
 	}
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 1df1232..a9040a6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -3066,7 +3066,8 @@
 		rc = rmnet_ipa_query_tethering_stats_wifi(
 			&tether_stats, data->reset_stats);
 		if (rc) {
-			IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+			IPAWANERR_RL(
+				"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
 			return rc;
 		}
 		data->tx_bytes = tether_stats.ipv4_tx_bytes
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
index 0c1cabf..904bcde 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -162,7 +162,7 @@
 		}
 		if (rmnet_ipa_poll_tethering_stats(
 		(struct wan_ioctl_poll_tethering_stats *)param)) {
-			IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
+			IPAWANERR_RL("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
 			retval = -EFAULT;
 			break;
 		}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index bbc3a4f..c84c2d0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -68,7 +68,8 @@
 		 * if DPL client is not pulling the data
 		 * on other end from IPA hw.
 		 */
-		if (ep->client == IPA_CLIENT_USB_DPL_CONS)
+		if ((ep->client == IPA_CLIENT_USB_DPL_CONS) ||
+				(ep->client == IPA_CLIENT_MHI_DPL_CONS))
 			holb_cfg.en = IPA_HOLB_TMR_EN;
 		else
 			holb_cfg.en = IPA_HOLB_TMR_DIS;
@@ -1844,9 +1845,7 @@
 	/* Set disconnect in progress flag so further flow control events are
 	 * not honored.
 	 */
-	spin_lock(&ipa3_ctx->disconnect_lock);
-	ep->disconnect_in_progress = true;
-	spin_unlock(&ipa3_ctx->disconnect_lock);
+	atomic_set(&ep->disconnect_in_progress, 1);
 
 	/* If flow is disabled at this point, restore the ep state.*/
 	ep_ctrl.ipa_ep_delay = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 690d564..ec1d4d9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1771,7 +1771,7 @@
 
 	buff = kzalloc(buff_size, GFP_KERNEL);
 	if (buff == NULL)
-		return 0;
+		return -ENOMEM;
 
 	if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
 		pos += scnprintf(buff + pos, buff_size - pos,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index e6dae89..bbf7085 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2610,11 +2610,8 @@
 	metadata = status.metadata;
 	ucp = status.ucp;
 	ep = &ipa3_ctx->ep[src_pipe];
-	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
-		!ep->valid ||
-		!ep->client_notify)) {
-		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
-		  src_pipe, ep->valid, ep->client_notify);
+	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes)) {
+		IPAERR("drop pipe=%d\n", src_pipe);
 		dev_kfree_skb_any(rx_skb);
 		return;
 	}
@@ -2636,7 +2633,12 @@
 			metadata, *(u32 *)rx_skb->cb);
 	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
 
-	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+	if (likely((!atomic_read(&ep->disconnect_in_progress)) &&
+				ep->valid && ep->client_notify))
+		ep->client_notify(ep->priv, IPA_RECEIVE,
+				(unsigned long)(rx_skb));
+	else
+		dev_kfree_skb_any(rx_skb);
 }
 
 static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
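
Across these IPA v3 hunks the endpoint flag moves from a spinlock-protected bool to an atomic_t, so the rx handler in ipa_dp.c can test it without taking a lock and drops the skb instead of calling into the client once a disconnect has started or the endpoint is no longer valid. A userspace sketch of that lock-free check using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of an endpoint whose teardown races with packet delivery. */
struct endpoint {
	atomic_int disconnect_in_progress;
	bool valid;
	void (*client_notify)(const char *pkt);
};

static void deliver(struct endpoint *ep, const char *pkt)
{
	/* Lock-free check on the fast path; drop once disconnect has begun. */
	if (!atomic_load(&ep->disconnect_in_progress) &&
	    ep->valid && ep->client_notify)
		ep->client_notify(pkt);
	else
		printf("dropped %s\n", pkt);
}

static void notify(const char *pkt) { printf("delivered %s\n", pkt); }

int main(void)
{
	struct endpoint ep = { .valid = true, .client_notify = notify };

	deliver(&ep, "pkt1");
	atomic_store(&ep.disconnect_in_progress, 1);  /* teardown starts */
	deliver(&ep, "pkt2");                         /* dropped */
	return 0;
}
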
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6dc2905..6740fcb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -768,7 +768,7 @@
 	bool keep_ipa_awake;
 	struct ipa3_wlan_stats wstats;
 	u32 uc_offload_state;
-	bool disconnect_in_progress;
+	atomic_t disconnect_in_progress;
 	u32 qmi_request_sent;
 	bool napi_enabled;
 	u32 eot_in_poll_err;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 9075237..c4eb731 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -61,7 +61,7 @@
 	IPA_MHI_DBG("EXIT\n")
 
 #define IPA_MHI_MAX_UL_CHANNELS 1
-#define IPA_MHI_MAX_DL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 2
 
 /* bit #40 in address should be asserted for MHI transfers over pcie */
 #define IPA_MHI_HOST_ADDR_COND(addr) \
@@ -283,8 +283,10 @@
 	ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
 			params->ch_ctx_host->rbase);
 
-	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
-		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+	/* Burst mode is not supported on DPL pipes */
+	if ((client != IPA_CLIENT_MHI_DPL_CONS) &&
+		(params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE)) {
 		burst_mode_enabled = true;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 2716d4a..12d6274 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -242,6 +242,7 @@
 	dev->smem_offset = smem_offset;
 
 	dev->is_dev_init = true;
+	dev->tmp_mem = tmp_mem;
 	mutex_unlock(&dev->lock);
 
 	IPADBG("ipa dev %s added successful. major:%d minor:%d\n", name,
@@ -265,6 +266,9 @@
 
 	mutex_lock(&dev->lock);
 
+	dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+		dev->tmp_mem->vaddr, dev->tmp_mem->dma_handle);
+	kfree(dev->tmp_mem);
 	device_destroy(dev->class, dev->dev_num);
 	unregister_chrdev_region(dev->dev_num, 1);
 	class_destroy(dev->class);
@@ -838,19 +842,24 @@
 
 	IPADBG("\n");
 
+	mutex_lock(&ipa3_ctx->nat_mem.dev.lock);
+
 	if (!ipa3_ctx->nat_mem.dev.is_mapped) {
 		IPAERR_RL("attempt to init %s before mmap\n",
 			ipa3_ctx->nat_mem.dev.name);
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return -EPERM;
 	}
 
 	if (init->tbl_index >= 1) {
 		IPAERR_RL("Unsupported table index %d\n", init->tbl_index);
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return -EPERM;
 	}
 
 	if (init->table_entries == 0) {
 		IPAERR_RL("Table entries is zero\n");
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return -EPERM;
 	}
 
@@ -861,6 +870,7 @@
 		IPAHAL_NAT_IPV4);
 	if (result) {
 		IPAERR_RL("Bad params for NAT base table\n");
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return result;
 	}
 
@@ -871,6 +881,7 @@
 		IPAHAL_NAT_IPV4);
 	if (result) {
 		IPAERR_RL("Bad params for NAT expansion table\n");
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return result;
 	}
 
@@ -881,6 +892,7 @@
 		IPAHAL_NAT_IPV4_INDEX);
 	if (result) {
 		IPAERR_RL("Bad params for index table\n");
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return result;
 	}
 
@@ -891,6 +903,7 @@
 		IPAHAL_NAT_IPV4_INDEX);
 	if (result) {
 		IPAERR_RL("Bad params for index expansion table\n");
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return result;
 	}
 
@@ -924,6 +937,7 @@
 	result = ipa3_nat_send_init_cmd(&cmd, false);
 	if (result) {
 		IPAERR("Fail to send NAT init immediate command\n");
+		mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
 		return result;
 	}
 
@@ -949,6 +963,8 @@
 				 ipa3_ctx->nat_mem.index_table_expansion_addr);
 
 	ipa3_ctx->nat_mem.dev.is_hw_init = true;
+	mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
+
 	IPADBG("return\n");
 	return 0;
 }
@@ -1073,26 +1089,32 @@
 	struct ipahal_imm_cmd_pyld *cmd_pyld;
 	int result = 0;
 	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
-	struct ipa_pdn_entry *pdn_entries = nat_ctx->pdn_mem.base;
+	struct ipa_pdn_entry *pdn_entries = NULL;
 
 	IPADBG("\n");
 
+	mutex_lock(&nat_ctx->dev.lock);
+
 	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
 		IPAERR_RL("IPA HW does not support multi PDN\n");
-		return -EPERM;
+		result = -EPERM;
+		goto bail;
 	}
+
 	if (!nat_ctx->dev.is_mem_allocated) {
 		IPAERR_RL(
 			"attempt to modify a PDN entry before the PDN table memory allocation\n");
-		return -EPERM;
+		result = -EPERM;
+		goto bail;
 	}
 
 	if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) {
 		IPAERR_RL("pdn index out of range %d\n", mdfy_pdn->pdn_index);
-		return -EPERM;
+		result = -EPERM;
+		goto bail;
 	}
 
-	mutex_lock(&nat_ctx->dev.lock);
+	pdn_entries = nat_ctx->pdn_mem.base;
 
 	/* store ip in pdn entries cache array */
 	pdn_entries[mdfy_pdn->pdn_index].public_ip =
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 3210a70..7f3814d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -59,6 +59,16 @@
 				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
+#define IPAWANERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited_ipa(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
 #define IPAWANINFO(fmt, args...) \
 	do { \
 		pr_info(DEV_NAME " %s:%d " fmt, __func__,\
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index dc76140..8a7b414 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1047,13 +1047,12 @@
 		goto error;
 	}
 	/*
-	 * do not allow any rules to be added at end of the "default" routing
-	 * tables
+	 * do not allow any rule to be added at "default" routing
+	 * table
 	 */
 	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
-	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
-		IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d"
-				, tbl->rule_cnt, at_rear);
+	    (tbl->rule_cnt > 0)) {
+		IPAERR_RL("cannot add rules to default rt table\n");
 		goto error;
 	}
 
@@ -1276,13 +1275,12 @@
 	}
 
 	/*
-	 * do not allow any rules to be added at end of the "default" routing
-	 * tables
+	 * do not allow any rule to be added at "default" routing
+	 * table
 	 */
 	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
-			(&entry->link == tbl->head_rt_rule_list.prev)) {
-		IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n",
-			tbl->rule_cnt);
+		(tbl->rule_cnt > 0)) {
+		IPAERR_RL("cannot add rules to default rt table\n");
 		ret = -EINVAL;
 		goto bail;
 	}
@@ -1489,6 +1487,8 @@
 	struct ipa3_rt_entry *rule;
 	struct ipa3_rt_entry *rule_next;
 	struct ipa3_rt_tbl_set *rset;
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
 	u32 apps_start_idx;
 	int id;
 	bool tbl_user = false;
@@ -1542,6 +1542,27 @@
 			if (!user_only ||
 				rule->ipacm_installed) {
 				list_del(&rule->link);
+				if (rule->hdr) {
+					hdr_entry = ipa3_id_find(
+							rule->rule.hdr_hdl);
+					if (!hdr_entry ||
+					hdr_entry->cookie != IPA_HDR_COOKIE) {
+						IPAERR_RL(
+						"Header already deleted\n");
+						return -EINVAL;
+					}
+				} else if (rule->proc_ctx) {
+					hdr_proc_entry =
+						ipa3_id_find(
+						rule->rule.hdr_proc_ctx_hdl);
+					if (!hdr_proc_entry ||
+						hdr_proc_entry->cookie !=
+							IPA_PROC_HDR_COOKIE) {
+						IPAERR_RL(
+						"Proc entry already deleted\n");
+						return -EINVAL;
+					}
+				}
 				tbl->rule_cnt--;
 				if (rule->hdr)
 					__ipa3_release_hdr(rule->hdr->id);
@@ -1549,7 +1570,9 @@
 					__ipa3_release_hdr_proc_ctx(
 						rule->proc_ctx->id);
 				rule->cookie = 0;
-				idr_remove(tbl->rule_ids, rule->rule_id);
+				if (!rule->rule_id_valid)
+					idr_remove(tbl->rule_ids,
+						rule->rule_id);
 				id = rule->id;
 				kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
 
@@ -1742,6 +1765,10 @@
 		goto error;
 	}
 
+	if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) {
+		IPAERR_RL("Default tbl rule cannot be modified\n");
+		return -EINVAL;
+	}
 	/* Adding check to confirm still
 	 * header entry present in header table or not
 	 */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index e746229..89dde7d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -569,7 +569,11 @@
 		if (ipa3_ctx->uc_ctx.uc_status ==
 		    IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE ||
 		    ipa3_ctx->uc_ctx.uc_status ==
-		    IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE) {
+		    IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE ||
+		    ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_CONS_STOP_FAILURE ||
+		    ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_PROD_STOP_FAILURE) {
 			retries++;
 			if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) {
 				IPAERR("Failed after %d tries\n", retries);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index 2401166..63b57bb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -551,6 +551,9 @@
 		return -EFAULT;
 	}
 
+	atomic_set(&ep_ul->disconnect_in_progress, 1);
+	atomic_set(&ep_dl->disconnect_in_progress, 1);
+
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
 		cmd.size = sizeof(*cmd_data_v4_0);
 	else
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 8d415a1..bda4219 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -92,6 +92,8 @@
  * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
  * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
  * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ * @IPA_HW_CONS_STOP_FAILURE : NTN/ETH CONS stop failed
+ * @IPA_HW_PROD_STOP_FAILURE : NTN/ETH PROD stop failed
  */
 enum ipa3_hw_errors {
 	IPA_HW_ERROR_NONE              =
@@ -111,7 +113,11 @@
 	IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
 	IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+	IPA_HW_CONS_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+	IPA_HW_PROD_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10)
 };
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index b7a561e..6f19081 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1444,6 +1444,18 @@
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 5, 9, 9, IPA_EE_Q6, GSI_USE_PREFETCH_BUFS } },
+	[IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 15, 7, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
+	[IPA_4_0_MHI][IPA_CLIENT_MHI_DPL_CONS]        = {
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 12, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY } },
 	/* Only for test purpose */
 	[IPA_4_0_MHI][IPA_CLIENT_TEST_CONS]           = {
 			true, IPA_v4_0_GROUP_UL_DL,
@@ -2267,7 +2279,7 @@
 		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
 			wdi_sap_stats);
 	} else {
-		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		IPAERR_RL("uc_wdi_ctx.stats_notify NULL\n");
 		return -EFAULT;
 	}
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 1a9630e..bf6c443 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1075,8 +1075,12 @@
  */
 static int ipa3_wwan_stop(struct net_device *dev)
 {
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
 	IPAWANDBG("[%s] ipa3_wwan_stop()\n", dev->name);
 	__ipa_wwan_close(dev);
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		napi_disable(&(wwan_ptr->napi));
 	netif_stop_queue(dev);
 	return 0;
 }
@@ -3131,7 +3135,7 @@
 
 	rc = ipa3_get_wlan_stats(sap_stats);
 	if (rc) {
-		IPAWANERR("can't get ipa3_get_wlan_stats\n");
+		IPAWANERR_RL("can't get ipa3_get_wlan_stats\n");
 		kfree(sap_stats);
 		return rc;
 	} else if (data == NULL) {
@@ -3464,7 +3468,7 @@
 			return rc;
 		}
 	} else {
-		IPAWANDBG_LOW(" query modem-backhaul stats\n");
+		IPAWANDBG_LOW("query modem-backhaul stats\n");
 		rc = rmnet_ipa3_query_tethering_stats_modem(
 			data, false);
 		if (rc) {
@@ -3498,7 +3502,8 @@
 		rc = rmnet_ipa3_query_tethering_stats_wifi(
 			&tether_stats, data->reset_stats);
 		if (rc) {
-			IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+			IPAWANERR_RL(
+				"wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
 			return rc;
 		}
 		data->tx_bytes = tether_stats.ipv4_tx_bytes
@@ -3730,6 +3735,17 @@
 	struct ipa_lan_client *lan_client = NULL;
 	int i;
 
+	IPAWANDBG("Delete lan client info: %d, %d, %d\n",
+		rmnet_ipa3_ctx->tether_device[device_type].num_clients,
+		lan_clnt_idx, device_type);
+	/* Check if Device type is valid. */
+
+	if (device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+		device_type < 0) {
+		IPAWANERR("Invalid Device type: %d\n", device_type);
+		return -EINVAL;
+	}
+
 	/* Check if the request is to clean up all clients. */
 	if (lan_clnt_idx == 0xffffffff) {
 		/* Reset the complete device info. */
@@ -3746,6 +3762,8 @@
 		/* Reset the client info before sending the message. */
 		memset(lan_client, 0, sizeof(struct ipa_lan_client));
 		lan_client->client_idx = -1;
+		/* Decrement the number of clients. */
+		rmnet_ipa3_ctx->tether_device[device_type].num_clients--;
 
 	}
 	return 0;
@@ -3866,6 +3884,10 @@
 		return -EINVAL;
 	}
 
+	IPAWANDBG("Client : %d:%d:%d\n",
+		data->device_type, data->client_idx,
+		rmnet_ipa3_ctx->tether_device[data->device_type].num_clients);
+
 	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
 	lan_client =
 	&rmnet_ipa3_ctx->tether_device[data->device_type].
@@ -4015,6 +4037,21 @@
 
 	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
 
+	/* Check if Source pipe is valid. */
+	if (rmnet_ipa3_ctx->tether_device
+		[data->device_type].ul_src_pipe == -1) {
+		IPAWANERR("Device not initialized: %d\n", data->device_type);
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		return -EINVAL;
+	}
+
+	/* Check if we have clients connected. */
+	if (rmnet_ipa3_ctx->tether_device[data->device_type].num_clients == 0) {
+		IPAWANERR("No clients connected: %d\n", data->device_type);
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		return -EINVAL;
+	}
+
 	if (data->num_clients == 1) {
 		/* Check if the client info is valid.*/
 		lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info(
@@ -4071,6 +4108,9 @@
 	memset(req, 0, sizeof(struct ipa_get_stats_per_client_req_msg_v01));
 	memset(resp, 0, sizeof(struct ipa_get_stats_per_client_resp_msg_v01));
 
+	IPAWANDBG("Reset stats: %s",
+		data->reset_stats?"Yes":"No");
+
 	if (data->reset_stats) {
 		req->reset_stats_valid = true;
 		req->reset_stats = true;
@@ -4142,6 +4182,9 @@
 		}
 	}
 
+	IPAWANDBG("Disconnect clnt: %s",
+		data->disconnect_clnt?"Yes":"No");
+
 	if (data->disconnect_clnt) {
 		rmnet_ipa3_delete_lan_client_info(data->device_type,
 		lan_clnt_idx1);
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 929242a..cf73aeb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -82,8 +82,17 @@
 		DRIVER_NAME);
 
 	if (!ipa3_process_ioctl) {
-		IPAWANDBG("modem is in SSR, ignoring ioctl\n");
-		return -EAGAIN;
+
+		if ((cmd == WAN_IOC_SET_LAN_CLIENT_INFO) ||
+			(cmd == WAN_IOC_CLEAR_LAN_CLIENT_INFO)) {
+			IPAWANDBG("Modem is in SSR\n");
+			IPAWANDBG("Still allow IOCTL for exceptions (%d)\n",
+				cmd);
+		} else {
+			IPAWANERR("Modem is in SSR, ignoring ioctl (%d)\n",
+				cmd);
+			return -EAGAIN;
+		}
 	}
 
 	switch (cmd) {
@@ -227,7 +236,7 @@
 		}
 		if (rmnet_ipa3_poll_tethering_stats(
 		(struct wan_ioctl_poll_tethering_stats *)param)) {
-			IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
+			IPAWANERR_RL("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
 			retval = -EFAULT;
 			break;
 		}
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index c938e8e..3199b29 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -44,7 +44,7 @@
 #define MHI_RING_PRIMARY_EVT_ID		1
 #define MHI_1K_SIZE			0x1000
 /* Updated Specification for event start is NER - 2 and end - NER -1 */
-#define MHI_HW_ACC_EVT_RING_START	2
+#define MHI_HW_ACC_EVT_RING_START	3
 #define MHI_HW_ACC_EVT_RING_END		1
 
 #define MHI_HOST_REGION_NUM             2
@@ -495,7 +495,9 @@
 	case MHI_DEV_RING_EL_START:
 		connect_params.channel_id = chid;
 		connect_params.sys.skip_ep_cfg = true;
-		if ((chid % 2) == 0x0)
+		if (chid == MHI_CLIENT_ADPL_IN)
+			connect_params.sys.client = IPA_CLIENT_MHI_DPL_CONS;
+		else if ((chid % 2) == 0x0)
 			connect_params.sys.client = IPA_CLIENT_MHI_PROD;
 		else
 			connect_params.sys.client = IPA_CLIENT_MHI_CONS;
@@ -1278,7 +1280,7 @@
 			ring = &mhi->ring[ch_num + mhi->ch_ring_start];
 			if (ring->state == RING_STATE_UINT) {
 				pr_debug("Channel not opened for %d\n", ch_num);
-				break;
+				continue;
 			}
 			mhi_ring_set_state(ring, RING_STATE_PENDING);
 			list_add(&ring->list, &mhi->process_ring_list);
@@ -1429,7 +1431,7 @@
 	ch = client->channel;
 	mhi = ch->ring->mhi_dev;
 	el = req->el;
-	transfer_len = req->len;
+	transfer_len = req->transfer_len;
 	snd_cmpl = req->snd_cmpl;
 	rd_offset = req->rd_offset;
 	ch->curr_ereq->context = ch;
@@ -2142,7 +2144,7 @@
 			(mreq->len - usr_buf_remaining);
 		ch->tre_bytes_left -= bytes_to_read;
 		mreq->el = el;
-		mreq->actual_len = bytes_read;
+		mreq->transfer_len = bytes_to_read;
 		mreq->rd_offset = ring->rd_offset;
 		mhi_log(MHI_MSG_VERBOSE, "reading %d bytes from chan %d\n",
 				bytes_to_read, mreq->chan);
@@ -2306,6 +2308,7 @@
 		write_to_loc = el->tre.data_buf_ptr;
 		wreq->rd_offset = ring->rd_offset;
 		wreq->el = el;
+		wreq->transfer_len = bytes_to_write;
 		rc = mhi_transfer_device_to_host(write_to_loc,
 						(void *) read_from_loc,
 						bytes_to_write,
@@ -2362,6 +2365,64 @@
 }
 EXPORT_SYMBOL(mhi_dev_write_channel);
 
+static int mhi_dev_recover(struct mhi_dev *mhi)
+{
+	int rc = 0;
+	uint32_t syserr, max_cnt = 0, bhi_intvec = 0;
+	bool mhi_reset;
+	enum mhi_dev_state state;
+
+	/* Check if MHI is in syserr */
+	mhi_dev_mmio_masked_read(mhi, MHISTATUS,
+				MHISTATUS_SYSERR_MASK,
+				MHISTATUS_SYSERR_SHIFT, &syserr);
+
+	mhi_log(MHI_MSG_VERBOSE, "mhi_syserr = 0x%X\n", syserr);
+	if (syserr) {
+		rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec);
+		if (rc)
+			return rc;
+
+		if (bhi_intvec != 0xffffffff) {
+			/* Indicate the host that the device is ready */
+			rc = ep_pcie_trigger_msi(mhi->phandle, bhi_intvec);
+			if (rc) {
+				pr_err("%s: error sending msi\n", __func__);
+				return rc;
+			}
+		}
+
+		/* Poll for the host to set the reset bit */
+		rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
+		if (rc) {
+			pr_err("%s: get mhi state failed\n", __func__);
+			return rc;
+		}
+		while (!mhi_reset && max_cnt < MHI_SUSPEND_TIMEOUT) {
+			/* Wait for Host to set the reset */
+			msleep(MHI_SUSPEND_MIN);
+			rc = mhi_dev_mmio_get_mhi_state(mhi, &state,
+								&mhi_reset);
+			if (rc) {
+				pr_err("%s: get mhi state failed\n", __func__);
+				return rc;
+			}
+			max_cnt++;
+		}
+
+		if (!mhi_reset) {
+			mhi_log(MHI_MSG_VERBOSE, "Host failed to set reset\n");
+			return -EINVAL;
+		}
+	}
+	/*
+	 * Now mask the interrupts so that the state machine moves
+	 * only after IPA is ready
+	 */
+	mhi_dev_mmio_mask_interrupts(mhi);
+	return 0;
+}
+
 static void mhi_dev_enable(struct work_struct *work)
 {
 	int rc = 0;
@@ -2864,6 +2925,18 @@
 	mutex_init(&mhi_ctx->mhi_event_lock);
 	mutex_init(&mhi_ctx->mhi_write_test);
 
+	mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
+	if (!mhi_ctx->phandle) {
+		pr_err("PCIe driver get handle failed.\n");
+		return -EINVAL;
+	}
+
+	rc = mhi_dev_recover(mhi_ctx);
+	if (rc) {
+		pr_err("%s: MHI device recovery failed\n", __func__);
+		return rc;
+	}
+
 	rc = mhi_init(mhi_ctx);
 	if (rc)
 		return rc;
@@ -2893,13 +2966,6 @@
 		pr_err("Failed to update the MHI version\n");
 		return rc;
 	}
-
-	mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
-	if (!mhi_ctx->phandle) {
-		pr_err("PCIe driver get handle failed.\n");
-		return -EINVAL;
-	}
-
 	mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
 		EP_PCIE_EVENT_PM_D3_COLD |
 		EP_PCIE_EVENT_PM_D0 |
@@ -3091,7 +3157,7 @@
 {
 	return platform_driver_register(&mhi_dev_driver);
 }
-module_init(mhi_dev_init);
+subsys_initcall(mhi_dev_init);
 
 static void __exit mhi_dev_exit(void)
 {
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
index 6cb2d7d..fae39e0 100644
--- a/drivers/platform/msm/mhi_dev/mhi.h
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -906,6 +906,12 @@
 int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);
 
 /**
+ * mhi_dev_mmio_mask_interrupts() - Mask all MHI interrupts.
+ * @dev:	MHI device structure.
+ */
+void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev);
+
+/**
  * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
  * @dev:	MHI device structure.
  */
diff --git a/drivers/platform/msm/mhi_dev/mhi_dev_net.c b/drivers/platform/msm/mhi_dev/mhi_dev_net.c
index d8dc85f..d1f86a4 100644
--- a/drivers/platform/msm/mhi_dev/mhi_dev_net.c
+++ b/drivers/platform/msm/mhi_dev/mhi_dev_net.c
@@ -246,10 +246,10 @@
 	struct sk_buff *skb = mreq->context;
 	unsigned long   flags;
 
-	skb->len = mreq->actual_len;
+	skb->len = mreq->transfer_len;
 	skb->protocol =
 		mhi_dev_net_eth_type_trans(skb);
-	skb_put(skb, mreq->actual_len);
+	skb_put(skb, mreq->transfer_len);
 	net_handle->dev->stats.rx_packets++;
 	skb->dev = net_handle->dev;
 	netif_rx(skb);
diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c
index 559fa84..28d465a 100644
--- a/drivers/platform/msm/mhi_dev/mhi_mmio.c
+++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c
@@ -568,7 +568,7 @@
 }
 EXPORT_SYMBOL(mhi_dev_mmio_disable_cmdb_interrupt);
 
-static void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev)
+void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev)
 {
 	int rc = 0;
 
@@ -596,6 +596,7 @@
 		return;
 	}
 }
+EXPORT_SYMBOL(mhi_dev_mmio_mask_interrupts);
 
 int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev)
 {
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index 52d324e..87dddfd 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -37,10 +37,12 @@
 #define MAX_UCI_WR_REQ			10
 #define MAX_NR_TRBS_PER_CHAN		9
 #define MHI_QTI_IFACE_ID		4
+#define MHI_ADPL_IFACE_ID		5
 #define DEVICE_NAME			"mhi"
 #define MAX_DEVICE_NAME_SIZE		80
 
 #define MHI_UCI_ASYNC_READ_TIMEOUT	msecs_to_jiffies(100)
+#define MHI_UCI_ASYNC_WRITE_TIMEOUT	msecs_to_jiffies(100)
 
 enum uci_dbg_level {
 	UCI_DBG_VERBOSE = 0x0,
@@ -73,12 +75,21 @@
 	u32 nr_trbs;
 	/* direction of the channel, see enum mhi_chan_dir */
 	enum mhi_chan_dir dir;
-	/* need to register mhi channel state change callback */
-	bool register_cb;
+	/* Optional mhi channel state change callback func pointer */
+	void (*chan_state_cb)(struct mhi_dev_client_cb_data *cb_data);
 	/* Name of char device */
 	char *device_name;
+	/* Client-specific TRE handler */
+	void (*tre_notif_cb)(struct mhi_dev_client_cb_reason *reason);
+	/* Set true if the client needs write-completion notification */
+	bool wr_cmpl;
 };
 
+static void mhi_uci_adb_client_cb(struct mhi_dev_client_cb_data *cb_data);
+static void mhi_uci_at_ctrl_client_cb(struct mhi_dev_client_cb_data *cb_data);
+static void mhi_uci_at_ctrl_tre_cb(struct mhi_dev_client_cb_reason *reason);
+
 /* UCI channel attributes table */
 static const struct chan_attr uci_chan_attr_table[] = {
 	{
@@ -86,7 +97,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -94,7 +105,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -102,7 +113,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -110,7 +121,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -118,7 +129,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -126,7 +137,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -134,7 +145,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -142,7 +153,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -150,7 +161,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -158,7 +169,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -166,7 +177,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -174,7 +185,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -182,23 +193,26 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
-		NULL
+		mhi_uci_at_ctrl_client_cb,
+		NULL,
+		mhi_uci_at_ctrl_tre_cb
 	},
 	{
 		MHI_CLIENT_IP_CTRL_1_IN,
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
-		NULL
+		mhi_uci_at_ctrl_client_cb,
+		NULL,
+		NULL,
+		true
 	},
 	{
 		MHI_CLIENT_DUN_OUT,
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -206,7 +220,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		false,
+		NULL,
 		NULL
 	},
 	{
@@ -214,7 +228,7 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_OUT,
-		true,
+		mhi_uci_adb_client_cb,
 		NULL
 	},
 	{
@@ -222,11 +236,33 @@
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
 		MHI_DIR_IN,
-		true,
+		mhi_uci_adb_client_cb,
 		"android_adb"
 	},
 };
 
+/* Defines for AT messages */
+#define MHI_UCI_CTRL_MSG_MAGIC		(0x4354524C)
+#define MHI_UCI_CTRL_MSG_DTR		BIT(0)
+#define MHI_UCI_CTRL_MSG_RTS		BIT(1)
+#define MHI_UCI_CTRL_MSG_DCD		BIT(0)
+#define MHI_UCI_CTRL_MSG_DSR		BIT(1)
+#define MHI_UCI_CTRL_MSG_RI		BIT(3)
+
+#define MHI_UCI_CTRL_MSGID_SET_CTRL_LINE	0x10
+#define MHI_UCI_CTRL_MSGID_SERIAL_STATE		0x11
+#define MHI_UCI_TIOCM_GET			TIOCMGET
+#define MHI_UCI_TIOCM_SET			TIOCMSET
+
+/* AT message format */
+struct __packed mhi_uci_ctrl_msg {
+	u32 preamble;
+	u32 msg_id;
+	u32 dest_id;
+	u32 size;
+	u32 msg;
+};
+
 struct uci_ctrl {
 	wait_queue_head_t	ctrl_wq;
 	struct mhi_uci_ctxt_t	*uci_ctxt;
@@ -262,8 +298,11 @@
 	struct mhi_req *wreqs;
 	struct list_head wr_req_list;
 	struct completion read_done;
+	struct completion *write_done;
 	int (*send)(struct uci_client*, void*, u32);
 	int (*read)(struct uci_client*, struct mhi_req*, int*);
+	unsigned int tiocm;
+	unsigned int at_ctrl_mask;
 };
 
 struct mhi_uci_ctxt_t {
@@ -278,6 +317,8 @@
 	struct class *mhi_uci_class;
 	atomic_t mhi_disabled;
 	atomic_t mhi_enable_notif_wq_active;
+	struct workqueue_struct *at_ctrl_wq;
+	struct work_struct at_ctrl_work;
 };
 
 #define CHAN_TO_CLIENT(_CHAN_NR) (_CHAN_NR / 2)
@@ -375,6 +416,9 @@
 	spin_lock_irqsave(&uci_handle->wr_req_lock, flags);
 	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
 	spin_unlock_irqrestore(&uci_handle->wr_req_lock, flags);
+
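+	/* Wake up any thread waiting synchronously on this write completion */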
+	if (uci_handle->write_done)
+		complete(uci_handle->write_done);
 }
 
 static void mhi_uci_read_completion_cb(void *req)
@@ -400,7 +444,8 @@
 
 	ret_val = mhi_dev_write_channel(&ureq);
 
-	kfree(data_loc);
+	if (ret_val == size)
+		kfree(data_loc);
 	return ret_val;
 }
 
@@ -441,7 +486,6 @@
 	return bytes_to_write;
 
 error_async_transfer:
-	kfree(data_loc);
 	ureq->buf = NULL;
 	spin_lock_irq(&uci_handle->wr_req_lock);
 	list_add_tail(&ureq->list, &uci_handle->wr_req_list);
@@ -450,41 +494,42 @@
 	return bytes_to_write;
 }
 
-static int mhi_uci_send_packet(struct mhi_dev_client **client_handle,
-		const char __user *buf, u32 size)
+static int mhi_uci_send_packet(struct uci_client *uci_handle, void *data_loc,
+				u32 size)
 {
-	void *data_loc;
-	unsigned long memcpy_result;
-	struct uci_client *uci_handle;
+	int ret_val;
 
-	if (!client_handle || !buf || !size)
-		return -EINVAL;
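+	/*
+	 * Keep trying until the packet is queued: a return value of 0 means
+	 * no transfer descriptors were available, so block on the write
+	 * waitqueue (unless the fd is non-blocking) and retry.
+	 */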
+	mutex_lock(&uci_handle->out_chan_lock);
+	do {
+		ret_val = uci_handle->send(uci_handle, data_loc, size);
+		if (ret_val < 0) {
+			uci_log(UCI_DBG_ERROR,
+				"Err sending data: chan %d, buf %pK, size %d\n",
+				uci_handle->out_chan, data_loc, size);
+			ret_val = -EIO;
+			break;
+		}
+		if (!ret_val) {
+			uci_log(UCI_DBG_VERBOSE,
+				"No descriptors available, did we poll, chan %d?\n",
+				uci_handle->out_chan);
+			mutex_unlock(&uci_handle->out_chan_lock);
+			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
+				return -EAGAIN;
+			ret_val = wait_event_interruptible(uci_handle->write_wq,
+					!mhi_dev_channel_isempty(
+					uci_handle->out_handle));
+			if (-ERESTARTSYS == ret_val) {
+				uci_log(UCI_DBG_WARNING,
+					"Waitqueue cancelled by system\n");
+				return ret_val;
+			}
+			mutex_lock(&uci_handle->out_chan_lock);
+		}
+	} while (!ret_val);
+	mutex_unlock(&uci_handle->out_chan_lock);
 
-	if (size > TRB_MAX_DATA_SIZE) {
-		uci_log(UCI_DBG_ERROR,
-			"Too big write size: %d, max supported size is %d\n",
-			size, TRB_MAX_DATA_SIZE);
-		return -EFBIG;
-	}
-
-	uci_handle = container_of(client_handle, struct uci_client,
-					out_handle);
-	data_loc = kmalloc(size, GFP_KERNEL);
-	if (!data_loc) {
-		uci_log(UCI_DBG_ERROR,
-		"Failed to allocate kernel buf for user requested size 0x%x\n",
-			size);
-		return -ENOMEM;
-	}
-	memcpy_result = copy_from_user(data_loc, buf, size);
-	if (memcpy_result)
-		goto error_memcpy;
-
-	return uci_handle->send(uci_handle, data_loc, size);
-
-error_memcpy:
-	kfree(data_loc);
-	return -EFAULT;
+	return ret_val;
 }
 
 static unsigned int mhi_uci_ctrl_poll(struct file *file, poll_table *wait)
@@ -523,6 +568,7 @@
 
 	poll_wait(file, &uci_handle->read_wq, wait);
 	poll_wait(file, &uci_handle->write_wq, wait);
+	mask = uci_handle->at_ctrl_mask;
 	if (!atomic_read(&uci_ctxt.mhi_disabled) &&
 		!mhi_dev_channel_isempty(uci_handle->in_handle)) {
 		uci_log(UCI_DBG_VERBOSE,
@@ -608,7 +654,7 @@
 			"wk up Read completed on ch %d\n", ureq->chan);
 
 		uci_handle->pkt_loc = (void *)ureq->buf;
-		uci_handle->pkt_size = ureq->actual_len;
+		uci_handle->pkt_size = ureq->transfer_len;
 
 		uci_log(UCI_DBG_VERBOSE,
 			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
@@ -641,7 +687,7 @@
 
 	if (*bytes_avail > 0) {
 		uci_handle->pkt_loc = (void *)ureq->buf;
-		uci_handle->pkt_size = ureq->actual_len;
+		uci_handle->pkt_size = ureq->transfer_len;
 
 		uci_log(UCI_DBG_VERBOSE,
 			"Got pkt of sz 0x%x at adr %pK, ch %d\n",
@@ -912,81 +958,62 @@
 	return size;
 }
 
-static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
-		size_t uspace_buf_size, loff_t *bytes_pending)
+static int __mhi_uci_client_read(struct uci_client *uci_handle,
+		int *bytes_avail)
 {
-	struct uci_client *uci_handle = NULL;
-	struct mhi_dev_client *client_handle = NULL;
-	int bytes_avail = 0;
 	int ret_val = 0;
-	struct mutex *mutex;
-	ssize_t bytes_copied = 0;
-	u32 addr_offset = 0;
+	struct mhi_dev_client *client_handle;
 	struct mhi_req ureq;
 
-	if (!file || !ubuf || !uspace_buf_size ||
-			!file->private_data)
-		return -EINVAL;
-
-	uci_handle = file->private_data;
 	client_handle = uci_handle->in_handle;
-	mutex = &uci_handle->in_chan_lock;
 	ureq.chan = uci_handle->in_chan;
-
-	mutex_lock(mutex);
 	ureq.client = client_handle;
 	ureq.buf = uci_handle->in_buf_list[0].addr;
 	ureq.len = uci_handle->in_buf_list[0].buf_size;
 
-
-	uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n",
-			ureq.chan);
 	do {
 		if (!uci_handle->pkt_loc &&
 			!atomic_read(&uci_ctxt.mhi_disabled)) {
 			ret_val = uci_handle->read(uci_handle, &ureq,
-							&bytes_avail);
+				bytes_avail);
 			if (ret_val)
-				goto error;
-			if (bytes_avail > 0)
-				*bytes_pending = (loff_t)uci_handle->pkt_size;
+				return ret_val;
 		}
-		if (bytes_avail == 0) {
+		if (*bytes_avail == 0) {
 
 			/* If nothing was copied yet, wait for data */
 			uci_log(UCI_DBG_VERBOSE,
 				"No data read_data_ready %d, chan %d\n",
 				atomic_read(&uci_handle->read_data_ready),
 				ureq.chan);
-			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY)) {
-				ret_val = -EAGAIN;
-				goto error;
-			}
+			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
+				return -EAGAIN;
+
 			ret_val = wait_event_interruptible(uci_handle->read_wq,
 				(!mhi_dev_channel_isempty(client_handle)));
 
 			if (ret_val == -ERESTARTSYS) {
 				uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
-				goto error;
+				return ret_val;
 			}
+
 			uci_log(UCI_DBG_VERBOSE,
 				"wk up Got data on ch %d read_data_ready %d\n",
 				ureq.chan,
 				atomic_read(&uci_handle->read_data_ready));
-
+		} else if (*bytes_avail > 0) {
 			/* A valid packet was returned from MHI */
-		} else if (bytes_avail > 0) {
 			uci_log(UCI_DBG_VERBOSE,
 				"Got packet: avail pkts %d phy_adr %p, ch %d\n",
 				atomic_read(&uci_handle->read_data_ready),
 				ureq.buf,
 				ureq.chan);
 			break;
+		} else {
 			/*
 			 * MHI did not return a valid packet, but we have one
 			 * which we did not finish returning to user
 			 */
-		} else {
 			uci_log(UCI_DBG_CRITICAL,
 				"chan %d err: avail pkts %d phy_adr %p",
 				ureq.chan,
@@ -996,10 +1023,36 @@
 		}
 	} while (!uci_handle->pkt_loc);
 
+	return ret_val;
+}
+
+static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
+	size_t uspace_buf_size, loff_t *bytes_pending)
+{
+	struct uci_client *uci_handle = NULL;
+	int bytes_avail = 0, ret_val = 0;
+	struct mutex *mutex;
+	ssize_t bytes_copied = 0;
+	u32 addr_offset = 0;
+
+	uci_handle = file->private_data;
+	mutex = &uci_handle->in_chan_lock;
+	mutex_lock(mutex);
+
+	uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n",
+		uci_handle->in_chan);
+
+	ret_val = __mhi_uci_client_read(uci_handle, &bytes_avail);
+	if (ret_val)
+		goto error;
+
+	if (bytes_avail > 0)
+		*bytes_pending = (loff_t)uci_handle->pkt_size;
+
 	if (uspace_buf_size >= *bytes_pending) {
 		addr_offset = uci_handle->pkt_size - *bytes_pending;
 		if (copy_to_user(ubuf, uci_handle->pkt_loc + addr_offset,
-							*bytes_pending)) {
+			*bytes_pending)) {
 			ret_val = -EIO;
 			goto error;
 		}
@@ -1007,50 +1060,51 @@
 		bytes_copied = *bytes_pending;
 		*bytes_pending = 0;
 		uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x, chan %d\n",
-				bytes_copied, (u32)*bytes_pending, ureq.chan);
+			bytes_copied, (u32)*bytes_pending, uci_handle->in_chan);
 	} else {
 		addr_offset = uci_handle->pkt_size - *bytes_pending;
 		if (copy_to_user(ubuf, (void *) (uintptr_t)uci_handle->pkt_loc +
-					addr_offset, uspace_buf_size)) {
+			addr_offset, uspace_buf_size)) {
 			ret_val = -EIO;
 			goto error;
 		}
 		bytes_copied = uspace_buf_size;
 		*bytes_pending -= uspace_buf_size;
 		uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x,chan %d\n",
-				bytes_copied,
-				(u32)*bytes_pending,
-				ureq.chan);
+			bytes_copied,
+			(u32)*bytes_pending,
+			uci_handle->in_chan);
 	}
 	/* We finished with this buffer, map it back */
 	if (*bytes_pending == 0) {
 		uci_log(UCI_DBG_VERBOSE,
-				"All data consumed. Pkt loc %p ,chan %d\n",
-				uci_handle->pkt_loc, ureq.chan);
+			"All data consumed. Pkt loc %p ,chan %d\n",
+			uci_handle->pkt_loc, uci_handle->in_chan);
 		uci_handle->pkt_loc = 0;
 		uci_handle->pkt_size = 0;
 	}
 	uci_log(UCI_DBG_VERBOSE,
-			"Returning 0x%x bytes, 0x%x bytes left\n",
-			bytes_copied, (u32)*bytes_pending);
+		"Returning 0x%x bytes, 0x%x bytes left\n",
+		bytes_copied, (u32)*bytes_pending);
 	mutex_unlock(mutex);
 	return bytes_copied;
 error:
 	mutex_unlock(mutex);
+
 	uci_log(UCI_DBG_ERROR, "Returning %d\n", ret_val);
 	return ret_val;
 }
 
 static ssize_t mhi_uci_client_write(struct file *file,
-		const char __user *buf,
-		size_t count, loff_t *offp)
+			const char __user *buf, size_t count, loff_t *offp)
 {
 	struct uci_client *uci_handle = NULL;
-	int ret_val = 0;
-	u32 chan = 0xFFFFFFFF;
+	void *data_loc;
+	unsigned long memcpy_result;
+	int rc;
 
 	if (file == NULL || buf == NULL ||
-			!count || file->private_data == NULL)
+		!count || file->private_data == NULL)
 		return -EINVAL;
 
 	uci_handle = file->private_data;
@@ -1061,39 +1115,32 @@
 			uci_handle->out_chan);
 		return -EIO;
 	}
-	chan = uci_handle->out_chan;
-	mutex_lock(&uci_handle->out_chan_lock);
-	while (!ret_val) {
-		ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
-						buf, count);
-		if (ret_val < 0) {
-			uci_log(UCI_DBG_ERROR,
-				"Error while writing data to MHI, chan %d, buf %pK, size %d\n",
-				chan, (void *)buf, count);
-			ret_val = -EIO;
-			break;
-		}
-		if (!ret_val) {
-			uci_log(UCI_DBG_VERBOSE,
-				"No descriptors available, did we poll, chan %d?\n",
-				chan);
-			mutex_unlock(&uci_handle->out_chan_lock);
-			if (uci_handle->f_flags & (O_NONBLOCK | O_NDELAY))
-				return -EAGAIN;
-			ret_val = wait_event_interruptible(uci_handle->write_wq,
-				!mhi_dev_channel_isempty(
-					uci_handle->out_handle));
 
-			mutex_lock(&uci_handle->out_chan_lock);
-			if (-ERESTARTSYS == ret_val) {
-				uci_log(UCI_DBG_WARNING,
-					    "Waitqueue cancelled by system\n");
-				break;
-			}
-		}
+	if (count > TRB_MAX_DATA_SIZE) {
+		uci_log(UCI_DBG_ERROR,
+			"Too big write size: %d, max supported size is %d\n",
+			count, TRB_MAX_DATA_SIZE);
+		return -EFBIG;
 	}
-	mutex_unlock(&uci_handle->out_chan_lock);
-	return ret_val;
+
+	data_loc = kmalloc(count, GFP_KERNEL);
+	if (!data_loc)
+		return -ENOMEM;
+
+	memcpy_result = copy_from_user(data_loc, buf, count);
+	if (memcpy_result) {
+		rc = -EFAULT;
+		goto error_memcpy;
+	}
+
+	rc = mhi_uci_send_packet(uci_handle, data_loc, count);
+	if (rc == count)
+		return rc;
+
+error_memcpy:
+	kfree(data_loc);
+	return rc;
+
 }
 
 void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason)
@@ -1119,13 +1166,18 @@
 	int client_index = 0;
 	struct uci_client *uci_handle = NULL;
 
-	if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
-		client_index = reason->ch_id / 2;
-		uci_handle = &uci_ctxt.client_handles[client_index];
+	client_index = reason->ch_id / 2;
+	uci_handle = &uci_ctxt.client_handles[client_index];
+	/*
+	 * If this client has its own TRE event handler, call that
+	 * else use the default handler.
+	 */
+	if (uci_handle->out_chan_attr->tre_notif_cb) {
+		uci_handle->out_chan_attr->tre_notif_cb(reason);
+	} else if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
 		uci_log(UCI_DBG_DBG,
 			"recived TRE available event for chan %d\n",
-					uci_handle->in_chan);
-
+			uci_handle->in_chan);
 		if (reason->ch_id % 2) {
 			atomic_set(&uci_handle->write_data_ready, 1);
 			wake_up(&uci_handle->write_wq);
@@ -1150,12 +1202,126 @@
 	return 0;
 }
 
+static int mhi_uci_ctrl_set_tiocm(struct uci_client *client,
+				unsigned int ser_state)
+{
+	unsigned int cur_ser_state;
+	unsigned long compl_ret;
+	struct mhi_uci_ctrl_msg *ctrl_msg;
+	int ret_val;
+	struct uci_client *ctrl_client =
+		&uci_ctxt.client_handles[CHAN_TO_CLIENT
+					(MHI_CLIENT_IP_CTRL_1_OUT)];
+
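+	/*
+	 * Build a SERIAL_STATE control message reflecting the CD/DSR/RI bits
+	 * and send it over the IP_CTRL_1 channel, waiting for the write to
+	 * complete before caching the new state in client->tiocm.
+	 */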
+	uci_log(UCI_DBG_VERBOSE, "Rcvd ser_state = 0x%x\n", ser_state);
+
+	cur_ser_state = client->tiocm & ~(TIOCM_DTR | TIOCM_RTS);
+	ser_state &= (TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+
+	if (cur_ser_state == ser_state)
+		return 0;
+
+	ctrl_msg = kzalloc(sizeof(*ctrl_msg), GFP_KERNEL);
+	if (!ctrl_msg)
+		return -ENOMEM;
+
+	ctrl_msg->preamble = MHI_UCI_CTRL_MSG_MAGIC;
+	ctrl_msg->msg_id = MHI_UCI_CTRL_MSGID_SERIAL_STATE;
+	ctrl_msg->dest_id = client->out_chan;
+	ctrl_msg->size = sizeof(unsigned int);
+	if (ser_state & TIOCM_CD)
+		ctrl_msg->msg |= MHI_UCI_CTRL_MSG_DCD;
+	if (ser_state & TIOCM_DSR)
+		ctrl_msg->msg |= MHI_UCI_CTRL_MSG_DSR;
+	if (ser_state & TIOCM_RI)
+		ctrl_msg->msg |= MHI_UCI_CTRL_MSG_RI;
+
+	reinit_completion(ctrl_client->write_done);
+	ret_val = mhi_uci_send_packet(ctrl_client, ctrl_msg, sizeof(*ctrl_msg));
+	if (ret_val != sizeof(*ctrl_msg))
+		goto tiocm_error;
+	compl_ret = wait_for_completion_interruptible_timeout(
+			ctrl_client->write_done,
+			MHI_UCI_ASYNC_WRITE_TIMEOUT);
+	if (compl_ret == -ERESTARTSYS) {
+		uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
+		ret_val = compl_ret;
+		goto tiocm_error;
+	} else if (compl_ret == 0) {
+		uci_log(UCI_DBG_ERROR, "Timed out trying to send ctrl msg\n");
+		ret_val = -EIO;
+		goto tiocm_error;
+	}
+
+	client->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+	client->tiocm |= ser_state;
+	return 0;
+
+tiocm_error:
+	kfree(ctrl_msg);
+	return ret_val;
+}
+
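+/*
+ * Work item run when a control message arrives from the host: read and
+ * validate the message, update the target client's tiocm bits and wake any
+ * pollers with POLLPRI.
+ */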
+static void mhi_uci_at_ctrl_read(struct work_struct *work)
+{
+	int ret_val;
+	int msg_size = 0;
+	struct uci_client *ctrl_client =
+		&uci_ctxt.client_handles[CHAN_TO_CLIENT
+		(MHI_CLIENT_IP_CTRL_1_OUT)];
+	struct uci_client *tgt_client;
+	struct mhi_uci_ctrl_msg *ctrl_msg;
+	unsigned int chan;
+
+	ctrl_client->pkt_loc = NULL;
+	ctrl_client->pkt_size = 0;
+
+	ret_val = __mhi_uci_client_read(ctrl_client, &msg_size);
+	if (ret_val) {
+		uci_log(UCI_DBG_ERROR,
+			"Ctrl msg read failed, ret_val is %d!\n",
+			ret_val);
+		return;
+	}
+	if (msg_size != sizeof(*ctrl_msg)) {
+		uci_log(UCI_DBG_ERROR, "Invalid ctrl msg size!\n");
+		return;
+	}
+	if (!ctrl_client->pkt_loc) {
+		uci_log(UCI_DBG_ERROR, "ctrl msg pkt_loc null!\n");
+		return;
+	}
+	ctrl_msg = ctrl_client->pkt_loc;
+
+	chan = ctrl_msg->dest_id;
+	if (chan >= MHI_MAX_SOFTWARE_CHANNELS) {
+		uci_log(UCI_DBG_ERROR,
+			"Invalid channel number in ctrl msg!\n");
+		return;
+	}
+	tgt_client = &uci_ctxt.client_handles[CHAN_TO_CLIENT(chan)];
+	tgt_client->tiocm &= (TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+
+	if (ctrl_msg->msg & MHI_UCI_CTRL_MSG_DCD)
+		tgt_client->tiocm |= TIOCM_CD;
+	if (ctrl_msg->msg & MHI_UCI_CTRL_MSG_DSR)
+		tgt_client->tiocm |= TIOCM_DSR;
+	if (ctrl_msg->msg & MHI_UCI_CTRL_MSG_RI)
+		tgt_client->tiocm |= TIOCM_RI;
+
+	uci_log(UCI_DBG_VERBOSE, "Rcvd tiocm %d\n", tgt_client->tiocm);
+
+	tgt_client->at_ctrl_mask = POLLPRI;
+	wake_up(&tgt_client->read_wq);
+}
+
 static long mhi_uci_client_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg)
 {
 	struct uci_client *uci_handle = NULL;
 	int rc = 0;
 	struct ep_info epinfo;
+	unsigned int tiocm;
 
 	if (file == NULL || file->private_data == NULL)
 		return -EINVAL;
@@ -1188,6 +1354,42 @@
 			sizeof(epinfo));
 		if (rc)
 			uci_log(UCI_DBG_ERROR, "copying to user space failed");
+	} else if (cmd == MHI_UCI_TIOCM_GET) {
+		rc = copy_to_user((void __user *)arg, &uci_handle->tiocm,
+			sizeof(uci_handle->tiocm));
+		if (rc) {
+			uci_log(UCI_DBG_ERROR,
+				"copying ctrl state to user space failed");
+			rc = -EFAULT;
+		}
+		uci_handle->at_ctrl_mask = 0;
+	} else if (cmd == MHI_UCI_TIOCM_SET) {
+		rc = get_user(tiocm, (unsigned int __user *)arg);
+		if (rc)
+			return rc;
+		rc = mhi_uci_ctrl_set_tiocm(uci_handle, tiocm);
+	} else if (cmd == MHI_UCI_DPL_EP_LOOKUP) {
+		uci_log(UCI_DBG_DBG, "DPL EP_LOOKUP for client:%d\n",
+			uci_handle->client_index);
+		epinfo.ph_ep_info.ep_type = DATA_EP_TYPE_PCIE;
+		epinfo.ph_ep_info.peripheral_iface_id = MHI_ADPL_IFACE_ID;
+		epinfo.ipa_ep_pair.prod_pipe_num =
+			ipa_get_ep_mapping(IPA_CLIENT_MHI_DPL_CONS);
+		/* For DPL set cons pipe to -1 to indicate it is unused */
+		epinfo.ipa_ep_pair.cons_pipe_num = -1;
+
+		uci_log(UCI_DBG_DBG, "client:%d ep_type:%d intf:%d\n",
+			uci_handle->client_index,
+			epinfo.ph_ep_info.ep_type,
+			epinfo.ph_ep_info.peripheral_iface_id);
+
+		uci_log(UCI_DBG_DBG, "DPL ipa_prod_idx:%d\n",
+			epinfo.ipa_ep_pair.prod_pipe_num);
+
+		rc = copy_to_user((void __user *)arg, &epinfo,
+			sizeof(epinfo));
+		if (rc)
+			uci_log(UCI_DBG_ERROR, "copying to user space failed");
 	} else {
 		uci_log(UCI_DBG_ERROR, "wrong parameter:%d\n", cmd);
 		rc = -EINVAL;
@@ -1279,7 +1481,50 @@
 	return r;
 }
 
-static void mhi_uci_client_cb(struct mhi_dev_client_cb_data *cb_data)
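+/*
+ * TRE notification handler for the AT control channels: wake pending writers
+ * when the device-to-host channel has free descriptors, and queue the AT
+ * control work to consume messages arriving from the host.
+ */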
+static void mhi_uci_at_ctrl_tre_cb(struct mhi_dev_client_cb_reason *reason)
+{
+	int client_index;
+	struct uci_client *uci_handle;
+
+	client_index = reason->ch_id / 2;
+	uci_handle = &uci_ctxt.client_handles[client_index];
+
+	if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
+		if (reason->ch_id % 2) {
+			atomic_set(&uci_handle->write_data_ready, 1);
+			wake_up(&uci_handle->write_wq);
+		} else {
+			queue_work(uci_ctxt.at_ctrl_wq, &uci_ctxt.at_ctrl_work);
+		}
+	}
+}
+
+static void mhi_uci_at_ctrl_client_cb(struct mhi_dev_client_cb_data *cb_data)
+{
+	struct uci_client *client = cb_data->user_data;
+	int rc;
+
+	uci_log(UCI_DBG_VERBOSE, " Rcvd MHI cb for channel %d, state %d\n",
+		cb_data->channel, cb_data->ctrl_info);
+
+	if (cb_data->ctrl_info == MHI_STATE_CONNECTED) {
+		/* Open the AT ctrl channels */
+		rc = open_client_mhi_channels(client);
+		if (rc) {
+			uci_log(UCI_DBG_INFO,
+				"Failed to open channels ret %d\n", rc);
+			return;
+		}
+		/* Init the completion event for AT ctrl writes */
+		init_completion(client->write_done);
+		/* Create a work queue to process AT commands */
+		uci_ctxt.at_ctrl_wq =
+			create_singlethread_workqueue("mhi_at_ctrl_wq");
+		INIT_WORK(&uci_ctxt.at_ctrl_work, mhi_uci_at_ctrl_read);
+	}
+}
+
+static void mhi_uci_adb_client_cb(struct mhi_dev_client_cb_data *cb_data)
 {
 	struct uci_client *client = cb_data->user_data;
 
@@ -1305,16 +1550,26 @@
 		client->in_chan_attr = ++chan_attrib;
 		client->in_chan = index * 2;
 		client->out_chan = index * 2 + 1;
+		client->at_ctrl_mask = 0;
 		client->in_buf_list =
 			kcalloc(chan_attrib->nr_trbs,
 			sizeof(struct mhi_dev_iov),
 			GFP_KERNEL);
 		if (!client->in_buf_list)
 			return -ENOMEM;
-		/* Register callback with MHI if requested */
-		if (client->out_chan_attr->register_cb)
-			mhi_register_state_cb(mhi_uci_client_cb, client,
-						client->out_chan);
+		/* Register channel state change cb with MHI if requested */
+		if (client->out_chan_attr->chan_state_cb)
+			mhi_register_state_cb(
+					client->out_chan_attr->chan_state_cb,
+					client,
+					client->out_chan);
+		if (client->in_chan_attr->wr_cmpl) {
+			client->write_done = kzalloc(
+					sizeof(*client->write_done),
+					GFP_KERNEL);
+			if (!client->write_done)
+				return -ENOMEM;
+		}
 	}
 	return 0;
 }
@@ -1396,7 +1651,7 @@
 		 * this client's channels is called by the MHI driver,
 		 * if one is registered.
 		 */
-		if (mhi_client->in_chan_attr->register_cb)
+		if (mhi_client->in_chan_attr->chan_state_cb)
 			continue;
 		ret_val = uci_device_create(mhi_client);
 		if (ret_val)
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 4c0531f..5a379b4 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,7 @@
 struct device;
 
 static const char * const gpio_en_name = "qcom,wigig-en";
+static const char * const gpio_dc_name = "qcom,wigig-dc";
 static const char * const sleep_clk_en_name = "qcom,sleep-clk-en";
 
 struct wigig_pci {
@@ -90,6 +91,7 @@
 	struct list_head list;
 	struct device *dev; /* for platform device */
 	int gpio_en; /* card enable */
+	int gpio_dc; /* optional DC GPIO, from qcom,wigig-dc */
 	int sleep_clk_en; /* sleep clock enable for low PM management */
 
 	/* pci device */
@@ -539,6 +541,9 @@
 	if (ctx->gpio_en >= 0)
 		gpio_direction_output(ctx->gpio_en, 0);
 
+	if (ctx->gpio_dc >= 0)
+		gpio_direction_output(ctx->gpio_dc, 0);
+
 	if (ctx->sleep_clk_en >= 0)
 		gpio_direction_output(ctx->sleep_clk_en, 0);
 
@@ -569,6 +574,11 @@
 	if (ctx->sleep_clk_en >= 0)
 		gpio_direction_output(ctx->sleep_clk_en, 1);
 
+	if (ctx->gpio_dc >= 0) {
+		gpio_direction_output(ctx->gpio_dc, 1);
+		msleep(WIGIG_ENABLE_DELAY);
+	}
+
 	if (ctx->gpio_en >= 0) {
 		gpio_direction_output(ctx->gpio_en, 1);
 		msleep(WIGIG_ENABLE_DELAY);
@@ -1060,6 +1070,7 @@
 	 *	compatible = "qcom,wil6210";
 	 *	qcom,pcie-parent = <&pcie1>;
 	 *	qcom,wigig-en = <&tlmm 94 0>; (ctx->gpio_en)
+	 *	qcom,wigig-dc = <&tlmm 81 0>; (ctx->gpio_dc)
 	 *	qcom,sleep-clk-en = <&pm8994_gpios 18 0>; (ctx->sleep_clk_en)
 	 *	qcom,msm-bus,name = "wil6210";
 	 *	qcom,msm-bus,num-cases = <2>;
@@ -1075,7 +1086,11 @@
 	 * qcom,smmu-exist;
 	 */
 
-	/* wigig-en is optional property */
+	/* wigig-en and wigig-dc are optional properties */
+	ctx->gpio_dc = of_get_named_gpio(of_node, gpio_dc_name, 0);
+	if (ctx->gpio_dc < 0)
+		dev_warn(ctx->dev, "GPIO <%s> not found, dc GPIO not used\n",
+			 gpio_dc_name);
 	ctx->gpio_en = of_get_named_gpio(of_node, gpio_en_name, 0);
 	if (ctx->gpio_en < 0)
 		dev_warn(ctx->dev, "GPIO <%s> not found, enable GPIO not used\n",
@@ -1150,6 +1165,22 @@
 		goto out_vreg_clk;
 	}
 
+	if (ctx->gpio_dc >= 0) {
+		rc = gpio_request(ctx->gpio_dc, gpio_dc_name);
+		if (rc < 0) {
+			dev_err(ctx->dev, "failed to request GPIO %d <%s>\n",
+				ctx->gpio_dc, gpio_dc_name);
+			goto out_req_dc;
+		}
+		rc = gpio_direction_output(ctx->gpio_dc, 1);
+		if (rc < 0) {
+			dev_err(ctx->dev, "failed to set GPIO %d <%s>\n",
+				ctx->gpio_dc, gpio_dc_name);
+			goto out_set_dc;
+		}
+		msleep(WIGIG_ENABLE_DELAY);
+	}
+
 	if (ctx->gpio_en >= 0) {
 		rc = gpio_request(ctx->gpio_en, gpio_en_name);
 		if (rc < 0) {
@@ -1260,12 +1291,13 @@
 	/* report */
 	dev_info(ctx->dev, "msm_11ad discovered. %p {\n"
 		 "  gpio_en = %d\n"
+		 "  gpio_dc = %d\n"
 		 "  sleep_clk_en = %d\n"
 		 "  rc_index = %d\n"
 		 "  use_smmu = %d\n"
 		 "  pcidev = %p\n"
-		 "}\n", ctx, ctx->gpio_en, ctx->sleep_clk_en, ctx->rc_index,
-		 ctx->use_smmu, ctx->pcidev);
+		 "}\n", ctx, ctx->gpio_en, ctx->gpio_dc, ctx->sleep_clk_en,
+		 ctx->rc_index, ctx->use_smmu, ctx->pcidev);
 
 	platform_set_drvdata(pdev, ctx);
 	device_disable_async_suspend(&pcidev->dev);
@@ -1285,6 +1317,13 @@
 		gpio_free(ctx->gpio_en);
 out_req:
 	ctx->gpio_en = -EINVAL;
+	if (ctx->gpio_dc >= 0)
+		gpio_direction_output(ctx->gpio_dc, 0);
+out_set_dc:
+	if (ctx->gpio_dc >= 0)
+		gpio_free(ctx->gpio_dc);
+out_req_dc:
+	ctx->gpio_dc = -EINVAL;
 out_vreg_clk:
 	msm_11ad_disable_clocks(ctx);
 	msm_11ad_release_clocks(ctx);
@@ -1309,6 +1348,10 @@
 		gpio_direction_output(ctx->gpio_en, 0);
 		gpio_free(ctx->gpio_en);
 	}
+	if (ctx->gpio_dc >= 0) {
+		gpio_direction_output(ctx->gpio_dc, 0);
+		gpio_free(ctx->gpio_dc);
+	}
 	if (ctx->sleep_clk_en >= 0)
 		gpio_free(ctx->sleep_clk_en);
 
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index 0056294..fe41993 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -518,6 +518,7 @@
 		if (obj && obj->type == ACPI_TYPE_INTEGER)
 			*out_data = (u32) obj->integer.value;
 	}
+	kfree(output.pointer);
 	return status;
 
 }
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 687cc5b..c857d2d 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -531,6 +531,7 @@
 	{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
 	{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
 	{ KE_IGNORE, 0xC6, },  /* Ambient Light Sensor notification */
+	{ KE_KEY, 0xFA, { KEY_PROG2 } },           /* Lid flip action */
 	{ KE_END, 0},
 };
 
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index b5b8901..b7dfe06 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -17,6 +17,7 @@
 #include <linux/bitops.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <asm/intel_punit_ipc.h>
 
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 074bf2f..79a2289 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -34,6 +34,7 @@
 #define TOSHIBA_ACPI_VERSION	"0.24"
 #define PROC_INTERFACE_VERSION	1
 
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -1687,7 +1688,7 @@
 	.write		= keys_proc_write,
 };
 
-static int version_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused version_proc_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "driver:                  %s\n", TOSHIBA_ACPI_VERSION);
 	seq_printf(m, "proc_interface:          %d\n", PROC_INTERFACE_VERSION);
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 102f95a..e9e749f 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -35,6 +35,7 @@
 }
 
 static struct device *vexpress_power_off_device;
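+/* Ensure the restart handler is registered only once across devices */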
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
 
 static void vexpress_power_off(void)
 {
@@ -99,10 +100,13 @@
 	int err;
 
 	vexpress_restart_device = dev;
-	err = register_restart_handler(&vexpress_restart_nb);
-	if (err) {
-		dev_err(dev, "cannot register restart handler (err=%d)\n", err);
-		return err;
+	if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+		err = register_restart_handler(&vexpress_restart_nb);
+		if (err) {
+			dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+			atomic_dec(&vexpress_restart_nb_refcnt);
+			return err;
+		}
 	}
 	device_create_file(dev, &dev_attr_active);
 
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index edb36bf..f627b39 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -243,10 +243,10 @@
 	struct power_supply_desc *psy_desc;
 	struct power_supply_config psy_cfg = {};
 	struct gab_platform_data *pdata = pdev->dev.platform_data;
-	enum power_supply_property *properties;
 	int ret = 0;
 	int chan;
-	int index = 0;
+	int index = ARRAY_SIZE(gab_props);
+	bool any = false;
 
 	adc_bat = devm_kzalloc(&pdev->dev, sizeof(*adc_bat), GFP_KERNEL);
 	if (!adc_bat) {
@@ -280,8 +280,6 @@
 	}
 
 	memcpy(psy_desc->properties, gab_props, sizeof(gab_props));
-	properties = (enum power_supply_property *)
-			((char *)psy_desc->properties + sizeof(gab_props));
 
 	/*
 	 * getting channel from iio and copying the battery properties
@@ -295,15 +293,22 @@
 			adc_bat->channel[chan] = NULL;
 		} else {
 			/* copying properties for supported channels only */
-			memcpy(properties + sizeof(*(psy_desc->properties)) * index,
-					&gab_dyn_props[chan],
-					sizeof(gab_dyn_props[chan]));
-			index++;
+			int index2;
+
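+			/* Add the property only if it is not already listed */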
+			for (index2 = 0; index2 < index; index2++) {
+				if (psy_desc->properties[index2] ==
+				    gab_dyn_props[chan])
+					break;	/* already known */
+			}
+			if (index2 == index)	/* really new */
+				psy_desc->properties[index++] =
+					gab_dyn_props[chan];
+			any = true;
 		}
 	}
 
 	/* none of the channels are supported so let's bail out */
-	if (index == 0) {
+	if (!any) {
 		ret = -ENODEV;
 		goto second_mem_fail;
 	}
@@ -314,7 +319,7 @@
 	 * as some channels may not be supported by the device. So
 	 * we need to take care of that.
 	 */
-	psy_desc->num_properties = ARRAY_SIZE(gab_props) + index;
+	psy_desc->num_properties = index;
 
 	adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg);
 	if (IS_ERR(adc_bat->psy)) {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 077d237..77b6885 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
@@ -141,8 +142,13 @@
 	struct power_supply *psy = container_of(work, struct power_supply,
 						deferred_register_work.work);
 
-	if (psy->dev.parent)
-		mutex_lock(&psy->dev.parent->mutex);
+	if (psy->dev.parent) {
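+		/*
+		 * Poll for the parent lock rather than blocking so this work
+		 * can bail out once the supply is being removed, avoiding a
+		 * deadlock with power_supply_unregister().
+		 */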
+		while (!mutex_trylock(&psy->dev.parent->mutex)) {
+			if (psy->removing)
+				return;
+			msleep(10);
+		}
+	}
 
 	psy_register_cooler(psy->dev.parent, psy);
 	power_supply_changed(psy);
@@ -948,6 +954,7 @@
 void power_supply_unregister(struct power_supply *psy)
 {
 	WARN_ON(atomic_dec_return(&psy->use_cnt));
+	psy->removing = true;
 	cancel_work_sync(&psy->changed_work);
 	cancel_delayed_work_sync(&psy->deferred_register_work);
 	sysfs_remove_link(&psy->dev.kobj, "powers");
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 899cd31..14f179b 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -45,6 +45,7 @@
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 #define ICL_LIMIT_VOTER			"ICL_LIMIT_VOTER"
+#define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -52,6 +53,7 @@
 	int			pl_min_icl_ua;
 	int			slave_pct;
 	int			slave_fcc_ua;
+	int			main_fcc_ua;
 	int			restricted_current;
 	bool			restricted_charging_enabled;
 	struct votable		*fcc_votable;
@@ -65,21 +67,32 @@
 	struct work_struct	pl_disable_forever_work;
 	struct work_struct	pl_taper_work;
 	struct delayed_work	pl_awake_work;
+	struct delayed_work	fcc_stepper_work;
 	bool			taper_work_running;
 	struct power_supply	*main_psy;
 	struct power_supply	*pl_psy;
 	struct power_supply	*batt_psy;
 	struct power_supply	*usb_psy;
+	struct power_supply	*dc_psy;
 	int			charge_type;
 	int			total_settled_ua;
 	int			pl_settled_ua;
 	int			pl_fcc_max;
+	int			fcc_stepper_enable;
+	int			main_step_fcc_dir;
+	int			main_step_fcc_count;
+	int			main_step_fcc_residual;
+	int			parallel_step_fcc_dir;
+	int			parallel_step_fcc_count;
+	int			parallel_step_fcc_residual;
+	int			step_fcc;
 	u32			wa_flags;
 	struct class		qcom_batt_class;
 	struct wakeup_source	*pl_ws;
 	struct notifier_block	nb;
 	bool			pl_disable;
 	int			taper_entry_fv;
+	u32			float_voltage_uv;
 };
 
 struct pl_data *the_chip;
@@ -111,6 +124,7 @@
 	SLAVE_PCT,
 	RESTRICT_CHG_ENABLE,
 	RESTRICT_CHG_CURRENT,
+	FCC_STEPPING_IN_PROGRESS,
 };
 
 /*******
@@ -258,7 +272,6 @@
 
 	chip->total_settled_ua = total_settled_ua;
 	chip->pl_settled_ua = slave_ua;
-
 }
 
 static ssize_t version_show(struct class *c, struct class_attribute *attr,
@@ -373,6 +386,18 @@
 	return count;
 }
 
+/****************************
+ * FCC STEPPING IN PROGRESS *
+ ****************************/
+static ssize_t fcc_stepping_in_progress_show(struct class *c,
+			struct class_attribute *attr, char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+				qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->step_fcc);
+}
+
 static struct class_attribute pl_attributes[] = {
 	[VER]			= __ATTR_RO(version),
 	[SLAVE_PCT]		= __ATTR(parallel_pct, 0644,
@@ -381,6 +406,8 @@
 					restrict_chg_show, restrict_chg_store),
 	[RESTRICT_CHG_CURRENT]	= __ATTR(restricted_current, 0644,
 					restrict_cur_show, restrict_cur_store),
+	[FCC_STEPPING_IN_PROGRESS]
+				= __ATTR_RO(fcc_stepping_in_progress),
 	__ATTR_NULL,
 };
 
@@ -388,6 +415,10 @@
  *  FCC  *
  **********/
 #define EFFICIENCY_PCT	80
+#define FCC_STEP_SIZE_UA 100000
+#define FCC_STEP_UPDATE_DELAY_MS 1000
+#define STEP_UP 1
+#define STEP_DOWN -1
 static void get_fcc_split(struct pl_data *chip, int total_ua,
 			int *master_ua, int *slave_ua)
 {
@@ -440,6 +471,47 @@
 		*master_ua = max(0, total_ua - *slave_ua);
 }
 
+static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
+			int parallel_fcc_ua)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/* Read current FCC of main charger */
+	rc = power_supply_get_property(chip->main_psy,
+		POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get main charger current fcc, rc=%d\n", rc);
+		return;
+	}
+	chip->main_fcc_ua = pval.intval;
+
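+	/*
+	 * Work out, for both main and parallel chargers, how many
+	 * FCC_STEP_SIZE_UA steps (plus a residual) separate the present FCC
+	 * from the requested one, and in which direction to ramp.
+	 */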
+	chip->main_step_fcc_dir = (main_fcc_ua > pval.intval) ?
+				STEP_UP : STEP_DOWN;
+	chip->main_step_fcc_count = abs((main_fcc_ua - pval.intval) /
+				FCC_STEP_SIZE_UA);
+	chip->main_step_fcc_residual = (main_fcc_ua - pval.intval) %
+				FCC_STEP_SIZE_UA;
+
+	chip->parallel_step_fcc_dir = (parallel_fcc_ua > chip->slave_fcc_ua) ?
+				STEP_UP : STEP_DOWN;
+	chip->parallel_step_fcc_count = abs((parallel_fcc_ua -
+				chip->slave_fcc_ua) / FCC_STEP_SIZE_UA);
+	chip->parallel_step_fcc_residual = (parallel_fcc_ua -
+				chip->slave_fcc_ua) % FCC_STEP_SIZE_UA;
+
+	if (chip->parallel_step_fcc_count || chip->parallel_step_fcc_residual
+		|| chip->main_step_fcc_count || chip->main_step_fcc_residual)
+		chip->step_fcc = 1;
+
+	pr_debug("Main FCC Stepper parameters: main_step_direction: %d, main_step_count: %d, main_residual_fcc: %d\n",
+		chip->main_step_fcc_dir, chip->main_step_fcc_count,
+		chip->main_step_fcc_residual);
+	pr_debug("Parallel FCC Stepper parameters: parallel_step_direction: %d, parallel_step_count: %d, parallel_residual_fcc: %d\n",
+		chip->parallel_step_fcc_dir, chip->parallel_step_fcc_count,
+		chip->parallel_step_fcc_residual);
+}
+
 #define MINIMUM_PARALLEL_FCC_UA		500000
 #define PL_TAPER_WORK_DELAY_MS		500
 #define TAPER_RESIDUAL_PCT		90
@@ -559,6 +631,206 @@
 	return 0;
 }
 
+static void fcc_stepper_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work, struct pl_data,
+			fcc_stepper_work.work);
+	union power_supply_propval pval = {0, };
+	int reschedule_ms = 0, rc = 0, charger_present = 0;
+	int main_fcc = chip->main_fcc_ua;
+	int parallel_fcc = chip->slave_fcc_ua;
+
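+	/*
+	 * Ramp main and parallel FCC by one FCC_STEP_SIZE_UA step per run,
+	 * rescheduling every FCC_STEP_UPDATE_DELAY_MS until both chargers
+	 * reach the split computed by get_fcc_stepper_params().
+	 */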
+	/* Check whether USB is present or not */
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0)
+		pr_err("Couldn't get USB Present status, rc=%d\n", rc);
+
+	charger_present = pval.intval;
+
+	/* Check whether DC charger is present or not */
+	if (!chip->dc_psy)
+		chip->dc_psy = power_supply_get_by_name("dc");
+	if (chip->dc_psy) {
+		rc = power_supply_get_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_PRESENT, &pval);
+		if (rc < 0)
+			pr_err("Couldn't get DC Present status, rc=%d\n", rc);
+
+		charger_present |= pval.intval;
+	}
+
+	/*
+	 * If neither USB nor DC charging is present, disable the parallel
+	 * charger, set main FCC to the effective value of the FCC votable
+	 * and exit.
+	 */
+	if (!charger_present) {
+		/* Disable parallel */
+		parallel_fcc = 0;
+
+		if (chip->pl_psy) {
+			pval.intval = 1;
+			rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+			if (rc < 0) {
+				pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+				goto out;
+			}
+
+			chip->pl_disable = true;
+			power_supply_changed(chip->pl_psy);
+		}
+
+		main_fcc = get_effective_result_locked(chip->fcc_votable);
+		pval.intval = main_fcc;
+		rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+			goto out;
+		}
+
+		goto stepper_exit;
+	}
+
+	if (chip->main_step_fcc_count) {
+		main_fcc += (FCC_STEP_SIZE_UA * chip->main_step_fcc_dir);
+		chip->main_step_fcc_count--;
+		reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+	} else if (chip->main_step_fcc_residual) {
+		main_fcc += chip->main_step_fcc_residual;
+		chip->main_step_fcc_residual = 0;
+	}
+
+	if (chip->parallel_step_fcc_count) {
+		parallel_fcc += (FCC_STEP_SIZE_UA *
+			chip->parallel_step_fcc_dir);
+		chip->parallel_step_fcc_count--;
+		reschedule_ms = FCC_STEP_UPDATE_DELAY_MS;
+	} else if (chip->parallel_step_fcc_residual) {
+		parallel_fcc += chip->parallel_step_fcc_residual;
+		chip->parallel_step_fcc_residual = 0;
+	}
+
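+	/*
+	 * Ordering matters: when the parallel share is shrinking, lower the
+	 * parallel FCC (or suspend the slave below the minimum) before
+	 * updating main; otherwise update main first and then parallel.
+	 */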
+	if (parallel_fcc < chip->slave_fcc_ua) {
+		/* Set parallel FCC */
+		if (chip->pl_psy && !chip->pl_disable) {
+			if (parallel_fcc < MINIMUM_PARALLEL_FCC_UA) {
+				pval.intval = 1;
+				rc = power_supply_set_property(chip->pl_psy,
+					POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+				if (rc < 0) {
+					pr_err("Couldn't change slave suspend state rc=%d\n",
+						rc);
+					goto out;
+				}
+
+				if (IS_USBIN(chip->pl_mode))
+					split_settled(chip);
+
+				parallel_fcc = 0;
+				chip->parallel_step_fcc_count = 0;
+				chip->parallel_step_fcc_residual = 0;
+				chip->total_settled_ua = 0;
+				chip->pl_settled_ua = 0;
+				chip->pl_disable = true;
+				power_supply_changed(chip->pl_psy);
+			} else {
+				/* Set Parallel FCC */
+				pval.intval = parallel_fcc;
+				rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+					&pval);
+				if (rc < 0) {
+					pr_err("Couldn't set parallel charger fcc, rc=%d\n",
+						rc);
+					goto out;
+				}
+			}
+		}
+
+		/* Set main FCC */
+		pval.intval = main_fcc;
+		rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+			goto out;
+		}
+	} else {
+		/* Set main FCC */
+		pval.intval = main_fcc;
+		rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set main charger fcc, rc=%d\n", rc);
+			goto out;
+		}
+
+		/* Set parallel FCC */
+		if (chip->pl_psy) {
+			pval.intval = parallel_fcc;
+			rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+			if (rc < 0) {
+				pr_err("Couldn't set parallel charger fcc, rc=%d\n",
+					rc);
+				goto out;
+			}
+
+			/*
+			 * Enable parallel charger only if it was disabled
+			 * earlier and configured slave fcc is greater than or
+			 * equal to minimum parallel FCC value.
+			 */
+			if (chip->pl_disable && parallel_fcc
+					>= MINIMUM_PARALLEL_FCC_UA) {
+				pval.intval = 0;
+				rc = power_supply_set_property(chip->pl_psy,
+					POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+				if (rc < 0) {
+					pr_err("Couldn't change slave suspend state rc=%d\n",
+						rc);
+					goto out;
+				}
+
+				if (IS_USBIN(chip->pl_mode))
+					split_settled(chip);
+
+				chip->pl_disable = false;
+				power_supply_changed(chip->pl_psy);
+			}
+		}
+	}
+
+stepper_exit:
+	chip->main_fcc_ua = main_fcc;
+	chip->slave_fcc_ua = parallel_fcc;
+
+	if (reschedule_ms) {
+		schedule_delayed_work(&chip->fcc_stepper_work,
+				msecs_to_jiffies(reschedule_ms));
+		pr_debug("Rescheduling FCC_STEPPER work\n");
+		return;
+	}
+out:
+	chip->step_fcc = 0;
+	vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0);
+}
+
+static bool is_batt_available(struct pl_data *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
 #define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
 static int pl_fv_vote_callback(struct votable *votable, void *data,
 			int fv_uv, const char *client)
@@ -592,6 +864,31 @@
 		}
 	}
 
+	/*
+	 * check for termination at reduced float voltage and re-trigger
+	 * charging if new float voltage is above last FV.
+	 */
+	if ((chip->float_voltage_uv < fv_uv) && is_batt_available(chip)) {
+		rc = power_supply_get_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_STATUS, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get battery status rc=%d\n", rc);
+		} else {
+			if (pval.intval == POWER_SUPPLY_STATUS_FULL) {
+				pr_debug("re-triggering charging\n");
+				pval.intval = 1;
+				rc = power_supply_set_property(chip->batt_psy,
+					POWER_SUPPLY_PROP_RECHARGE_SOC,
+					&pval);
+				if (rc < 0)
+					pr_err("Couldn't set force recharge rc=%d\n",
+							rc);
+			}
+		}
+	}
+
+	chip->float_voltage_uv = fv_uv;
+
 	return 0;
 }
 
@@ -709,6 +1006,30 @@
 	if (!is_main_available(chip))
 		return -ENODEV;
 
+	if (!is_batt_available(chip))
+		return -ENODEV;
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+	if (!chip->usb_psy) {
+		pr_err("Couldn't get usb psy\n");
+		return -ENODEV;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read FCC step update status, rc=%d\n", rc);
+		return rc;
+	}
+	chip->fcc_stepper_enable = pval.intval;
+	pr_debug("FCC Stepper %s\n", pval.intval ? "enabled" : "disabled");
+
+	if (chip->fcc_stepper_enable) {
+		cancel_delayed_work_sync(&chip->fcc_stepper_work);
+		vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0);
+	}
+
 	total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
 
 	if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) {
@@ -744,73 +1065,87 @@
 		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
 				&slave_fcc_ua);
 
-		/*
-		 * If there is an increase in slave share
-		 * (Also handles parallel enable case)
-		 *	Set Main ICL then slave FCC
-		 * else
-		 * (Also handles parallel disable case)
-		 *	Set slave ICL then main FCC.
-		 */
-		if (slave_fcc_ua > chip->slave_fcc_ua) {
-			pval.intval = master_fcc_ua;
-			rc = power_supply_set_property(chip->main_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-			if (rc < 0) {
-				pr_err("Could not set main fcc, rc=%d\n", rc);
-				return rc;
-			}
-
-			pval.intval = slave_fcc_ua;
-			rc = power_supply_set_property(chip->pl_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-			if (rc < 0) {
-				pr_err("Couldn't set parallel fcc, rc=%d\n",
-						rc);
-				return rc;
-			}
-
-			chip->slave_fcc_ua = slave_fcc_ua;
+		if (chip->fcc_stepper_enable) {
+			get_fcc_stepper_params(chip, master_fcc_ua,
+					slave_fcc_ua);
+			if (chip->step_fcc) {
+				vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
+					true, 0);
+				schedule_delayed_work(&chip->fcc_stepper_work,
+					0);
+			}
 		} else {
-			pval.intval = slave_fcc_ua;
-			rc = power_supply_set_property(chip->pl_psy,
+			/*
+			 * If there is an increase in slave share
+			 * (Also handles parallel enable case)
+			 *	Set Main ICL then slave FCC
+			 * else
+			 * (Also handles parallel disable case)
+			 *	Set slave ICL then main FCC.
+			 */
+			if (slave_fcc_ua > chip->slave_fcc_ua) {
+				pval.intval = master_fcc_ua;
+				rc = power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-			if (rc < 0) {
-				pr_err("Couldn't set parallel fcc, rc=%d\n",
+					&pval);
+				if (rc < 0) {
+					pr_err("Could not set main fcc, rc=%d\n",
 						rc);
-				return rc;
-			}
+					return rc;
+				}
 
-			chip->slave_fcc_ua = slave_fcc_ua;
-
-			pval.intval = master_fcc_ua;
-			rc = power_supply_set_property(chip->main_psy,
+				pval.intval = slave_fcc_ua;
+				rc = power_supply_set_property(chip->pl_psy,
 				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-			if (rc < 0) {
-				pr_err("Could not set main fcc, rc=%d\n", rc);
-				return rc;
+					&pval);
+				if (rc < 0) {
+					pr_err("Couldn't set parallel fcc, rc=%d\n",
+						rc);
+					return rc;
+				}
+
+				chip->slave_fcc_ua = slave_fcc_ua;
+			} else {
+				pval.intval = slave_fcc_ua;
+				rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+					&pval);
+				if (rc < 0) {
+					pr_err("Couldn't set parallel fcc, rc=%d\n",
+						rc);
+					return rc;
+				}
+
+				chip->slave_fcc_ua = slave_fcc_ua;
+
+				pval.intval = master_fcc_ua;
+				rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+					&pval);
+				if (rc < 0) {
+					pr_err("Could not set main fcc, rc=%d\n",
+						rc);
+					return rc;
+				}
 			}
+
+			/*
+			 * Enable will be called with a valid pl_psy always. The
+			 * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
+			 * is seen.
+			 */
+			pval.intval = 0;
+			rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+			if (rc < 0)
+				pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+
+			if (IS_USBIN(chip->pl_mode))
+				split_settled(chip);
 		}
 
 		/*
-		 * Enable will be called with a valid pl_psy always. The
-		 * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
-		 * is seen.
-		 */
-		pval.intval = 0;
-		rc = power_supply_set_property(chip->pl_psy,
-				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
-		if (rc < 0)
-			pr_err("Couldn't change slave suspend state rc=%d\n",
-				rc);
-
-		if (IS_USBIN(chip->pl_mode))
-			split_settled(chip);
-		/*
 		 * we could have been enabled while in taper mode,
 		 *  start the taper work if so
 		 */
@@ -835,43 +1170,54 @@
 			(master_fcc_ua * 100) / total_fcc_ua,
 			(slave_fcc_ua * 100) / total_fcc_ua);
 	} else {
-		if (IS_USBIN(chip->pl_mode))
-			split_settled(chip);
+		if (!chip->fcc_stepper_enable) {
+			if (IS_USBIN(chip->pl_mode))
+				split_settled(chip);
 
-		/* pl_psy may be NULL while in the disable branch */
-		if (chip->pl_psy) {
-			pval.intval = 1;
-			rc = power_supply_set_property(chip->pl_psy,
+			/* pl_psy may be NULL while in the disable branch */
+			if (chip->pl_psy) {
+				pval.intval = 1;
+				rc = power_supply_set_property(chip->pl_psy,
 					POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
-			if (rc < 0)
-				pr_err("Couldn't change slave suspend state rc=%d\n",
-					rc);
-		}
+				if (rc < 0)
+					pr_err("Couldn't change slave suspend state rc=%d\n",
+						rc);
+			}
 
-		/* main psy gets all share */
-		pval.intval = total_fcc_ua;
-		rc = power_supply_set_property(chip->main_psy,
+			/* main psy gets all share */
+			pval.intval = total_fcc_ua;
+			rc = power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 				&pval);
-		if (rc < 0) {
-			pr_err("Could not set main fcc, rc=%d\n", rc);
-			return rc;
+			if (rc < 0) {
+				pr_err("Could not set main fcc, rc=%d\n", rc);
+				return rc;
+			}
+
+			/* reset parallel FCC */
+			chip->slave_fcc_ua = 0;
+			chip->total_settled_ua = 0;
+			chip->pl_settled_ua = 0;
+		} else {
+			get_fcc_stepper_params(chip, total_fcc_ua, 0);
+			if (chip->step_fcc) {
+				vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
+					true, 0);
+				schedule_delayed_work(&chip->fcc_stepper_work,
+					0);
+			}
 		}
 
-		/* reset parallel FCC */
-		chip->slave_fcc_ua = 0;
 		rerun_election(chip->fv_votable);
 
 		cancel_delayed_work_sync(&chip->pl_awake_work);
 		schedule_delayed_work(&chip->pl_awake_work,
 						msecs_to_jiffies(5000));
-
-		chip->total_settled_ua = 0;
-		chip->pl_settled_ua = 0;
 	}
 
 	/* notify parallel state change */
-	if (chip->pl_psy && (chip->pl_disable != pl_disable)) {
+	if (chip->pl_psy && (chip->pl_disable != pl_disable)
+				&& !chip->fcc_stepper_enable) {
 		power_supply_changed(chip->pl_psy);
 		chip->pl_disable = (bool)pl_disable;
 	}
@@ -906,17 +1252,6 @@
 	return 0;
 }
 
-static bool is_batt_available(struct pl_data *chip)
-{
-	if (!chip->batt_psy)
-		chip->batt_psy = power_supply_get_by_name("battery");
-
-	if (!chip->batt_psy)
-		return false;
-
-	return true;
-}
-
 static bool is_parallel_available(struct pl_data *chip)
 {
 	union power_supply_propval pval = {0, };
@@ -1080,6 +1415,7 @@
 	else
 		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
 
+	rerun_election(chip->fcc_votable);
 
 	if (IS_USBIN(chip->pl_mode)) {
 		/*
@@ -1328,6 +1664,7 @@
 	INIT_WORK(&chip->pl_taper_work, pl_taper_work);
 	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
 	INIT_DELAYED_WORK(&chip->pl_awake_work, pl_awake_work);
+	INIT_DELAYED_WORK(&chip->fcc_stepper_work, fcc_stepper_work);
 
 	rc = pl_register_notifier(chip);
 	if (rc < 0) {
@@ -1383,6 +1720,7 @@
 	cancel_work_sync(&chip->pl_taper_work);
 	cancel_work_sync(&chip->pl_disable_forever_work);
 	cancel_delayed_work_sync(&chip->pl_awake_work);
+	cancel_delayed_work_sync(&chip->fcc_stepper_work);
 
 	power_supply_unreg_notifier(&chip->nb);
 	destroy_votable(chip->pl_enable_votable_indirect);
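The hunks above gate the master/slave FCC writes behind chip->fcc_stepper_enable and defer the actual ramp to fcc_stepper_work. As a rough illustration only (not part of the patch), the sketch below shows the basic stepping idea, walking two current limits toward their targets in fixed increments; the helper names and the 100 mA step size are assumptions for the example.

/*
 * Illustrative sketch: ramp two FCC rails toward their targets one
 * fixed step per "work" iteration, the way an FCC stepper would.
 */
#include <stdio.h>
#include <stdlib.h>

#define FCC_STEP_UA	100000	/* assumed 100 mA per step */

static int step_toward(int now_ua, int target_ua)
{
	if (abs(target_ua - now_ua) <= FCC_STEP_UA)
		return target_ua;
	return now_ua + (target_ua > now_ua ? FCC_STEP_UA : -FCC_STEP_UA);
}

int main(void)
{
	int main_fcc = 1500000, main_target = 2500000;
	int slave_fcc = 0, slave_target = 1000000;

	/* each iteration nudges both rails one step closer */
	while (main_fcc != main_target || slave_fcc != slave_target) {
		main_fcc = step_toward(main_fcc, main_target);
		slave_fcc = step_toward(slave_fcc, slave_target);
		printf("main=%d uA slave=%d uA\n", main_fcc, slave_fcc);
	}
	return 0;
}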
diff --git a/drivers/power/supply/qcom/batterydata-interface.c b/drivers/power/supply/qcom/batterydata-interface.c
index 0187827..9a75de1 100644
--- a/drivers/power/supply/qcom/batterydata-interface.c
+++ b/drivers/power/supply/qcom/batterydata-interface.c
@@ -66,7 +66,7 @@
 
 	switch (cmd) {
 	case BPIOCXSOC:
-		soc = interpolate_pc(battery->profile->pc_temp_ocv_lut,
+		soc = interpolate_pc_bms(battery->profile->pc_temp_ocv_lut,
 					bp.batt_temp, bp.ocv_uv / 1000);
 		rc = put_user(soc, &bp_user->soc);
 		if (rc) {
@@ -77,7 +77,7 @@
 				bp.ocv_uv / 1000, bp.batt_temp, soc);
 		break;
 	case BPIOCXRBATT:
-		rbatt_sf = interpolate_scalingfactor(
+		rbatt_sf = interpolate_scalingfactor_bms(
 				battery->profile->rbatt_sf_lut,
 				bp.batt_temp, bp.soc);
 		rc = put_user(rbatt_sf, &bp_user->rbatt_sf);
@@ -89,7 +89,7 @@
 					bp.soc, bp.batt_temp, rbatt_sf);
 		break;
 	case BPIOCXSLOPE:
-		slope = interpolate_slope(battery->profile->pc_temp_ocv_lut,
+		slope = interpolate_slope_bms(battery->profile->pc_temp_ocv_lut,
 							bp.batt_temp, bp.soc);
 		rc = put_user(slope, &bp_user->slope);
 		if (rc) {
@@ -100,7 +100,7 @@
 					bp.soc, bp.batt_temp, slope);
 		break;
 	case BPIOCXFCC:
-		fcc_mah = interpolate_fcc(battery->profile->fcc_temp_lut,
+		fcc_mah = interpolate_fcc_bms(battery->profile->fcc_temp_lut,
 							bp.batt_temp);
 		rc = put_user(fcc_mah, &bp_user->fcc_mah);
 		if (rc) {
diff --git a/drivers/power/supply/qcom/batterydata-lib.c b/drivers/power/supply/qcom/batterydata-lib.c
index fae5658..d10d719 100644
--- a/drivers/power/supply/qcom/batterydata-lib.c
+++ b/drivers/power/supply/qcom/batterydata-lib.c
@@ -15,7 +15,7 @@
 #include <linux/module.h>
 #include <linux/batterydata-lib.h>
 
-int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+int linear_interpolate_bms(int y0, int x0, int y1, int x1, int x)
 {
 	if (y0 == y1 || x == x0)
 		return y0;
@@ -25,7 +25,7 @@
 	return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
 }
 
-static int interpolate_single_lut_scaled(struct single_row_lut *lut,
+static int interpolate_single_lut_scaled_bms(struct single_row_lut *lut,
 						int x, int scale)
 {
 	int i, result;
@@ -47,7 +47,7 @@
 	if (x == lut->x[i] * scale) {
 		result = lut->y[i];
 	} else {
-		result = linear_interpolate(
+		result = linear_interpolate_bms(
 			lut->y[i - 1],
 			lut->x[i - 1] * scale,
 			lut->y[i],
@@ -57,14 +57,14 @@
 	return result;
 }
 
-int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp)
+int interpolate_fcc_bms(struct single_row_lut *fcc_temp_lut, int batt_temp)
 {
-	return interpolate_single_lut_scaled(fcc_temp_lut,
+	return interpolate_single_lut_scaled_bms(fcc_temp_lut,
 						batt_temp,
 						DEGC_SCALE);
 }
 
-int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+int interpolate_scalingfactor_fcc_bms(struct single_row_lut *fcc_sf_lut,
 		int cycles)
 {
 	/*
@@ -72,12 +72,12 @@
 	 * that case return 100%
 	 */
 	if (fcc_sf_lut)
-		return interpolate_single_lut_scaled(fcc_sf_lut, cycles, 1);
+		return interpolate_single_lut_scaled_bms(fcc_sf_lut, cycles, 1);
 	else
 		return 100;
 }
 
-int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc)
+int interpolate_scalingfactor_bms(struct sf_lut *sf_lut, int row_entry, int pc)
 {
 	int i, scalefactorrow1, scalefactorrow2, scalefactor, rows, cols;
 	int row1 = 0;
@@ -124,7 +124,7 @@
 		if (row_entry <= sf_lut->row_entries[i] * DEGC_SCALE)
 			break;
 	if (row_entry == sf_lut->row_entries[i] * DEGC_SCALE) {
-		scalefactor = linear_interpolate(
+		scalefactor = linear_interpolate_bms(
 				sf_lut->sf[row1][i],
 				sf_lut->percent[row1],
 				sf_lut->sf[row2][i],
@@ -133,21 +133,21 @@
 		return scalefactor;
 	}
 
-	scalefactorrow1 = linear_interpolate(
+	scalefactorrow1 = linear_interpolate_bms(
 				sf_lut->sf[row1][i - 1],
 				sf_lut->row_entries[i - 1] * DEGC_SCALE,
 				sf_lut->sf[row1][i],
 				sf_lut->row_entries[i] * DEGC_SCALE,
 				row_entry);
 
-	scalefactorrow2 = linear_interpolate(
+	scalefactorrow2 = linear_interpolate_bms(
 				sf_lut->sf[row2][i - 1],
 				sf_lut->row_entries[i - 1] * DEGC_SCALE,
 				sf_lut->sf[row2][i],
 				sf_lut->row_entries[i] * DEGC_SCALE,
 				row_entry);
 
-	scalefactor = linear_interpolate(
+	scalefactor = linear_interpolate_bms(
 				scalefactorrow1,
 				sf_lut->percent[row1],
 				scalefactorrow2,
@@ -158,7 +158,7 @@
 }
 
 /* get ocv given a soc  -- reverse lookup */
-int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+int interpolate_ocv_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 				int batt_temp, int pc)
 {
 	int i, ocvrow1, ocvrow2, ocv, rows, cols;
@@ -199,7 +199,7 @@
 		if (batt_temp <= pc_temp_ocv->temp[i] * DEGC_SCALE)
 			break;
 	if (batt_temp == pc_temp_ocv->temp[i] * DEGC_SCALE) {
-		ocv = linear_interpolate(
+		ocv = linear_interpolate_bms(
 				pc_temp_ocv->ocv[row1][i],
 				pc_temp_ocv->percent[row1],
 				pc_temp_ocv->ocv[row2][i],
@@ -208,21 +208,21 @@
 		return ocv;
 	}
 
-	ocvrow1 = linear_interpolate(
+	ocvrow1 = linear_interpolate_bms(
 				pc_temp_ocv->ocv[row1][i - 1],
 				pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
 				pc_temp_ocv->ocv[row1][i],
 				pc_temp_ocv->temp[i] * DEGC_SCALE,
 				batt_temp);
 
-	ocvrow2 = linear_interpolate(
+	ocvrow2 = linear_interpolate_bms(
 				pc_temp_ocv->ocv[row2][i - 1],
 				pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
 				pc_temp_ocv->ocv[row2][i],
 				pc_temp_ocv->temp[i] * DEGC_SCALE,
 				batt_temp);
 
-	ocv = linear_interpolate(
+	ocv = linear_interpolate_bms(
 				ocvrow1,
 				pc_temp_ocv->percent[row1],
 				ocvrow2,
@@ -232,7 +232,7 @@
 	return ocv;
 }
 
-int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+int interpolate_pc_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 				int batt_temp, int ocv)
 {
 	int i, j, pcj, pcj_minus_one, pc;
@@ -262,7 +262,7 @@
 			if (ocv >= pc_temp_ocv->ocv[i][j]) {
 				if (ocv == pc_temp_ocv->ocv[i][j])
 					return pc_temp_ocv->percent[i];
-				pc = linear_interpolate(
+				pc = linear_interpolate_bms(
 					pc_temp_ocv->percent[i],
 					pc_temp_ocv->ocv[i][j],
 					pc_temp_ocv->percent[i - 1],
@@ -288,7 +288,7 @@
 		if (pcj == 0
 			&& is_between(pc_temp_ocv->ocv[i][j],
 				pc_temp_ocv->ocv[i+1][j], ocv)) {
-			pcj = linear_interpolate(
+			pcj = linear_interpolate_bms(
 				pc_temp_ocv->percent[i],
 				pc_temp_ocv->ocv[i][j],
 				pc_temp_ocv->percent[i + 1],
@@ -299,7 +299,7 @@
 		if (pcj_minus_one == 0
 			&& is_between(pc_temp_ocv->ocv[i][j-1],
 				pc_temp_ocv->ocv[i+1][j-1], ocv)) {
-			pcj_minus_one = linear_interpolate(
+			pcj_minus_one = linear_interpolate_bms(
 				pc_temp_ocv->percent[i],
 				pc_temp_ocv->ocv[i][j-1],
 				pc_temp_ocv->percent[i + 1],
@@ -308,7 +308,7 @@
 		}
 
 		if (pcj && pcj_minus_one) {
-			pc = linear_interpolate(
+			pc = linear_interpolate_bms(
 				pcj_minus_one,
 				pc_temp_ocv->temp[j-1] * DEGC_SCALE,
 				pcj,
@@ -329,7 +329,7 @@
 	return 100;
 }
 
-int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+int interpolate_slope_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 					int batt_temp, int pc)
 {
 	int i, ocvrow1, ocvrow2, rows, cols;
@@ -385,14 +385,14 @@
 			pc_temp_ocv->percent[row2]);
 		return slope;
 	}
-	ocvrow1 = linear_interpolate(
+	ocvrow1 = linear_interpolate_bms(
 			pc_temp_ocv->ocv[row1][i - 1],
 			pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
 			pc_temp_ocv->ocv[row1][i],
 			pc_temp_ocv->temp[i] * DEGC_SCALE,
 			batt_temp);
 
-	ocvrow2 = linear_interpolate(
+	ocvrow2 = linear_interpolate_bms(
 			pc_temp_ocv->ocv[row2][i - 1],
 				pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
 				pc_temp_ocv->ocv[row2][i],
@@ -411,7 +411,7 @@
 }
 
 
-int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+int interpolate_acc_bms(struct ibat_temp_acc_lut *ibat_acc_lut,
 					int batt_temp, int ibat)
 {
 	int i, accrow1, accrow2, rows, cols;
@@ -457,7 +457,7 @@
 			break;
 
 	if (batt_temp == (ibat_acc_lut->temp[i] * DEGC_SCALE)) {
-		acc = linear_interpolate(
+		acc = linear_interpolate_bms(
 			ibat_acc_lut->acc[row1][i],
 			ibat_acc_lut->ibat[row1],
 			ibat_acc_lut->acc[row2][i],
@@ -466,21 +466,21 @@
 		return acc;
 	}
 
-	accrow1 = linear_interpolate(
+	accrow1 = linear_interpolate_bms(
 		ibat_acc_lut->acc[row1][i - 1],
 		ibat_acc_lut->temp[i - 1] * DEGC_SCALE,
 		ibat_acc_lut->acc[row1][i],
 		ibat_acc_lut->temp[i] * DEGC_SCALE,
 		batt_temp);
 
-	accrow2 = linear_interpolate(
+	accrow2 = linear_interpolate_bms(
 		ibat_acc_lut->acc[row2][i - 1],
 		ibat_acc_lut->temp[i - 1] * DEGC_SCALE,
 		ibat_acc_lut->acc[row2][i],
 		ibat_acc_lut->temp[i] * DEGC_SCALE,
 		batt_temp);
 
-	acc = linear_interpolate(accrow1,
+	acc = linear_interpolate_bms(accrow1,
 			ibat_acc_lut->ibat[row1],
 			accrow2,
 			ibat_acc_lut->ibat[row2],
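For reference, the renamed *_bms lookup helpers above all reduce to the same integer interpolation. A minimal standalone sketch of that formula follows; the function name and sample values are assumptions, not the kernel API.

/* Illustrative sketch of the integer linear interpolation used by the
 * batterydata LUT helpers. */
#include <stdio.h>

static int lerp(int y0, int x0, int y1, int x1, int x)
{
	if (y0 == y1 || x == x0)
		return y0;
	if (x1 == x0 || x == x1)
		return y1;
	return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
}

int main(void)
{
	/* e.g. 20% at 3700 mV and 30% at 3800 mV -> SOC at 3750 mV */
	printf("soc=%d\n", lerp(20, 3700, 30, 3800, 3750));	/* 25 */
	return 0;
}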
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index b0ff18c..4f0773b 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -50,11 +50,13 @@
 	int			delta_soc;
 	int			rbat_conn_mohm;
 	int			ignore_shutdown_soc_secs;
+	int			shutdown_temp_diff;
 	int			cold_temp_threshold;
 	int			esr_qual_i_ua;
 	int			esr_qual_v_uv;
 	int			esr_disable_soc;
 	int			esr_min_ibat_ua;
+	int			shutdown_soc_threshold;
 	bool			hold_soc_while_full;
 	bool			linearize_soc;
 	bool			cl_disable;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 7c9d9d5..d7ee5bd 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -2748,7 +2748,25 @@
 	mutex_unlock(&chip->cyc_ctr.lock);
 }
 
-static const char *fg_get_cycle_count(struct fg_chip *chip)
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+	int i, len = 0;
+
+	if (!chip->cyc_ctr.en)
+		return 0;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	for (i = 0; i < BUCKET_COUNT; i++)
+		len += chip->cyc_ctr.count[i];
+
+	mutex_unlock(&chip->cyc_ctr.lock);
+
+	len = len / BUCKET_COUNT;
+
+	return len;
+}
+
+static const char *fg_get_cycle_counts(struct fg_chip *chip)
 {
 	int i, len = 0;
 	char *buf;
@@ -4037,8 +4055,11 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
 		pval->intval = chip->bp.float_volt_uv;
 		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		pval->intval = fg_get_cycle_count(chip);
+		break;
 	case POWER_SUPPLY_PROP_CYCLE_COUNTS:
-		pval->strval = fg_get_cycle_count(chip);
+		pval->strval = fg_get_cycle_counts(chip);
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
 		rc = fg_get_charge_raw(chip, &pval->intval);
@@ -4273,6 +4294,7 @@
 	POWER_SUPPLY_PROP_BATTERY_TYPE,
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_CYCLE_COUNTS,
 	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
 	POWER_SUPPLY_PROP_CHARGE_NOW,
@@ -5582,13 +5604,24 @@
 {
 	int i;
 
+	power_supply_unreg_notifier(&chip->nb);
+	qpnp_misc_twm_notifier_unregister(&chip->twm_nb);
+	cancel_delayed_work_sync(&chip->ttf_work);
+	cancel_delayed_work_sync(&chip->sram_dump_work);
+	if (chip->dt.use_esr_sw)
+		alarm_cancel(&chip->esr_sw_timer);
+	cancel_work_sync(&chip->esr_sw_work);
+	cancel_delayed_work_sync(&chip->profile_load_work);
+	cancel_work_sync(&chip->status_change_work);
+	cancel_work_sync(&chip->esr_filter_work);
+	cancel_delayed_work_sync(&chip->pl_enable_work);
+
 	for (i = 0; i < FG_IRQ_MAX; i++) {
 		if (fg_irqs[i].irq)
 			devm_free_irq(chip->dev, fg_irqs[i].irq, chip);
 	}
 
 	alarm_try_to_cancel(&chip->esr_filter_alarm);
-	power_supply_unreg_notifier(&chip->nb);
 	debugfs_remove_recursive(chip->dfs_root);
 	if (chip->awake_votable)
 		destroy_votable(chip->awake_votable);
@@ -5953,8 +5986,6 @@
 		if (rc < 0)
 			pr_err("Error in disabling FG resets rc=%d\n", rc);
 	}
-
-	fg_cleanup(chip);
 }
 
 static const struct of_device_id fg_gen3_match_table[] = {
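The new fg_get_cycle_count() above reports POWER_SUPPLY_PROP_CYCLE_COUNT as the average of the per-bucket counters, while the string-valued CYCLE_COUNTS property keeps the old per-bucket listing. A tiny sketch of that averaging, with an assumed BUCKET_COUNT and made-up data:

/* Illustrative sketch: average the per-bucket cycle counters. */
#include <stdio.h>

#define BUCKET_COUNT	8	/* assumed bucket count */

static int cycle_count(const int counts[BUCKET_COUNT])
{
	int i, sum = 0;

	for (i = 0; i < BUCKET_COUNT; i++)
		sum += counts[i];
	return sum / BUCKET_COUNT;
}

int main(void)
{
	int counts[BUCKET_COUNT] = { 12, 12, 11, 13, 12, 12, 11, 12 };

	printf("cycle_count=%d\n", cycle_count(counts));
	return 0;
}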
diff --git a/drivers/power/supply/qcom/qpnp-linear-charger.c b/drivers/power/supply/qcom/qpnp-linear-charger.c
index dff51df..6e3158a 100644
--- a/drivers/power/supply/qcom/qpnp-linear-charger.c
+++ b/drivers/power/supply/qcom/qpnp-linear-charger.c
@@ -161,6 +161,7 @@
 	SOC	= BIT(3),
 	PARALLEL = BIT(4),
 	COLLAPSE = BIT(5),
+	DEBUG_BOARD = BIT(6),
 };
 
 enum bpd_type {
@@ -208,14 +209,19 @@
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_CAPACITY,
 	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_COOL_TEMP,
 	POWER_SUPPLY_PROP_WARM_TEMP,
-	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
 };
 
 static char *pm_batt_supplied_to[] = {
@@ -349,6 +355,7 @@
 	bool				cfg_use_external_charger;
 	bool				cfg_chgr_led_support;
 	bool				non_collapsible_chgr_detected;
+	bool				debug_board;
 	unsigned int			cfg_warm_bat_chg_ma;
 	unsigned int			cfg_cool_bat_chg_ma;
 	unsigned int			cfg_safe_voltage_mv;
@@ -1351,6 +1358,23 @@
 	return DEFAULT_CAPACITY;
 }
 
+static int get_bms_property(struct qpnp_lbc_chip *chip,
+				enum power_supply_property psy_prop)
+{
+	union power_supply_propval ret = {0,};
+
+	if (!chip->bms_psy)
+		chip->bms_psy = power_supply_get_by_name("bms");
+
+	if (chip->bms_psy) {
+		power_supply_get_property(chip->bms_psy, psy_prop, &ret);
+		return ret.intval;
+	}
+	pr_debug("No BMS supply registered\n");
+
+	return -EINVAL;
+}
+
 static int get_prop_charge_count(struct qpnp_lbc_chip *chip)
 {
 	union power_supply_propval ret = {0,};
@@ -1536,7 +1560,7 @@
 	case POWER_SUPPLY_PROP_COOL_TEMP:
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
 	case POWER_SUPPLY_PROP_WARM_TEMP:
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		return 1;
 	default:
 		break;
@@ -1637,10 +1661,15 @@
 		rc = qpnp_lbc_charger_enable(chip, USER,
 						!chip->cfg_charging_disabled);
 		break;
+	case POWER_SUPPLY_PROP_DEBUG_BATTERY:
+		chip->debug_board = val->intval;
+		rc = qpnp_lbc_charger_enable(chip, DEBUG_BOARD,
+						!(val->intval));
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
 		qpnp_lbc_vinmin_set(chip, val->intval / 1000);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		qpnp_lbc_system_temp_level_set(chip, val->intval);
 		break;
 	default:
@@ -1676,6 +1705,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
 		val->intval = chip->cfg_min_voltage_mv * 1000;
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chip->cfg_max_voltage_mv * 1000;
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		val->intval = get_prop_battery_voltage_now(chip);
 		break;
@@ -1697,12 +1729,24 @@
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
 		val->intval = get_prop_charge_count(chip);
 		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		val->intval = get_bms_property(chip, psp);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		val->intval = get_bms_property(chip, psp);
+		break;
 	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
 		val->intval = !(chip->cfg_charging_disabled);
 		break;
-	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_DEBUG_BATTERY:
+		val->intval = chip->debug_board;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
 		val->intval = chip->therm_lvl_sel;
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+		val->intval = chip->cfg_thermal_levels;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1793,8 +1837,9 @@
 	POWER_SUPPLY_PROP_TYPE,
 	POWER_SUPPLY_PROP_REAL_TYPE,
 	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 };
-
+#define MICRO_5V        5000000
 static int qpnp_lbc_usb_get_property(struct power_supply *psy,
 				  enum power_supply_property psp,
 				  union power_supply_propval *val)
@@ -1827,6 +1872,12 @@
 			(chip->usb_supply_type != POWER_SUPPLY_TYPE_UNKNOWN))
 			val->intval = chip->usb_supply_type;
 		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		if (chip->usb_present)
+			val->intval = MICRO_5V;
+		else
+			val->intval = 0;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 855e31d..12ab956 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -2514,7 +2514,6 @@
 	return 0;
 }
 
-
 static struct ocv_all ocv[] = {
 	[S7_PON_OCV] = { 0, 0, "S7_PON_OCV"},
 	[S3_GOOD_OCV] = { 0, 0, "S3_GOOD_OCV"},
@@ -2528,7 +2527,8 @@
 	int rc = 0, batt_temp = 0, i;
 	bool use_pon_ocv = true;
 	unsigned long rtc_sec = 0;
-	u32 ocv_uv = 0, soc = 0, shutdown[SDAM_MAX] = {0};
+	u32 ocv_uv = 0, soc = 0, pon_soc = 0, full_soc = 0, cutoff_soc = 0;
+	u32 shutdown[SDAM_MAX] = {0};
 	char ocv_type[20] = "NONE";
 
 	if (!chip->profile_loaded) {
@@ -2536,6 +2536,24 @@
 		return 0;
 	}
 
+	/* read all OCVs */
+	for (i = S7_PON_OCV; i < PON_OCV_MAX; i++) {
+		rc = qg_read_ocv(chip, &ocv[i].ocv_uv,
+					&ocv[i].ocv_raw, i);
+		if (rc < 0)
+			pr_err("Failed to read %s OCV rc=%d\n",
+					ocv[i].ocv_type, rc);
+		else
+			qg_dbg(chip, QG_DEBUG_PON, "%s OCV=%d\n",
+					ocv[i].ocv_type, ocv[i].ocv_uv);
+	}
+
+	rc = qg_get_battery_temp(chip, &batt_temp);
+	if (rc) {
+		pr_err("Failed to read BATT_TEMP at PON rc=%d\n", rc);
+		goto done;
+	}
+
 	rc = get_rtc_time(&rtc_sec);
 	if (rc < 0) {
 		pr_err("Failed to read RTC time rc=%d\n", rc);
@@ -2548,47 +2566,50 @@
 		goto use_pon_ocv;
 	}
 
-	qg_dbg(chip, QG_DEBUG_PON, "Shutdown: Valid=%d SOC=%d OCV=%duV time=%dsecs, time_now=%ldsecs\n",
+	rc = lookup_soc_ocv(&pon_soc, ocv[S7_PON_OCV].ocv_uv, batt_temp, false);
+	if (rc < 0) {
+		pr_err("Failed to lookup S7_PON SOC rc=%d\n", rc);
+		goto done;
+	}
+
+	qg_dbg(chip, QG_DEBUG_PON, "Shutdown: Valid=%d SOC=%d OCV=%duV time=%dsecs temp=%d, time_now=%ldsecs temp_now=%d S7_soc=%d\n",
 			shutdown[SDAM_VALID],
 			shutdown[SDAM_SOC],
 			shutdown[SDAM_OCV_UV],
 			shutdown[SDAM_TIME_SEC],
-			rtc_sec);
+			shutdown[SDAM_TEMP],
+			rtc_sec, batt_temp,
+			pon_soc);
 	/*
 	 * Use the shutdown SOC if
-	 * 1. The device was powered off for < ignore_shutdown_time
-	 * 2. SDAM read is a success & SDAM data is valid
+	 * 1. SDAM read is a success & SDAM data is valid
+	 * 2. The device was powered off for < ignore_shutdown_time
+	 * 3. Batt temp has not changed by more than shutdown_temp_diff
 	 */
-	if (shutdown[SDAM_VALID] && is_between(0,
-			chip->dt.ignore_shutdown_soc_secs,
-			(rtc_sec - shutdown[SDAM_TIME_SEC]))) {
-		use_pon_ocv = false;
-		ocv_uv = shutdown[SDAM_OCV_UV];
-		soc = shutdown[SDAM_SOC];
-		strlcpy(ocv_type, "SHUTDOWN_SOC", 20);
-		qg_dbg(chip, QG_DEBUG_PON, "Using SHUTDOWN_SOC @ PON\n");
-	}
+	if (!shutdown[SDAM_VALID])
+		goto use_pon_ocv;
+
+	if (!is_between(0, chip->dt.ignore_shutdown_soc_secs,
+			(rtc_sec - shutdown[SDAM_TIME_SEC])))
+		goto use_pon_ocv;
+
+	if (!is_between(0, chip->dt.shutdown_temp_diff,
+			abs(shutdown[SDAM_TEMP] - batt_temp)))
+		goto use_pon_ocv;
+
+	if ((chip->dt.shutdown_soc_threshold != -EINVAL) &&
+			!is_between(0, chip->dt.shutdown_soc_threshold,
+			abs(pon_soc - shutdown[SDAM_SOC])))
+		goto use_pon_ocv;
+
+	use_pon_ocv = false;
+	ocv_uv = shutdown[SDAM_OCV_UV];
+	soc = shutdown[SDAM_SOC];
+	strlcpy(ocv_type, "SHUTDOWN_SOC", 20);
+	qg_dbg(chip, QG_DEBUG_PON, "Using SHUTDOWN_SOC @ PON\n");
 
 use_pon_ocv:
 	if (use_pon_ocv == true) {
-		rc = qg_get_battery_temp(chip, &batt_temp);
-		if (rc) {
-			pr_err("Failed to read BATT_TEMP at PON rc=%d\n", rc);
-			goto done;
-		}
-
-		/* read all OCVs */
-		for (i = S7_PON_OCV; i < PON_OCV_MAX; i++) {
-			rc = qg_read_ocv(chip, &ocv[i].ocv_uv,
-						&ocv[i].ocv_raw, i);
-			if (rc < 0)
-				pr_err("Failed to read %s OCV rc=%d\n",
-						ocv[i].ocv_type, rc);
-			else
-				qg_dbg(chip, QG_DEBUG_PON, "%s OCV=%d\n",
-					ocv[i].ocv_type, ocv[i].ocv_uv);
-		}
-
 		if (ocv[S3_LAST_OCV].ocv_raw == FIFO_V_RESET_VAL) {
 			if (!ocv[SDAM_PON_OCV].ocv_uv) {
 				strlcpy(ocv_type, "S7_PON_SOC", 20);
@@ -2618,11 +2639,36 @@
 		}
 
 		ocv_uv = CAP(QG_MIN_OCV_UV, QG_MAX_OCV_UV, ocv_uv);
-		rc = lookup_soc_ocv(&soc, ocv_uv, batt_temp, false);
+		rc = lookup_soc_ocv(&pon_soc, ocv_uv, batt_temp, false);
 		if (rc < 0) {
 			pr_err("Failed to lookup SOC@PON rc=%d\n", rc);
 			goto done;
 		}
+
+		rc = lookup_soc_ocv(&full_soc, chip->bp.float_volt_uv,
+							batt_temp, true);
+		if (rc < 0) {
+			pr_err("Failed to lookup FULL_SOC@PON rc=%d\n", rc);
+			goto done;
+		}
+
+		rc = lookup_soc_ocv(&cutoff_soc,
+				chip->dt.vbatt_cutoff_mv * 1000,
+				batt_temp, false);
+		if (rc < 0) {
+			pr_err("Failed to lookup CUTOFF_SOC@PON rc=%d\n", rc);
+			goto done;
+		}
+
+		if ((full_soc > cutoff_soc) && (pon_soc > cutoff_soc))
+			soc = DIV_ROUND_UP(((pon_soc - cutoff_soc) * 100),
+						(full_soc - cutoff_soc));
+		else
+			soc = pon_soc;
+
+		qg_dbg(chip, QG_DEBUG_PON, "v_float=%d v_cutoff=%d FULL_SOC=%d CUTOFF_SOC=%d PON_SYS_SOC=%d pon_soc=%d\n",
+			chip->bp.float_volt_uv, chip->dt.vbatt_cutoff_mv * 1000,
+			full_soc, cutoff_soc, pon_soc, soc);
 	}
 done:
 	if (rc < 0) {
@@ -3074,6 +3120,7 @@
 #define DEFAULT_CL_MAX_DEC_DECIPERC	20
 #define DEFAULT_CL_MIN_LIM_DECIPERC	500
 #define DEFAULT_CL_MAX_LIM_DECIPERC	100
+#define DEFAULT_SHUTDOWN_TEMP_DIFF	60	/* 6 degC */
 #define DEFAULT_ESR_QUAL_CURRENT_UA	130000
 #define DEFAULT_ESR_QUAL_VBAT_UV	7000
 #define DEFAULT_ESR_DISABLE_SOC		1000
@@ -3259,6 +3306,12 @@
 	else
 		chip->dt.ignore_shutdown_soc_secs = temp;
 
+	rc = of_property_read_u32(node, "qcom,shutdown-temp-diff", &temp);
+	if (rc < 0)
+		chip->dt.shutdown_temp_diff = DEFAULT_SHUTDOWN_TEMP_DIFF;
+	else
+		chip->dt.shutdown_temp_diff = temp;
+
 	chip->dt.hold_soc_while_full = of_property_read_bool(node,
 					"qcom,hold-soc-while-full");
 
@@ -3302,6 +3355,12 @@
 	else
 		chip->dt.esr_min_ibat_ua = (int)temp;
 
+	rc = of_property_read_u32(node, "qcom,shutdown_soc_threshold", &temp);
+	if (rc < 0)
+		chip->dt.shutdown_soc_threshold = -EINVAL;
+	else
+		chip->dt.shutdown_soc_threshold = temp;
+
 	chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns");
 
 	/* Capacity learning params*/
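The new PON path above rescales the raw S7/SDAM SOC between the cutoff and full lookup points before reporting it as the system SOC. A small sketch of that rescaling, with made-up sample values:

/* Illustrative sketch of the PON system-SOC rescaling. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int rescale_soc(unsigned int pon, unsigned int cutoff,
				unsigned int full)
{
	if (full > cutoff && pon > cutoff)
		return DIV_ROUND_UP((pon - cutoff) * 100, full - cutoff);
	return pon;	/* fall back to the raw lookup */
}

int main(void)
{
	/* e.g. raw PON SOC 55% with cutoff at 3% and full at 97% */
	printf("sys_soc=%u%%\n", rescale_soc(55, 3, 97));
	return 0;
}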
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index e3e4f2b..dd0f889 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -217,6 +217,9 @@
 		return -EINVAL;
 	}
 
+	chg->reddragon_ipc_wa = of_property_read_bool(node,
+				"qcom,qcs605-ipc-wa");
+
 	chg->step_chg_enabled = of_property_read_bool(node,
 				"qcom,step-charging-enable");
 
@@ -334,6 +337,9 @@
 	chg->disable_stat_sw_override = of_property_read_bool(node,
 					"qcom,disable-stat-sw-override");
 
+	chg->fcc_stepper_enable = of_property_read_bool(node,
+					"qcom,fcc-stepping-enable");
+
 	return 0;
 }
 
@@ -1001,6 +1007,7 @@
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
 };
 
 static int smb2_batt_get_prop(struct power_supply *psy,
@@ -1115,6 +1122,9 @@
 	case POWER_SUPPLY_PROP_TEMP:
 		rc = smblib_get_prop_from_bms(chg, psp, val);
 		break;
+	case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE:
+		val->intval = chg->fcc_stepper_enable;
+		break;
 	default:
 		pr_err("batt power supply prop %d not supported\n", psp);
 		return -EINVAL;
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 57aa0c6..0a677ec 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -282,7 +282,7 @@
 		break;
 	case PMI632_SUBTYPE:
 		chip->chg.smb_version = PMI632_SUBTYPE;
-		chg->wa_flags |= WEAK_ADAPTER_WA;
+		chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA;
 		if (pmic_rev_id->rev4 >= 2)
 			chg->wa_flags |= MOISTURE_PROTECTION_WA;
 		chg->param = smb5_pmi632_params;
@@ -504,6 +504,9 @@
 	chg->moisture_protection_enabled = of_property_read_bool(node,
 					"qcom,moisture-protection-enable");
 
+	chg->fcc_stepper_enable = of_property_read_bool(node,
+					"qcom,fcc-stepping-enable");
+
 	return 0;
 }
 
@@ -1258,6 +1261,7 @@
 	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_RECHARGE_SOC,
 	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
 };
 
 #define ITERM_SCALING_FACTOR_PMI632	1525
@@ -1414,6 +1418,9 @@
 		rc = smblib_get_prop_from_bms(chg,
 				POWER_SUPPLY_PROP_CHARGE_FULL, val);
 		break;
+	case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE:
+		val->intval = chg->fcc_stepper_enable;
+		break;
 	default:
 		pr_err("batt power supply prop %d not supported\n", psp);
 		return -EINVAL;
@@ -1732,6 +1739,14 @@
 		}
 	}
 
+	/* Disable TypeC and RID change source interrupts */
+	rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
 	return rc;
 }
 
@@ -2428,7 +2443,7 @@
 	},
 	[USBIN_OV_IRQ] = {
 		.name		= "usbin-ov",
-		.handler	= default_irq_handler,
+		.handler	= usbin_ov_irq_handler,
 	},
 	[USBIN_PLUGIN_IRQ] = {
 		.name		= "usbin-plugin",
diff --git a/drivers/power/supply/qcom/qpnp-vm-bms.c b/drivers/power/supply/qcom/qpnp-vm-bms.c
index 042ce99..6295bfc 100644
--- a/drivers/power/supply/qcom/qpnp-vm-bms.c
+++ b/drivers/power/supply/qcom/qpnp-vm-bms.c
@@ -123,6 +123,8 @@
 
 #define QPNP_VM_BMS_DEV_NAME		"qcom,qpnp-vm-bms"
 
+#define DEBUG_BATT_ID_LOW	6500
+#define DEBUG_BATT_ID_HIGH	8500
 /* indicates the state of BMS */
 enum {
 	IDLE_STATE,
@@ -275,6 +277,7 @@
 	struct power_supply		*bms_psy;
 	struct power_supply		*batt_psy;
 	struct power_supply		*usb_psy;
+	struct notifier_block		nb;
 	bool				reported_soc_in_use;
 	bool				charger_removed_since_full;
 	bool				charger_reinserted;
@@ -282,6 +285,7 @@
 	int				reported_soc;
 	int				reported_soc_change_sec;
 	int				reported_soc_delta;
+	int				batt_id_ohm;
 };
 
 static struct qpnp_bms_chip *the_chip;
@@ -478,6 +482,45 @@
 	return 0;
 }
 
+static bool is_debug_batt_id(struct qpnp_bms_chip *chip)
+{
+	if (is_between(DEBUG_BATT_ID_LOW, DEBUG_BATT_ID_HIGH,
+				chip->batt_id_ohm))
+		return true;
+
+	return false;
+}
+
+static int bms_notifier_cb(struct notifier_block *nb,
+			unsigned long event, void *data)
+{
+	union power_supply_propval ret = {0,};
+	struct power_supply *psy = data;
+	struct qpnp_bms_chip *chip = container_of(nb, struct qpnp_bms_chip, nb);
+
+	if (event != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "battery") == 0)) {
+		if (chip->batt_psy == NULL)
+			chip->batt_psy = power_supply_get_by_name("battery");
+		if (chip->batt_psy) {
+			if (is_debug_batt_id(chip)) {
+				power_supply_get_property(chip->batt_psy,
+					POWER_SUPPLY_PROP_DEBUG_BATTERY, &ret);
+				if (!ret.intval) {
+					ret.intval = 1;
+					power_supply_set_property(
+						chip->batt_psy,
+						POWER_SUPPLY_PROP_DEBUG_BATTERY,
+						 &ret);
+				}
+			}
+		}
+	}
+
+	return NOTIFY_OK;
+}
 static bool is_charger_present(struct qpnp_bms_chip *chip)
 {
 	union power_supply_propval ret = {0,};
@@ -953,9 +996,9 @@
 	int soc_ocv = 0, soc_cutoff = 0, soc_final = 0;
 	int fcc, acc, soc_uuc = 0, soc_acc = 0, iavg_ma = 0;
 
-	soc_ocv = interpolate_pc(chip->batt_data->pc_temp_ocv_lut,
+	soc_ocv = interpolate_pc_bms(chip->batt_data->pc_temp_ocv_lut,
 					batt_temp, ocv_uv / 1000);
-	soc_cutoff = interpolate_pc(chip->batt_data->pc_temp_ocv_lut,
+	soc_cutoff = interpolate_pc_bms(chip->batt_data->pc_temp_ocv_lut,
 				batt_temp, chip->dt.cfg_v_cutoff_uv / 1000);
 
 	soc_final = DIV_ROUND_CLOSEST(100 * (soc_ocv - soc_cutoff),
@@ -974,9 +1017,9 @@
 			else
 				iavg_ma = chip->current_now / 1000;
 
-			fcc = interpolate_fcc(chip->batt_data->fcc_temp_lut,
+			fcc = interpolate_fcc_bms(chip->batt_data->fcc_temp_lut,
 								batt_temp);
-			acc = interpolate_acc(chip->batt_data->ibat_acc_lut,
+			acc = interpolate_acc_bms(chip->batt_data->ibat_acc_lut,
 							batt_temp, iavg_ma);
 			if (acc <= 0) {
 				if (chip->last_acc)
@@ -1249,7 +1292,8 @@
 		return rbatt_mohm;
 	}
 
-	scalefactor = interpolate_scalingfactor(chip->batt_data->rbatt_sf_lut,
+	scalefactor = interpolate_scalingfactor_bms(
+						chip->batt_data->rbatt_sf_lut,
 						batt_temp, soc);
 	rbatt_mohm = (rbatt_mohm * scalefactor) / 100;
 
@@ -1412,7 +1456,7 @@
 	if (chip->batt_psy == NULL)
 		chip->batt_psy = power_supply_get_by_name("battery");
 	if (chip->batt_psy) {
-		power_supply_get_property(chip->batt_psy,
+		rc = power_supply_get_property(chip->batt_psy,
 				POWER_SUPPLY_PROP_STATUS, &ret);
 		if (rc) {
 			pr_err("Unable to get battery 'STATUS' rc=%d\n", rc);
@@ -2161,8 +2205,16 @@
 	mutex_unlock(&chip->bms_device_mutex);
 }
 
+#define DEBUG_BOARD_SOC 67
+#define BATT_MISSING_SOC 50
 static int get_prop_bms_capacity(struct qpnp_bms_chip *chip)
 {
+	if (is_debug_batt_id(chip))
+		return DEBUG_BOARD_SOC;
+
+	if (!chip->battery_present)
+		return BATT_MISSING_SOC;
+
 	return report_state_of_charge(chip);
 }
 
@@ -2226,6 +2278,15 @@
 	return current_charge;
 }
 
+static int get_charge_full(struct qpnp_bms_chip *chip)
+{
+
+	if (chip->batt_data)
+		return chip->batt_data->fcc * 1000;
+
+	return -EINVAL;
+}
+
 static enum power_supply_property bms_power_props[] = {
 	POWER_SUPPLY_PROP_CAPACITY,
 	POWER_SUPPLY_PROP_STATUS,
@@ -2240,6 +2301,8 @@
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_RESISTANCE_ID,
 };
 
 static int
@@ -2320,6 +2383,12 @@
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
 		val->intval = get_current_cc(chip);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		val->intval = get_charge_full(chip);
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_ID:
+		val->intval = chip->batt_id_ohm;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2847,7 +2916,7 @@
 	if (die_temp == (temp_curr_comp_lut[i].temp_decideg))
 		return temp_curr_comp_lut[i].current_ma;
 
-	return linear_interpolate(
+	return linear_interpolate_bms(
 				temp_curr_comp_lut[i - 1].current_ma,
 				temp_curr_comp_lut[i - 1].temp_decideg,
 				temp_curr_comp_lut[i].current_ma,
@@ -2864,7 +2933,7 @@
 	if (rc) {
 		pr_err("error reading adc channel=%d, rc=%d\n", DIE_TEMP, rc);
 	} else {
-		pc = interpolate_pc(chip->batt_data->pc_temp_ocv_lut,
+		pc = interpolate_pc_bms(chip->batt_data->pc_temp_ocv_lut,
 					batt_temp, chip->last_ocv_uv / 1000);
 		/*
 		 * For pc < 2, use the rbatt of pc = 2. This is to avoid
@@ -3281,7 +3350,7 @@
 }
 
 
-static int64_t read_battery_id(struct qpnp_bms_chip *chip)
+static int64_t read_battery_id_uv(struct qpnp_bms_chip *chip)
 {
 	int rc;
 	struct qpnp_vadc_result result;
@@ -3464,20 +3533,55 @@
 	.release	= single_release,
 };
 
+#define BID_RPULL_OHM          100000
+#define VREF_BAT_THERM         1800
+static int get_battery_id(struct qpnp_bms_chip *chip,
+			int64_t battery_id_uv)
+{
+	int batt_id_mv;
+	int64_t denom;
+
+	batt_id_mv = div_s64(battery_id_uv, 1000);
+	if (batt_id_mv == 0) {
+		pr_debug("batt_id_mv = 0 from ADC\n");
+		return 0;
+	}
+
+	denom = div64_s64(VREF_BAT_THERM * 1000, batt_id_mv) - 1000;
+	if (denom <= 0) {
+		/* batt id connector might be open, return 0 kohms */
+		return 0;
+	}
+
+	chip->batt_id_ohm = div64_u64(BID_RPULL_OHM * 1000 + denom / 2, denom);
+
+	return 0;
+}
+
 static int set_battery_data(struct qpnp_bms_chip *chip)
 {
-	int64_t battery_id;
+	int64_t battery_id_uv;
 	int rc = 0;
 	struct bms_battery_data *batt_data;
 	struct device_node *node;
 
-	battery_id = read_battery_id(chip);
-	if (battery_id < 0) {
-		pr_err("cannot read battery id err = %lld\n", battery_id);
-		return battery_id;
+	battery_id_uv = read_battery_id_uv(chip);
+	if (battery_id_uv < 0) {
+		pr_err("cannot read battery id_uv err = %lld\n", battery_id_uv);
+		return battery_id_uv;
 	}
-	node = of_find_node_by_name(chip->pdev->dev.of_node,
-					"qcom,battery-data");
+
+	rc = get_battery_id(chip, battery_id_uv);
+	if (rc < 0) {
+		pr_err("Failed to calcualte battery-id rc=%d\n", rc);
+		return rc;
+	}
+
+	node = of_parse_phandle(chip->pdev->dev.of_node,
+					"qcom,battery-data", 0);
+	pr_debug(" battery-id-uV=%lld batt_id=%d ohm\n",
+					battery_id_uv, chip->batt_id_ohm);
+
 	if (!node) {
 		pr_err("No available batterydata\n");
 		return -EINVAL;
@@ -3505,7 +3609,7 @@
 	 * if the alloced luts are 0s, of_batterydata_read_data ignores
 	 * them.
 	 */
-	rc = of_batterydata_read_data(node, batt_data, battery_id);
+	rc = of_batterydata_read_data(node, batt_data, battery_id_uv);
 	if (rc || !batt_data->pc_temp_ocv_lut
 		|| !batt_data->fcc_temp_lut
 		|| !batt_data->rbatt_sf_lut
@@ -3953,6 +4057,11 @@
 	}
 	chip->bms_psy_registered = true;
 
+	chip->nb.notifier_call = bms_notifier_cb;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0)
+		pr_err("Failed register psy notifier rc = %d\n", rc);
+
 	rc = get_battery_voltage(chip, &vbatt);
 	if (rc) {
 		pr_err("error reading vbat_sns adc channel=%d, rc=%d\n",
@@ -4043,6 +4152,7 @@
 	mutex_destroy(&chip->last_soc_mutex);
 	mutex_destroy(&chip->state_change_mutex);
 	mutex_destroy(&chip->bms_device_mutex);
+	power_supply_unreg_notifier(&chip->nb);
 	power_supply_unregister(chip->bms_psy);
 	dev_set_drvdata(&pdev->dev, NULL);
 	the_chip = NULL;
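get_battery_id() above converts the ID-pin ADC reading into a resistance using the 100 kohm pull-up to the 1.8 V battery-therm reference. A standalone sketch of the same divider math, with a made-up ADC sample; the function name is an assumption for the example.

/* Illustrative sketch of the battery-ID pull-up divider math. */
#include <stdio.h>
#include <stdint.h>

#define BID_RPULL_OHM	100000
#define VREF_BAT_THERM	1800	/* mV */

static int batt_id_ohm(int64_t batt_id_uv)
{
	int64_t mv = batt_id_uv / 1000;
	int64_t denom;

	if (mv == 0)
		return 0;
	denom = (VREF_BAT_THERM * 1000) / mv - 1000;
	if (denom <= 0)
		return 0;	/* connector likely open */
	return (int)((BID_RPULL_OHM * 1000LL + denom / 2) / denom);
}

int main(void)
{
	/* 120 mV on the ID pin -> ~7.1 kohm (debug-board range) */
	printf("batt_id=%d ohm\n", batt_id_ohm(120000));
	return 0;
}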
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 5546eae..3671891 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1606,7 +1606,7 @@
 
 	smblib_dbg(chg, PR_OTG, "enabling OTG\n");
 
-	if (chg->wa_flags & OTG_WA) {
+	if ((chg->wa_flags & OTG_WA) && (!chg->reddragon_ipc_wa)) {
 		rc = smblib_enable_otg_wa(chg);
 		if (rc < 0)
 			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
@@ -3518,8 +3518,17 @@
 	vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
 
 	if (vbus_rising) {
+		/* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
+
 		smblib_cc2_sink_removal_exit(chg);
 	} else {
+		/* Force 1500mA FCC on USB removal if fcc stepper is enabled */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+							true, 1500000);
+
 		smblib_cc2_sink_removal_enter(chg);
 		if (chg->wa_flags & BOOST_BACK_WA) {
 			data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -3569,6 +3578,10 @@
 		if (rc < 0)
 			smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
 
+		/* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
+
 		/* Schedule work to enable parallel charger */
 		vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
 		schedule_delayed_work(&chg->pl_enable_work,
@@ -3596,6 +3609,11 @@
 			}
 		}
 
+		/* Force 1500mA FCC on removal if fcc stepper is enabled */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+							true, 1500000);
+
 		rc = smblib_request_dpdm(chg, false);
 		if (rc < 0)
 			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 2457ed2..87a8303 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -72,6 +72,7 @@
 #define HVDCP2_ICL_VOTER		"HVDCP2_ICL_VOTER"
 #define OV_VOTER			"OV_VOTER"
 #define FG_ESR_VOTER			"FG_ESR_VOTER"
+#define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
 #define OTG_MAX_ATTEMPTS	3
@@ -356,6 +357,7 @@
 	bool			is_audio_adapter;
 	bool			disable_stat_sw_override;
 	bool			in_chg_lock;
+	bool			fcc_stepper_enable;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -367,6 +369,7 @@
 	int			qc2_max_pulses;
 	bool			non_compliant_chg_detected;
 	bool			fake_usb_insertion;
+	bool			reddragon_ipc_wa;
 
 	/* extcon for VBUS / ID notification to USB for uUSB */
 	struct extcon_dev	*extcon;
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index f91d477..6f13635 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -801,6 +801,11 @@
 		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
 			rc);
 
+	/* reset USBOV votes and cancel work */
+	cancel_delayed_work_sync(&chg->usbov_dbc_work);
+	vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+	chg->dbc_usbov = false;
+
 	chg->voltage_min_uv = MICRO_5V;
 	chg->voltage_max_uv = MICRO_5V;
 	chg->usb_icl_delta_ua = 0;
@@ -1393,7 +1398,32 @@
 	union power_supply_propval pval = {0, };
 	bool usb_online, dc_online;
 	u8 stat;
-	int rc;
+	int rc, suspend = 0;
+
+	if (chg->dbc_usbov) {
+		rc = smblib_get_prop_usb_present(chg, &pval);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't get usb present prop rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = smblib_get_usb_suspend(chg, &suspend);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't get usb suspend rc=%d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Report charging as long as USBOV is not debounced and
+		 * charging path is un-suspended.
+		 */
+		if (pval.intval && !suspend) {
+			val->intval = POWER_SUPPLY_STATUS_CHARGING;
+			return 0;
+		}
+	}
 
 	rc = smblib_get_prop_usb_online(chg, &pval);
 	if (rc < 0) {
@@ -2939,7 +2969,11 @@
 
 	vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
 
-	if (!vbus_rising) {
+	if (vbus_rising) {
+		/* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
+	} else {
 		if (chg->wa_flags & BOOST_BACK_WA) {
 			data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
 			if (data) {
@@ -2952,6 +2986,11 @@
 						false, 0);
 			}
 		}
+
+		/* Force 1500mA FCC on USB removal if fcc stepper is enabled */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+							true, 1500000);
 	}
 
 	power_supply_changed(chg->usb_psy);
@@ -2983,6 +3022,10 @@
 		if (rc < 0)
 			smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
 
+		/* Remove FCC_STEPPER 1.5A init vote to allow FCC ramp up */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
+
 		/* Schedule work to enable parallel charger */
 		vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
 		schedule_delayed_work(&chg->pl_enable_work,
@@ -3028,6 +3071,11 @@
 					false, 0);
 		}
 
+		/* Force 1500mA FCC on removal if fcc stepper is enabled */
+		if (chg->fcc_stepper_enable)
+			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+							true, 1500000);
+
 		rc = smblib_request_dpdm(chg, false);
 		if (rc < 0)
 			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
@@ -3443,6 +3491,11 @@
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
 	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
 
+	/* reset USBOV votes and cancel work */
+	cancel_delayed_work_sync(&chg->usbov_dbc_work);
+	vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+	chg->dbc_usbov = false;
+
 	chg->pulse_cnt = 0;
 	chg->usb_icl_delta_ua = 0;
 	chg->voltage_min_uv = MICRO_5V;
@@ -3724,6 +3777,51 @@
 	return IRQ_HANDLED;
 }
 
+static void smblib_usbov_dbc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						usbov_dbc_work.work);
+
+	smblib_dbg(chg, PR_MISC, "Resetting USBOV debounce\n");
+	chg->dbc_usbov = false;
+	vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+}
+
+irqreturn_t usbin_ov_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	u8 stat;
+	int rc;
+
+	if (!(chg->wa_flags & USBIN_OV_WA))
+		goto out;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+		goto out;
+	}
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s stat=%x\n", irq_data->name,
+				!!stat);
+
+	if (stat & USBIN_OV_RT_STS_BIT) {
+		chg->dbc_usbov = true;
+		vote(chg->awake_votable, USBOV_DBC_VOTER, true, 0);
+		schedule_delayed_work(&chg->usbov_dbc_work,
+				msecs_to_jiffies(1000));
+	} else {
+		cancel_delayed_work_sync(&chg->usbov_dbc_work);
+		chg->dbc_usbov = false;
+		vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+	}
+
+out:
+	smblib_dbg(chg, PR_MISC, "USBOV debounce status %d\n",
+				chg->dbc_usbov);
+	return IRQ_HANDLED;
+}
+
 /**************
  * Additional USB PSY getters/setters
  * that call interrupt functions
@@ -4188,6 +4286,7 @@
 	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
 	INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
 	INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work);
+	INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
 
 	if (chg->moisture_protection_enabled &&
 				(chg->wa_flags & MOISTURE_PROTECTION_WA)) {
@@ -4277,6 +4376,7 @@
 		cancel_delayed_work_sync(&chg->pl_enable_work);
 		cancel_delayed_work_sync(&chg->uusb_otg_work);
 		cancel_delayed_work_sync(&chg->bb_removal_work);
+		cancel_delayed_work_sync(&chg->usbov_dbc_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
 		qcom_step_chg_deinit();
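usbin_ov_irq_handler() above implements a simple debounce: take a wake vote and schedule one second of delayed work when OV trips, and drop both when the work fires or the OV status clears. A reduced, illustrative state machine of that flow, with plain fields standing in for the votable and workqueue plumbing (names are assumptions):

/* Illustrative sketch of the USBIN-OV debounce flow. */
#include <stdbool.h>
#include <stdio.h>

struct ov_state {
	bool dbc_usbov;		/* OV seen, still debouncing */
	bool awake_vote;	/* keep the system awake while debouncing */
	bool work_pending;	/* 1 s delayed work scheduled */
};

static void ov_irq(struct ov_state *s, bool ov_asserted)
{
	if (ov_asserted) {
		s->dbc_usbov = true;
		s->awake_vote = true;
		s->work_pending = true;		/* schedule debounce work */
	} else {
		s->work_pending = false;	/* cancel debounce work */
		s->dbc_usbov = false;
		s->awake_vote = false;
	}
}

static void ov_debounce_work(struct ov_state *s)
{
	/* debounce expired without OV clearing: stop reporting it */
	s->dbc_usbov = false;
	s->awake_vote = false;
	s->work_pending = false;
}

int main(void)
{
	struct ov_state s = {0};

	ov_irq(&s, true);
	ov_debounce_work(&s);
	ov_irq(&s, false);
	printf("dbc=%d awake=%d\n", s.dbc_usbov, s.awake_vote);
	return 0;
}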
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 9afd7cd..1bba206 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -70,6 +70,8 @@
 #define FORCE_RECHARGE_VOTER		"FORCE_RECHARGE_VOTER"
 #define AICL_THRESHOLD_VOTER		"AICL_THRESHOLD_VOTER"
 #define MOISTURE_VOTER			"MOISTURE_VOTER"
+#define USBOV_DBC_VOTER			"USBOV_DBC_VOTER"
+#define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
 
 #define BOOST_BACK_STORM_COUNT	3
 #define WEAK_CHG_STORM_COUNT	8
@@ -101,6 +103,7 @@
 	BOOST_BACK_WA			= BIT(0),
 	WEAK_ADAPTER_WA			= BIT(1),
 	MOISTURE_PROTECTION_WA		= BIT(2),
+	USBIN_OV_WA			= BIT(3),
 };
 
 enum {
@@ -348,6 +351,7 @@
 	struct delayed_work	pl_enable_work;
 	struct delayed_work	uusb_otg_work;
 	struct delayed_work	bb_removal_work;
+	struct delayed_work	usbov_dbc_work;
 
 	/* alarm */
 	struct alarm		moisture_protection_alarm;
@@ -402,10 +406,12 @@
 	bool			aicl_max_reached;
 	bool			moisture_present;
 	bool			moisture_protection_enabled;
+	bool			fcc_stepper_enable;
 
 	/* workaround flag */
 	u32			wa_flags;
 	int			boost_current_ua;
+	bool			dbc_usbov;
 
 	/* extcon for VBUS / ID notification to USB for uUSB */
 	struct extcon_dev	*extcon;
@@ -478,6 +484,7 @@
 irqreturn_t switcher_power_ok_irq_handler(int irq, void *data);
 irqreturn_t wdog_bark_irq_handler(int irq, void *data);
 irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data);
+irqreturn_t usbin_ov_irq_handler(int irq, void *data);
 
 int smblib_get_prop_input_suspend(struct smb_charger *chg,
 				union power_supply_propval *val);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 58a97d4..5136462 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -89,6 +89,7 @@
 	case PTP_PF_PHYSYNC:
 		if (chan != 0)
 			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index b5c6b06..c0e06f0 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -382,6 +382,8 @@
 		aqcsfrc_mask = AQCSFRC_CSFA_MASK;
 	}
 
+	/* Update shadow register first before modifying active register */
+	ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
 	/*
 	 * Changes to immediate action on Action Qualifier. This puts
 	 * Action Qualifier control on PWM output from next TBCLK
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1a25499..0d81304 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4458,13 +4458,13 @@
 	    !rdev->desc->fixed_uV)
 		rdev->is_switch = true;
 
+	dev_set_drvdata(&rdev->dev, rdev);
 	ret = device_register(&rdev->dev);
 	if (ret != 0) {
 		put_device(&rdev->dev);
 		goto unset_supplies;
 	}
 
-	dev_set_drvdata(&rdev->dev, rdev);
 	rdev_init_debugfs(rdev);
 	rdev->proxy_consumer = regulator_proxy_consumer_register(dev,
 							config->of_node);
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index cb18b5c..86b3487 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -153,6 +153,7 @@
 static struct regulator_ops pfuze100_swb_regulator_ops = {
 	.enable = regulator_enable_regmap,
 	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
 	.list_voltage = regulator_list_voltage_table,
 	.map_voltage = regulator_map_voltage_ascend,
 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index a0f0fcec..0fd7802 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -181,6 +181,7 @@
 	int				soft_start_us;
 	int				vreg_ok_dbc_us;
 	int				voltage_mv;
+	int				prev_voltage_mv;
 };
 
 struct ncp_regulator {
@@ -195,6 +196,7 @@
 	int				soft_start_us;
 	int				vreg_ok_dbc_us;
 	int				voltage_mv;
+	int				prev_voltage_mv;
 };
 
 struct bst_params {
@@ -216,6 +218,7 @@
 	struct device			*dev;
 	struct platform_device		*pdev;
 	struct regmap			*regmap;
+	struct class			lcdb_class;
 	struct pmic_revid_data		*pmic_rev_id;
 	u32				base;
 	u32				wa_flags;
@@ -229,6 +232,8 @@
 	bool				lcdb_enabled;
 	bool				settings_saved;
 	bool				lcdb_sc_disable;
+	bool				secure_mode;
+	bool				voltage_step_ramp;
 	int				sc_count;
 	ktime_t				sc_module_enable_time;
 
@@ -250,6 +255,7 @@
 	LDO,
 	NCP,
 	BST,
+	LDO_NCP,
 };
 
 enum pfm_hysteresis {
@@ -321,6 +327,12 @@
 		.valid = _valid			\
 	}					\
 
+static int qpnp_lcdb_set_voltage_step(struct qpnp_lcdb *lcdb,
+				      int voltage_start_mv, u8 type);
+
+static int qpnp_lcdb_set_voltage(struct qpnp_lcdb *lcdb,
+				 int voltage_mv, u8 type);
+
 static bool is_between(int value, int min, int max)
 {
 	if (value < min || value > max)
@@ -784,9 +796,13 @@
 	return 0;
 }
 
+#define VOLTAGE_START_MV	4500
+#define VOLTAGE_STEP_MV		500
+
 static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb)
 {
 	int rc = 0, timeout, delay;
+	int voltage_mv = VOLTAGE_START_MV;
 	u8 val = 0;
 
 	if (lcdb->lcdb_enabled || lcdb->lcdb_sc_disable) {
@@ -809,6 +825,22 @@
 		return rc;
 	}
 
+	if (lcdb->voltage_step_ramp) {
+		if (lcdb->ldo.voltage_mv < VOLTAGE_START_MV)
+			voltage_mv = lcdb->ldo.voltage_mv;
+
+		rc = qpnp_lcdb_set_voltage(lcdb, voltage_mv, LDO);
+		if (rc < 0)
+			return rc;
+
+		if (lcdb->ncp.voltage_mv < VOLTAGE_START_MV)
+			voltage_mv = lcdb->ncp.voltage_mv;
+
+		rc = qpnp_lcdb_set_voltage(lcdb, voltage_mv, NCP);
+		if (rc < 0)
+			return rc;
+	}
+
 	val = MODULE_EN_BIT;
 	rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
 							&val, 1);
@@ -845,6 +877,17 @@
 	}
 
 	lcdb->lcdb_enabled = true;
+	if (lcdb->voltage_step_ramp) {
+		usleep_range(10000, 11000);
+		rc = qpnp_lcdb_set_voltage_step(lcdb,
+						voltage_mv + VOLTAGE_STEP_MV,
+						LDO_NCP);
+		if (rc < 0) {
+			pr_err("Failed to set LCDB voltage rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	pr_debug("lcdb enabled successfully!\n");
 
 	return 0;
@@ -1131,6 +1174,56 @@
 	return rc;
 }
 
+static int qpnp_lcdb_set_voltage_step(struct qpnp_lcdb *lcdb,
+				      int voltage_start_mv, u8 type)
+{
+	int i, ldo_voltage, ncp_voltage, voltage, rc = 0;
+
+	for (i = voltage_start_mv; i <= (MAX_VOLTAGE_MV + VOLTAGE_STEP_MV);
+						i += VOLTAGE_STEP_MV) {
+
+		ldo_voltage = (lcdb->ldo.voltage_mv < i) ?
+					lcdb->ldo.voltage_mv : i;
+
+		ncp_voltage = (lcdb->ncp.voltage_mv < i) ?
+					lcdb->ncp.voltage_mv : i;
+		if (type == LDO_NCP) {
+			rc = qpnp_lcdb_set_voltage(lcdb, ldo_voltage, LDO);
+			if (rc < 0)
+				return rc;
+
+			rc = qpnp_lcdb_set_voltage(lcdb, ncp_voltage, NCP);
+			if (rc < 0)
+				return rc;
+
+			pr_debug(" LDO voltage step %d NCP voltage step %d\n",
+					ldo_voltage, ncp_voltage);
+
+			if ((i >= lcdb->ncp.voltage_mv) &&
+					(i >= lcdb->ldo.voltage_mv))
+				break;
+		} else {
+			voltage = (type == LDO) ? ldo_voltage : ncp_voltage;
+			rc = qpnp_lcdb_set_voltage(lcdb, voltage, type);
+			if (rc < 0)
+				return rc;
+
+			pr_debug("%s voltage step %d\n",
+				 (type == LDO) ? "LDO" : "NCP", voltage);
+			if ((type == LDO) && (i >= lcdb->ldo.voltage_mv))
+				break;
+
+			if ((type == NCP) && (i >= lcdb->ncp.voltage_mv))
+				break;
+
+		}
+
+		usleep_range(1000, 1100);
+	}
+
+	return rc;
+}
+
 static int qpnp_lcdb_get_voltage(struct qpnp_lcdb *lcdb,
 					u32 *voltage_mv, u8 type)
 {
@@ -1203,6 +1296,9 @@
 	int rc = 0;
 	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
 
+	if (lcdb->secure_mode)
+		return 0;
+
 	mutex_lock(&lcdb->lcdb_mutex);
 	rc = qpnp_lcdb_enable(lcdb);
 	if (rc < 0)
@@ -1217,6 +1313,9 @@
 	int rc = 0;
 	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
 
+	if (lcdb->secure_mode)
+		return 0;
+
 	mutex_lock(&lcdb->lcdb_mutex);
 	rc = qpnp_lcdb_disable(lcdb);
 	if (rc < 0)
@@ -1239,11 +1338,20 @@
 	int rc = 0;
 	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
 
-	rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, LDO);
+	if (lcdb->secure_mode)
+		return 0;
+
+	lcdb->ldo.voltage_mv = min_uV / 1000;
+	if (lcdb->voltage_step_ramp)
+		rc = qpnp_lcdb_set_voltage_step(lcdb,
+			lcdb->ldo.prev_voltage_mv + VOLTAGE_STEP_MV, LDO);
+	else
+		rc = qpnp_lcdb_set_voltage(lcdb, lcdb->ldo.voltage_mv, LDO);
+
 	if (rc < 0)
 		pr_err("Failed to set LDO voltage rc=%c\n", rc);
 	else
-		lcdb->ldo.voltage_mv = min_uV / 1000;
+		lcdb->ldo.prev_voltage_mv = lcdb->ldo.voltage_mv;
 
 	return rc;
 }
@@ -1276,6 +1384,9 @@
 	int rc = 0;
 	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
 
+	if (lcdb->secure_mode)
+		return 0;
+
 	mutex_lock(&lcdb->lcdb_mutex);
 	rc = qpnp_lcdb_enable(lcdb);
 	if (rc < 0)
@@ -1290,6 +1401,9 @@
 	int rc = 0;
 	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
 
+	if (lcdb->secure_mode)
+		return 0;
+
 	mutex_lock(&lcdb->lcdb_mutex);
 	rc = qpnp_lcdb_disable(lcdb);
 	if (rc < 0)
@@ -1312,11 +1426,20 @@
 	int rc = 0;
 	struct qpnp_lcdb *lcdb  = rdev_get_drvdata(rdev);
 
-	rc = qpnp_lcdb_set_voltage(lcdb, min_uV / 1000, NCP);
-	if (rc < 0)
-		pr_err("Failed to set LDO voltage rc=%c\n", rc);
+	if (lcdb->secure_mode)
+		return 0;
+
+	lcdb->ncp.voltage_mv = min_uV / 1000;
+	if (lcdb->voltage_step_ramp)
+		rc = qpnp_lcdb_set_voltage_step(lcdb,
+			lcdb->ncp.prev_voltage_mv + VOLTAGE_STEP_MV, NCP);
 	else
-		lcdb->ncp.voltage_mv = min_uV / 1000;
+		rc = qpnp_lcdb_set_voltage(lcdb, lcdb->ncp.voltage_mv, NCP);
+
+	if (rc < 0)
+		pr_err("Failed to set NCP voltage rc=%c\n", rc);
+	else
+		lcdb->ncp.prev_voltage_mv = lcdb->ncp.voltage_mv;
 
 	return rc;
 }
@@ -1678,6 +1801,8 @@
 		return rc;
 	}
 
+	lcdb->ldo.prev_voltage_mv = lcdb->ldo.voltage_mv;
+
 	rc = qpnp_lcdb_read(lcdb, lcdb->base +
 			LCDB_LDO_VREG_OK_CTL_REG, &val, 1);
 	if (rc < 0) {
@@ -1783,6 +1908,8 @@
 		return rc;
 	}
 
+	lcdb->ncp.prev_voltage_mv = lcdb->ncp.voltage_mv;
+
 	rc = qpnp_lcdb_read(lcdb, lcdb->base +
 			LCDB_NCP_VREG_OK_CTL_REG, &val, 1);
 	if (rc < 0) {
@@ -1959,6 +2086,8 @@
 
 	if (lcdb->sc_irq >= 0 && !(lcdb->wa_flags & NCP_SCP_DISABLE_WA)) {
 		lcdb->sc_count = 0;
+		irq_set_status_flags(lcdb->sc_irq,
+					IRQ_DISABLE_UNLAZY);
 		rc = devm_request_threaded_irq(lcdb->dev, lcdb->sc_irq,
 				NULL, qpnp_lcdb_sc_irq_handler, IRQF_ONESHOT,
 				"qpnp_lcdb_sc_irq", lcdb);
@@ -2017,6 +2146,7 @@
 	}
 
 	of_node_put(revid_dev_node);
+
 	for_each_available_child_of_node(node, temp) {
 		rc = of_property_read_string(temp, "label", &label);
 		if (rc < 0) {
@@ -2056,9 +2186,50 @@
 	if (lcdb->sc_irq < 0)
 		pr_debug("sc irq is not defined\n");
 
+	lcdb->voltage_step_ramp =
+			of_property_read_bool(node, "qcom,voltage-step-ramp");
+
 	return rc;
 }
 
+static ssize_t qpnp_lcdb_irq_control(struct class *c,
+					struct class_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct qpnp_lcdb *lcdb = container_of(c, struct qpnp_lcdb,
+							lcdb_class);
+	int val, rc;
+
+	rc = kstrtouint(buf, 0, &val);
+
+	if (rc < 0)
+		return rc;
+
+	if (val != 0 && val != 1)
+		return count;
+
+	if (val == 1 && !lcdb->secure_mode) {
+		if (lcdb->sc_irq > 0)
+			disable_irq(lcdb->sc_irq);
+
+		lcdb->secure_mode = true;
+	} else if (val == 0 && lcdb->secure_mode) {
+
+		if (lcdb->sc_irq > 0)
+			enable_irq(lcdb->sc_irq);
+
+		lcdb->secure_mode = false;
+	}
+
+	return count;
+}
+
+static struct class_attribute lcdb_attributes[] = {
+	[0] = __ATTR(secure_mode, 0664, NULL,
+				qpnp_lcdb_irq_control),
+	__ATTR_NULL,
+};
+
 static int qpnp_lcdb_regulator_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -2098,6 +2269,16 @@
 		return rc;
 	}
 
+	lcdb->lcdb_class.name = "lcd_bias";
+	lcdb->lcdb_class.owner = THIS_MODULE;
+	lcdb->lcdb_class.class_attrs = lcdb_attributes;
+
+	rc = class_register(&lcdb->lcdb_class);
+	if (rc < 0) {
+		pr_err("Failed to register lcdb  class rc = %d\n", rc);
+		return rc;
+	}
+
 	rc = qpnp_lcdb_hw_init(lcdb);
 	if (rc < 0)
 		pr_err("Failed to initialize LCDB module rc=%d\n", rc);
diff --git a/drivers/regulator/qpnp-oledb-regulator.c b/drivers/regulator/qpnp-oledb-regulator.c
index bee9a3d..03f9ab1 100644
--- a/drivers/regulator/qpnp-oledb-regulator.c
+++ b/drivers/regulator/qpnp-oledb-regulator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -155,6 +155,7 @@
 	struct platform_device			*pdev;
 	struct device				*dev;
 	struct regmap				*regmap;
+	struct class				oledb_class;
 	struct regulator_desc			rdesc;
 	struct regulator_dev			*rdev;
 	struct qpnp_oledb_psm_ctl		psm_ctl;
@@ -185,6 +186,7 @@
 	bool					force_pd_control;
 	bool					handle_lab_sc_notification;
 	bool					lab_sc_detected;
+	bool					secure_mode;
 };
 
 static const u16 oledb_warmup_dly_ns[] = {6700, 13300, 26700, 53400};
@@ -279,6 +281,9 @@
 
 	struct qpnp_oledb *oledb  = rdev_get_drvdata(rdev);
 
+	if (oledb->secure_mode)
+		return 0;
+
 	if (oledb->lab_sc_detected == true) {
 		pr_info("Short circuit detected: Disabled OLEDB rail\n");
 		return 0;
@@ -342,6 +347,8 @@
 
 	struct qpnp_oledb *oledb  = rdev_get_drvdata(rdev);
 
+	if (oledb->secure_mode)
+		return 0;
 	/*
 	 * Disable ext-pin-ctl after display-supply is turned off. This is to
 	 * avoid glitches on the external pin.
@@ -416,7 +423,7 @@
 
 	struct qpnp_oledb *oledb = rdev_get_drvdata(rdev);
 
-	if (oledb->swire_control)
+	if (oledb->swire_control || oledb->secure_mode)
 		return 0;
 
 	val = DIV_ROUND_UP(min_uV - OLEDB_VOUT_MIN_MV, OLEDB_VOUT_STEP_MV);
@@ -1260,6 +1267,9 @@
 	struct qpnp_oledb *oledb = container_of(nb, struct qpnp_oledb,
 								oledb_nb);
 
+	if (oledb->secure_mode)
+		return 0;
+
 	if (action == LAB_VREG_NOT_OK) {
 		/* short circuit detected. Disable OLEDB module */
 		val = 0;
@@ -1286,6 +1296,10 @@
 	return NOTIFY_OK;
 }
 
+static struct class_attribute oledb_attributes[] = {
+	[0] = __ATTR(secure_mode, 0664, NULL, NULL),
+	__ATTR_NULL,
+};
 static int qpnp_oledb_regulator_probe(struct platform_device *pdev)
 {
 	int rc = 0;
@@ -1345,6 +1359,17 @@
 		pr_err("Failed to register regulator rc=%d\n", rc);
 		goto out;
 	}
+
+	oledb->oledb_class.name = "amoled_bias";
+	oledb->oledb_class.owner = THIS_MODULE;
+	oledb->oledb_class.class_attrs = oledb_attributes;
+
+	rc = class_register(&oledb->oledb_class);
+	if (rc < 0) {
+		pr_err("Failed to register oledb class rc = %d\n", rc);
+		return rc;
+	}
+
 	pr_info("OLEDB registered successfully, ext_pin_en=%d mod_en=%d current_voltage=%d mV\n",
 			oledb->ext_pin_control, oledb->mod_enable,
 						oledb->current_voltage);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 162afcc..d37960e 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -359,6 +359,11 @@
 {
 	int err;
 
+	if (!rtc->ops)
+		return -ENODEV;
+	else if (!rtc->ops->set_alarm)
+		return -EINVAL;
+
 	err = rtc_valid_tm(&alarm->time);
 	if (err != 0)
 		return err;
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index bd170cb..5747a54 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -164,6 +164,10 @@
 	} else if (p->r->flags & IORESOURCE_MEM) {
 		p->regs = devm_ioremap(&pdev->dev, p->r->start,
 					resource_size(p->r));
+		if (!p->regs) {
+			err = -ENOMEM;
+			goto out;
+		}
 		p->read = bq4802_read_mem;
 		p->write = bq4802_write_mem;
 	} else {
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 51e5244..bd5ca54 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -817,13 +817,6 @@
 			goto err;
 	}
 
-	if (rtc->is_pmic_controller) {
-		if (!pm_power_off) {
-			omap_rtc_power_off_rtc = rtc;
-			pm_power_off = omap_rtc_power_off;
-		}
-	}
-
 	/* Support ext_wakeup pinconf */
 	rtc_pinctrl_desc.name = dev_name(&pdev->dev);
 
@@ -833,6 +826,13 @@
 		return PTR_ERR(rtc->pctldev);
 	}
 
+	if (rtc->is_pmic_controller) {
+		if (!pm_power_off) {
+			omap_rtc_power_off_rtc = rtc;
+			pm_power_off = omap_rtc_power_off;
+		}
+	}
+
 	return 0;
 
 err:
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0f5bc2f..be17de9 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1834,6 +1834,9 @@
 	struct dasd_eckd_private *private = device->private;
 	int i;
 
+	if (!private)
+		return;
+
 	dasd_alias_disconnect_device_from_lcu(device);
 	private->ned = NULL;
 	private->sneq = NULL;
@@ -2085,8 +2088,11 @@
 
 static int dasd_eckd_online_to_ready(struct dasd_device *device)
 {
-	cancel_work_sync(&device->reload_device);
-	cancel_work_sync(&device->kick_validate);
+	if (cancel_work_sync(&device->reload_device))
+		dasd_put_device(device);
+	if (cancel_work_sync(&device->kick_validate))
+		dasd_put_device(device);
+
 	return 0;
 };
 
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 66e9bb0..18ab84e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -640,21 +640,20 @@
 	unsigned long phys_aob = 0;
 
 	if (!q->use_cq)
-		goto out;
+		return 0;
 
 	if (!q->aobs[bufnr]) {
 		struct qaob *aob = qdio_allocate_aob();
 		q->aobs[bufnr] = aob;
 	}
 	if (q->aobs[bufnr]) {
-		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
 		q->sbal_state[bufnr].aob = q->aobs[bufnr];
 		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
 		phys_aob = virt_to_phys(q->aobs[bufnr]);
 		WARN_ON_ONCE(phys_aob & 0xFF);
 	}
 
-out:
+	q->sbal_state[bufnr].flags = 0;
 	return phys_aob;
 }
 
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 283416a..a5e6030 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -23,6 +23,7 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
@@ -3499,13 +3500,14 @@
 	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 	if (atomic_read(&queue->set_pci_flags_count))
 		qdio_flags |= QDIO_FLAG_PCI_OUT;
+	atomic_add(count, &queue->used_buffers);
+
 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
 		     queue->queue_no, index, count);
 	if (queue->card->options.performance_stats)
 		queue->card->perf_stats.outbound_do_qdio_time +=
 			qeth_get_micros() -
 			queue->card->perf_stats.outbound_do_qdio_start_time;
-	atomic_add(count, &queue->used_buffers);
 	if (rc) {
 		queue->card->stats.tx_errors += count;
 		/* ignore temporary SIGA errors without busy condition */
@@ -4714,7 +4716,7 @@
 
 	priv.buffer_len = oat_data.buffer_len;
 	priv.response_len = 0;
-	priv.buffer =  kzalloc(oat_data.buffer_len, GFP_KERNEL);
+	priv.buffer = vzalloc(oat_data.buffer_len);
 	if (!priv.buffer) {
 		rc = -ENOMEM;
 		goto out;
@@ -4755,7 +4757,7 @@
 			rc = -EFAULT;
 
 out_free:
-	kfree(priv.buffer);
+	vfree(priv.buffer);
 out:
 	return rc;
 }
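
The kzalloc()-to-vzalloc() switch above avoids high-order page allocations for a buffer whose size is dictated by user space. A minimal sketch of the same pattern, with hypothetical names (foo_query(), FOO_MAX_BUF); memory from vzalloc() must be released with vfree(), never kfree().

#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

#define FOO_MAX_BUF	(1024 * 1024)	/* hypothetical upper bound */

static int foo_query(void __user *uarg, size_t len)
{
	void *buf;
	int rc = 0;

	if (len == 0 || len > FOO_MAX_BUF)
		return -EINVAL;

	buf = vzalloc(len);	/* virtually contiguous, no high-order pages needed */
	if (!buf)
		return -ENOMEM;

	/* ... fill buf from the adapter ... */

	if (copy_to_user(uarg, buf, len))
		rc = -EFAULT;

	vfree(buf);		/* pairs with vzalloc() */
	return rc;
}
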
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index db6a285..0a7a6da 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -423,6 +423,7 @@
 	if (card->discipline) {
 		card->discipline->remove(card->gdev);
 		qeth_core_free_discipline(card);
+		card->options.layer2 = -1;
 	}
 
 	rc = qeth_core_load_discipline(card, newdis);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index e94e957..58404e6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -491,7 +491,7 @@
 		default:
 			dev_kfree_skb_any(skb);
 			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 			continue;
 		}
 		work_done++;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4ca161b..efefe07 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1836,7 +1836,7 @@
 		default:
 			dev_kfree_skb_any(skb);
 			QETH_CARD_TEXT(card, 3, "inbunkno");
-			QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+			QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
 			continue;
 		}
 		work_done++;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b2..b78a2f3 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -889,6 +889,11 @@
 	unsigned int minor_number;
 	int retval = TW_IOCTL_ERROR_OS_ENODEV;
 
+	if (!capable(CAP_SYS_ADMIN)) {
+		retval = -EACCES;
+		goto out;
+	}
+
 	minor_number = iminor(inode);
 	if (minor_number >= twa_device_extension_count)
 		goto out;
@@ -2040,6 +2045,7 @@
 
 	if (twa_initialize_device_extension(tw_dev)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+		retval = -ENOMEM;
 		goto out_free_device_extension;
 	}
 
@@ -2062,6 +2068,7 @@
 	tw_dev->base_addr = ioremap(mem_addr, mem_len);
 	if (!tw_dev->base_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+		retval = -ENOMEM;
 		goto out_release_mem_region;
 	}
 
@@ -2069,8 +2076,10 @@
 	TW_DISABLE_INTERRUPTS(tw_dev);
 
 	/* Initialize the card */
-	if (twa_reset_sequence(tw_dev, 0))
+	if (twa_reset_sequence(tw_dev, 0)) {
+		retval = -ENOMEM;
 		goto out_iounmap;
+	}
 
 	/* Set host specific parameters */
 	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
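
The CAP_SYS_ADMIN checks added to the 3ware ioctl open paths (above, and in 3w-xxxx.c below) restrict the management character device to privileged users. A minimal sketch of the pattern under hypothetical names (foo_chrdev_open(), foo_count):

#include <linux/capability.h>
#include <linux/fs.h>

static int foo_count;	/* number of probed controllers (hypothetical) */

static int foo_chrdev_open(struct inode *inode, struct file *file)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;		/* management ioctls are admin-only */

	if (iminor(inode) >= foo_count)
		return -ENODEV;		/* no such controller */

	file->private_data = (void *)(unsigned long)iminor(inode);
	return 0;
}
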
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f837485..f0a5536 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,6 +1600,7 @@
 
 	if (twl_initialize_device_extension(tw_dev)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+		retval = -ENOMEM;
 		goto out_free_device_extension;
 	}
 
@@ -1614,6 +1615,7 @@
 	tw_dev->base_addr = pci_iomap(pdev, 1, 0);
 	if (!tw_dev->base_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+		retval = -ENOMEM;
 		goto out_release_mem_region;
 	}
 
@@ -1623,6 +1625,7 @@
 	/* Initialize the card */
 	if (twl_reset_sequence(tw_dev, 0)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+		retval = -ENOMEM;
 		goto out_iounmap;
 	}
 
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 25aba16..0ee0835 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1034,6 +1034,9 @@
 
 	dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	minor_number = iminor(inode);
 	if (minor_number >= tw_device_extension_count)
 		return -ENODEV;
@@ -2278,6 +2281,7 @@
 
 	if (tw_initialize_device_extension(tw_dev)) {
 		printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+		retval = -ENOMEM;
 		goto out_free_device_extension;
 	}
 
@@ -2292,6 +2296,7 @@
 	tw_dev->base_addr = pci_resource_start(pdev, 0);
 	if (!tw_dev->base_addr) {
 		printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+		retval = -ENOMEM;
 		goto out_release_mem_region;
 	}
 
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 662b232..913ebb6 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1031,8 +1031,10 @@
 
 	aic94xx_transport_template =
 		sas_domain_attach_transport(&aic94xx_transport_functions);
-	if (!aic94xx_transport_template)
+	if (!aic94xx_transport_template) {
+		err = -ENOMEM;
 		goto out_destroy_caches;
+	}
 
 	err = pci_register_driver(&aic94xx_pci_driver);
 	if (err)
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 42921db..4ca1050 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2742,6 +2742,8 @@
 					      BNX2X_DOORBELL_PCI_BAR);
 		reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
 		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+		if (!ep->qp.ctx_base)
+			return -ENOMEM;
 		goto arm_cq;
 	}
 
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index dcf3653..cc3994d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -755,9 +755,9 @@
 	case ELS_LOGO:
 		if (fip->mode == FIP_MODE_VN2VN) {
 			if (fip->state != FIP_ST_VNMP_UP)
-				return -EINVAL;
+				goto drop;
 			if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
-				return -EINVAL;
+				goto drop;
 		} else {
 			if (fip->state != FIP_ST_ENABLED)
 				return 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee..e173022 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -259,7 +259,7 @@
 
 	ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
 	if (ppartition_name)
-		strncpy(partition_name, ppartition_name,
+		strlcpy(partition_name, ppartition_name,
 				sizeof(partition_name));
 	p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
 	if (p_number_ptr)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 97aeadd..e3ffd24 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1935,6 +1935,7 @@
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 			     fc_rport_state(rdata));
 
+		rdata->flags &= ~FC_RP_STARTED;
 		fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 		mutex_unlock(&rdata->rp_mutex);
 		kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c2b6829..cc8f2a7 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -283,11 +283,11 @@
 		 */
 		if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
 			iscsi_conn_printk(KERN_INFO, conn,
-					  "task [op %x/%x itt "
+					  "task [op %x itt "
 					  "0x%x/0x%x] "
 					  "rejected.\n",
-					  task->hdr->opcode, opcode,
-					  task->itt, task->hdr_itt);
+					  opcode, task->itt,
+					  task->hdr_itt);
 			return -EACCES;
 		}
 		/*
@@ -296,10 +296,10 @@
 		 */
 		if (conn->session->fast_abort) {
 			iscsi_conn_printk(KERN_INFO, conn,
-					  "task [op %x/%x itt "
+					  "task [op %x itt "
 					  "0x%x/0x%x] fast abort.\n",
-					  task->hdr->opcode, opcode,
-					  task->itt, task->hdr_itt);
+					  opcode, task->itt,
+					  task->hdr_itt);
 			return -EACCES;
 		}
 		break;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 9d05302..19bffe0b 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4197,6 +4197,9 @@
 	int irq, i, j;
 	int error = -ENODEV;
 
+	if (hba_count >= MAX_CONTROLLERS)
+		goto out;
+
 	if (pci_enable_device(pdev))
 		goto out;
 	pci_set_master(pdev);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 35cbd36..090fdcd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6193,6 +6193,9 @@
 			goto fail_init_mfi;
 	}
 
+	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
+		goto fail_init_mfi;
+
 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
 		     (unsigned long)instance);
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a156451..f722a0e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2031,6 +2031,9 @@
 		pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
 	} else {
+		if (os_timeout_value)
+			os_timeout_value++;
+
 		/* system pd Fast Path */
 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 		timeout_limit = (scmd->device->type == TYPE_DISK) ?
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 34bbcfc..5f66b6d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -329,11 +329,10 @@
 
 	wait_for_completion(&tm_iocb->u.tmf.comp);
 
-	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+	rval = tm_iocb->u.tmf.data;
 
-	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8030,
 		    "TM IOCB failed (%x).\n", rval);
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index baccd11..c813c9b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5218,8 +5218,9 @@
 			}
 		}
 
-		if (test_and_clear_bit(ISP_ABORT_NEEDED,
-						&base_vha->dpc_flags)) {
+		if (test_and_clear_bit
+		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
 			    "ISP abort scheduled.\n");
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index a5e30e9..375cede 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -58,7 +58,10 @@
 	{"IBM", "3526",			"rdac", },
 	{"IBM", "3542",			"rdac", },
 	{"IBM", "3552",			"rdac", },
-	{"SGI", "TP9",			"rdac", },
+	{"SGI", "TP9300",		"rdac", },
+	{"SGI", "TP9400",		"rdac", },
+	{"SGI", "TP9500",		"rdac", },
+	{"SGI", "TP9700",		"rdac", },
 	{"SGI", "IS",			"rdac", },
 	{"STK", "OPENstorage",		"rdac", },
 	{"STK", "FLEXLINE 380",		"rdac", },
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f14d95e..5294fa3 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -709,8 +709,24 @@
 sdev_store_delete(struct device *dev, struct device_attribute *attr,
 		  const char *buf, size_t count)
 {
-	if (device_remove_file_self(dev, attr))
-		scsi_remove_device(to_scsi_device(dev));
+	struct kernfs_node *kn;
+
+	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+	WARN_ON_ONCE(!kn);
+	/*
+	 * Concurrent writes into the "delete" sysfs attribute may trigger
+	 * concurrent calls to device_remove_file() and scsi_remove_device().
+	 * device_remove_file() handles concurrent removal calls by
+	 * serializing these and by ignoring the second and later removal
+	 * attempts.  Concurrent calls of scsi_remove_device() are
+	 * serialized. The second and later calls of scsi_remove_device() are
+	 * ignored because the first call of that function changes the device
+	 * state into SDEV_DEL.
+	 */
+	device_remove_file(dev, attr);
+	scsi_remove_device(to_scsi_device(dev));
+	if (kn)
+		sysfs_unbreak_active_protection(kn);
 	return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
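
A minimal sketch of the self-deleting sysfs attribute pattern described in the comment above, using the same sysfs_break_active_protection()/sysfs_unbreak_active_protection() pair; foo_remove_device() is a hypothetical stand-in for scsi_remove_device().

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static void foo_remove_device(struct device *dev)
{
	/* hypothetical teardown, e.g. scsi_remove_device() in the hunk above */
}

static ssize_t foo_store_delete(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/*
	 * Drop the active protection of this attribute so that removing the
	 * device (which removes this very file) cannot deadlock against the
	 * write that is currently executing.
	 */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	WARN_ON_ONCE(!kn);

	device_remove_file(dev, attr);
	foo_remove_device(dev);

	if (kn)
		sysfs_unbreak_active_protection(kn);
	return count;
}
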
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0f657b8..edc6362 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2189,6 +2189,7 @@
 	write_lock_irqsave(&sdp->sfd_lock, iflags);
 	if (atomic_read(&sdp->detaching)) {
 		write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+		kfree(sfp);
 		return ERR_PTR(-ENODEV);
 	}
 	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
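
The kfree(sfp) added above plugs a leak on the "device is detaching" early return. A minimal sketch of the pattern under hypothetical names (struct foo_dev, struct foo_fd, foo_add_fd()); the sg code uses an rwlock, the sketch uses a plain spinlock.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/err.h>

struct foo_dev {
	spinlock_t lock;
	struct list_head fds;
	atomic_t detaching;
};

struct foo_fd {
	struct list_head siblings;
};

static struct foo_fd *foo_add_fd(struct foo_dev *fdev)
{
	struct foo_fd *ffd;
	unsigned long flags;

	ffd = kzalloc(sizeof(*ffd), GFP_KERNEL);
	if (!ffd)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&fdev->lock, flags);
	if (atomic_read(&fdev->detaching)) {
		spin_unlock_irqrestore(&fdev->lock, flags);
		kfree(ffd);			/* do not leak on the error path */
		return ERR_PTR(-ENODEV);
	}
	list_add_tail(&ffd->siblings, &fdev->fds);
	spin_unlock_irqrestore(&fdev->lock, flags);
	return ffd;
}
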
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0169984..cc484cb 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -520,18 +520,26 @@
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
 {
 	struct scsi_cd *cd;
+	struct scsi_device *sdev;
 	int ret = -ENXIO;
 
+	cd = scsi_cd_get(bdev->bd_disk);
+	if (!cd)
+		goto out;
+
+	sdev = cd->device;
+	scsi_autopm_get_device(sdev);
 	check_disk_change(bdev);
 
 	mutex_lock(&sr_mutex);
-	cd = scsi_cd_get(bdev->bd_disk);
-	if (cd) {
-		ret = cdrom_open(&cd->cdi, bdev, mode);
-		if (ret)
-			scsi_cd_put(cd);
-	}
+	ret = cdrom_open(&cd->cdi, bdev, mode);
 	mutex_unlock(&sr_mutex);
+
+	scsi_autopm_put_device(sdev);
+	if (ret)
+		scsi_cd_put(cd);
+
+out:
 	return ret;
 }
 
@@ -559,6 +567,8 @@
 	if (ret)
 		goto out;
 
+	scsi_autopm_get_device(sdev);
+
 	/*
 	 * Send SCSI addressing ioctls directly to mid level, send other
 	 * ioctls to cdrom/block level.
@@ -567,15 +577,18 @@
 	case SCSI_IOCTL_GET_IDLUN:
 	case SCSI_IOCTL_GET_BUS_NUMBER:
 		ret = scsi_ioctl(sdev, cmd, argp);
-		goto out;
+		goto put;
 	}
 
 	ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
 	if (ret != -ENOSYS)
-		goto out;
+		goto put;
 
 	ret = scsi_ioctl(sdev, cmd, argp);
 
+put:
+	scsi_autopm_put_device(sdev);
+
 out:
 	mutex_unlock(&sr_mutex);
 	return ret;
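
A minimal sketch of the reordering done in sr_block_open() above: pin the driver object and take a power reference before check_disk_change() can issue commands, and unwind in reverse order on failure. Names with a foo_ prefix are hypothetical, and the generic pm_runtime_get_sync()/pm_runtime_put() calls stand in for the SCSI-internal scsi_autopm_get_device()/scsi_autopm_put_device() helpers.

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

struct foo_cd {
	struct device *dev;
	/* ... */
};

struct foo_cd *foo_get(struct gendisk *disk);	/* hypothetical, like scsi_cd_get() */
void foo_put(struct foo_cd *cd);		/* hypothetical, like scsi_cd_put() */
int foo_open(struct foo_cd *cd, struct block_device *bdev, fmode_t mode);

static int foo_block_open(struct block_device *bdev, fmode_t mode)
{
	struct foo_cd *cd;
	int ret;

	cd = foo_get(bdev->bd_disk);		/* pin the driver object first */
	if (!cd)
		return -ENXIO;

	pm_runtime_get_sync(cd->dev);		/* resume before touching the drive */
	check_disk_change(bdev);		/* may issue commands to the device */

	ret = foo_open(cd, bdev, mode);

	pm_runtime_put(cd->dev);
	if (ret)
		foo_put(cd);			/* drop the reference on failure */
	return ret;
}
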
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 1f3967d..06fb897 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -366,6 +366,7 @@
 	.open		= ufsdbg_err_inj_scenario_open,
 	.read		= seq_read,
 	.write		= ufsdbg_err_inj_scenario_write,
+	.release	= single_release,
 };
 
 static int ufsdbg_err_inj_stats_read(struct seq_file *file, void *data)
@@ -407,6 +408,7 @@
 	.open		= ufsdbg_err_inj_stats_open,
 	.read		= seq_read,
 	.write		= ufsdbg_err_inj_stats_write,
+	.release	= single_release,
 };
 
 static void ufsdbg_setup_fault_injection(struct ufs_hba *hba)
@@ -591,6 +593,7 @@
 	.open		= ufsdbg_tag_stats_open,
 	.read		= seq_read,
 	.write		= ufsdbg_tag_stats_write,
+	.release	= single_release,
 };
 
 static int ufsdbg_query_stats_show(struct seq_file *file, void *data)
@@ -662,6 +665,7 @@
 	.open		= ufsdbg_query_stats_open,
 	.read		= seq_read,
 	.write		= ufsdbg_query_stats_write,
+	.release	= single_release,
 };
 
 static int ufsdbg_err_stats_show(struct seq_file *file, void *data)
@@ -766,6 +770,7 @@
 	.open		= ufsdbg_err_stats_open,
 	.read		= seq_read,
 	.write		= ufsdbg_err_stats_write,
+	.release	= single_release,
 };
 
 static int ufshcd_init_statistics(struct ufs_hba *hba)
@@ -845,6 +850,7 @@
 static const struct file_operations ufsdbg_host_regs_fops = {
 	.open		= ufsdbg_host_regs_open,
 	.read		= seq_read,
+	.release	= single_release,
 };
 
 static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data)
@@ -1008,6 +1014,7 @@
 static const struct file_operations ufsdbg_show_hba_fops = {
 	.open		= ufsdbg_show_hba_open,
 	.read		= seq_read,
+	.release	= single_release,
 };
 
 static int ufsdbg_dump_device_desc_open(struct inode *inode, struct file *file)
@@ -1019,6 +1026,7 @@
 static const struct file_operations ufsdbg_dump_device_desc = {
 	.open		= ufsdbg_dump_device_desc_open,
 	.read		= seq_read,
+	.release	= single_release,
 };
 
 static int ufsdbg_power_mode_show(struct seq_file *file, void *data)
@@ -1257,6 +1265,7 @@
 	.open		= ufsdbg_power_mode_open,
 	.read		= seq_read,
 	.write		= ufsdbg_power_mode_write,
+	.release	= single_release,
 };
 
 static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)
@@ -1436,6 +1445,7 @@
 	.open		= ufsdbg_req_stats_open,
 	.read		= seq_read,
 	.write		= ufsdbg_req_stats_write,
+	.release	= single_release,
 };
 
 
@@ -1484,6 +1494,7 @@
 	.open		= ufsdbg_reset_controller_open,
 	.read		= seq_read,
 	.write		= ufsdbg_reset_controller_write,
+	.release	= single_release,
 };
 
 static int ufsdbg_clear_err_state(void *data, u64 val)
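
The .release = single_release additions above matter because single_open() allocates a struct seq_file that only single_release() frees; without it, every open/close cycle of the debugfs file leaks memory. A minimal, self-contained example of the complete pairing (foo_stats_show() and the file name are hypothetical):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

static int foo_stats_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "example stats\n");
	return 0;
}

static int foo_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_stats_show, inode->i_private);
}

static const struct file_operations foo_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,	/* frees the seq_file allocated by single_open() */
};

/* registered with e.g. debugfs_create_file("stats", 0444, parent, NULL, &foo_stats_fops); */
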
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index db4ecec..ecbe307 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015,2017, Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018 Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -186,6 +186,7 @@
 	.open		= ufs_qcom_dbg_testbus_cfg_open,
 	.read		= seq_read,
 	.write		= ufs_qcom_dbg_testbus_cfg_write,
+	.release	= single_release,
 };
 
 static int ufs_qcom_dbg_testbus_bus_read(void *data, u64 *attr_val)
@@ -240,6 +241,7 @@
 static const struct file_operations ufs_qcom_dbg_dbg_regs_desc = {
 	.open		= ufs_qcom_dbg_dbg_regs_open,
 	.read		= seq_read,
+	.release	= single_release,
 };
 
 static int ufs_qcom_dbg_pm_qos_show(struct seq_file *file, void *data)
@@ -273,6 +275,7 @@
 static const struct file_operations ufs_qcom_dbg_pm_qos_desc = {
 	.open		= ufs_qcom_dbg_pm_qos_open,
 	.read		= seq_read,
+	.release	= single_release,
 };
 
 void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
index 2e3997d..5515f1a 100644
--- a/drivers/scsi/ufs/ufs_test.c
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -78,6 +78,7 @@
 	.open = ufs_test_ ## test_name ## _open,			\
 	.read = seq_read,						\
 	.write = ufs_test_ ## test_name ## _write,			\
+	.release = single_release,					\
 };
 
 #define add_test(utd, test_name, upper_case_name)			\
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6c97870..e8d860d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3445,6 +3445,7 @@
 	int tag;
 	struct completion wait;
 	unsigned long flags;
+	bool has_read_lock = false;
 
 	/*
 	 * May get invoked from shutdown and IOCTL contexts.
@@ -3452,8 +3453,10 @@
 	 * In error recovery context, it may come with lock acquired.
 	 */
 
-	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba)) {
 		down_read(&hba->lock);
+		has_read_lock = true;
+	}
 
 	/*
 	 * Get free slot, sleep if slots are unavailable.
@@ -3486,7 +3489,7 @@
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
-	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+	if (has_read_lock)
 		up_read(&hba->lock);
 	return err;
 }
@@ -7504,7 +7507,7 @@
 		dev_err(hba->dev,
 			"%s: Failed reading power descriptor.len = %d ret = %d",
 			__func__, buff_len, ret);
-		return;
+		goto out;
 	}
 
 	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
@@ -7518,6 +7521,9 @@
 		dev_err(hba->dev,
 			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
 			__func__, icc_level, ret);
+
+out:
+	kfree(desc_buf);
 }
 
 /**
@@ -7875,7 +7881,7 @@
 
 static int ufs_read_device_desc_data(struct ufs_hba *hba)
 {
-	int err;
+	int err = 0;
 	u8 *desc_buf = NULL;
 
 	if (hba->desc_size.dev_desc) {
@@ -7889,7 +7895,7 @@
 	}
 	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
 	if (err)
-		return err;
+		goto out;
 
 	/*
 	 * getting vendor (manufacturerID) and Bank Index in big endian
@@ -7901,8 +7907,9 @@
 	hba->dev_info.b_device_sub_class =
 		desc_buf[DEVICE_DESC_PARAM_DEVICE_SUB_CLASS];
 	hba->dev_info.i_product_name = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
-
-	return 0;
+out:
+	kfree(desc_buf);
+	return err;
 }
 
 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
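
A minimal sketch of the locking fix in the ufshcd hunk above: record whether the read lock was actually taken instead of re-evaluating the shutdown/error-handling condition on the way out, because that condition can change while the command runs. struct foo_hba and foo_needs_lock() are hypothetical.

#include <linux/rwsem.h>
#include <linux/types.h>

struct foo_hba {
	struct rw_semaphore lock;
	/* ... */
};

bool foo_needs_lock(struct foo_hba *hba);	/* hypothetical state check */

static int foo_exec_cmd(struct foo_hba *hba)
{
	bool has_read_lock = false;
	int err = 0;

	if (foo_needs_lock(hba)) {
		down_read(&hba->lock);
		has_read_lock = true;
	}

	/* ... issue the command and wait for completion ... */

	if (has_read_lock)		/* release only what was actually taken */
		up_read(&hba->lock);
	return err;
}
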
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 15ca09c..874e9f0 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -564,9 +564,14 @@
 	    (btstat == BTSTAT_SUCCESS ||
 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
-		cmd->result = (DID_OK << 16) | sdstat;
-		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
-			cmd->result |= (DRIVER_SENSE << 24);
+		if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+			cmd->result = (DID_RESET << 16);
+		} else {
+			cmd->result = (DID_OK << 16) | sdstat;
+			if (sdstat == SAM_STAT_CHECK_CONDITION &&
+			    cmd->sense_buffer)
+				cmd->result |= (DRIVER_SENSE << 24);
+		}
 	} else
 		switch (btstat) {
 		case BTSTAT_SUCCESS:
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687..e1b32ed 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -676,10 +676,17 @@
 static int scsifront_sdev_configure(struct scsi_device *sdev)
 {
 	struct vscsifrnt_info *info = shost_priv(sdev->host);
+	int err;
 
-	if (info && current == info->curr)
-		xenbus_printf(XBT_NIL, info->dev->nodename,
+	if (info && current == info->curr) {
+		err = xenbus_printf(XBT_NIL, info->dev->nodename,
 			      info->dev_state_path, "%d", XenbusStateConnected);
+		if (err) {
+			xenbus_dev_error(info->dev, err,
+				"%s: writing dev_state_path", __func__);
+			return err;
+		}
+	}
 
 	return 0;
 }
@@ -687,10 +694,15 @@
 static void scsifront_sdev_destroy(struct scsi_device *sdev)
 {
 	struct vscsifrnt_info *info = shost_priv(sdev->host);
+	int err;
 
-	if (info && current == info->curr)
-		xenbus_printf(XBT_NIL, info->dev->nodename,
+	if (info && current == info->curr) {
+		err = xenbus_printf(XBT_NIL, info->dev->nodename,
 			      info->dev_state_path, "%d", XenbusStateClosed);
+		if (err)
+			xenbus_dev_error(info->dev, err,
+				"%s: writing dev_state_path", __func__);
+	}
 }
 
 static struct scsi_host_template scsifront_sht = {
@@ -1025,9 +1037,12 @@
 
 			if (scsi_add_device(info->host, chn, tgt, lun)) {
 				dev_err(&dev->dev, "scsi_add_device\n");
-				xenbus_printf(XBT_NIL, dev->nodename,
+				err = xenbus_printf(XBT_NIL, dev->nodename,
 					      info->dev_state_path,
 					      "%d", XenbusStateClosed);
+				if (err)
+					xenbus_dev_error(dev, err,
+						"%s: writing dev_state_path", __func__);
 			}
 			break;
 		case VSCSIFRONT_OP_DEL_LUN:
@@ -1041,10 +1056,14 @@
 			}
 			break;
 		case VSCSIFRONT_OP_READD_LUN:
-			if (device_state == XenbusStateConnected)
-				xenbus_printf(XBT_NIL, dev->nodename,
+			if (device_state == XenbusStateConnected) {
+				err = xenbus_printf(XBT_NIL, dev->nodename,
 					      info->dev_state_path,
 					      "%d", XenbusStateConnected);
+				if (err)
+					xenbus_dev_error(dev, err,
+						"%s: writing dev_state_path", __func__);
+			}
 			break;
 		default:
 			break;
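
A minimal sketch of the error handling added to xen-scsifront above: xenbus_printf() can fail (for instance when xenstore is unreachable), so its return value is checked and reported through xenbus_dev_error() rather than silently discarded. The "device-state" node name and foo_set_state() are hypothetical.

#include <xen/xenbus.h>

static void foo_set_state(struct xenbus_device *dev, enum xenbus_state state)
{
	int err;

	err = xenbus_printf(XBT_NIL, dev->nodename, "device-state",
			    "%d", state);
	if (err)
		xenbus_dev_error(dev, err,
				 "%s: writing device-state", __func__);
}
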
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0e16501..9583336 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -533,15 +533,16 @@
 	  this layer registers a transport with IPC Router and enable
 	  message exchange.
 
-config MSM_IPC_ROUTER_HSIC_XPRT
-	depends on USB_QCOM_IPC_BRIDGE
+config MSM_IPC_ROUTER_USB_XPRT
+	depends on USB_QCOM_IPC_BRIDGE || USB_F_IPC
 	depends on IPC_ROUTER
-	bool "MSM HSIC XPRT Layer"
+	bool "MSM USB XPRT Layer"
 	help
-	  HSIC Transport Layer that enables off-chip communication of
-	  IPC Router. When the HSIC endpoint becomes available, this layer
-	  registers the transport with IPC Router and enable message
-	  exchange.
+	  USB Transport Layer that enables off-chip communication of IPC Router.
+	  When the USB endpoint becomes available, this layer registers the
+	  transport with IPC Router and enables message exchange. This layer is
+	  independent of USB host or device mode IPC bridge and can interface
+	  with only one of them on a given platform.
 
 config MSM_IPC_ROUTER_MHI_XPRT
 	depends on MSM_MHI
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 1da8346..2e59e77 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -57,7 +57,7 @@
 obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
 obj-$(CONFIG_MSM_SMP2P) += msm_smp2p.o smp2p_loopback.o smp2p_debug.o smp2p_sleepstate.o
 obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT) += ipc_router_smd_xprt.o
-obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT) += ipc_router_hsic_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_USB_XPRT) += ipc_router_usb_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT) += ipc_router_mhi_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_GLINK_XPRT) += ipc_router_glink_xprt.o
 obj-$(CONFIG_MSM_QMI_INTERFACE) += qmi_interface.o
diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c
index 1b5830a..4cd4638 100644
--- a/drivers/soc/qcom/bg_rsb.c
+++ b/drivers/soc/qcom/bg_rsb.c
@@ -135,6 +135,8 @@
 
 	bool calibration_needed;
 	bool is_calibrd;
+
+	bool is_cnfgrd;
 };
 
 static void *bgrsb_drv;
@@ -415,6 +417,7 @@
 			pr_err("Failed to unvote LDO-11 on BG down\n");
 	}
 
+	dev->is_cnfgrd = false;
 	pr_info("RSB current state is : %d\n", dev->bgrsb_current_state);
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
@@ -452,6 +455,9 @@
 		else
 			pr_err("Failed to unvote LDO-11 on BG Glink down\n");
 	}
+
+	dev->is_cnfgrd = false;
+
 	if (dev->handle)
 		glink_close(dev->handle);
 	dev->handle = NULL;
@@ -562,6 +568,8 @@
 				dev->bgrsb_current_state = BGRSB_STATE_INIT;
 			return;
 		}
+
+		dev->is_cnfgrd = true;
 		dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 		pr_debug("RSB Cofigured\n");
 	}
@@ -592,6 +600,7 @@
 				dev->bgrsb_current_state = BGRSB_STATE_INIT;
 			return;
 		}
+		dev->is_cnfgrd = true;
 		dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 		pr_debug("Glink RSB Cofigured\n");
 	}
@@ -715,6 +724,11 @@
 			container_of(work, struct bgrsb_priv,
 							rsb_calibration_work);
 
+	if (!dev->is_cnfgrd) {
+		pr_err("RSB is not configured\n");
+		return;
+	}
+
 	req.cmd_id = 0x03;
 	req.data = dev->calbrtion_cpi;
 
@@ -744,6 +758,11 @@
 			container_of(work, struct bgrsb_priv,
 							bttn_configr_work);
 
+	if (!dev->is_cnfgrd) {
+		pr_err("RSB is not configured\n");
+		return;
+	}
+
 	req.cmd_id = 0x05;
 	req.data = dev->bttn_configs;
 
@@ -993,7 +1012,8 @@
 		goto ret_success;
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
-		if (bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
+		if (dev->is_cnfgrd &&
+			bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
 			dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
 			pr_debug("RSB Cofigured\n");
 			goto ret_success;
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 8a8484b..a7d262b 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -3186,6 +3186,8 @@
 
 int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
 {
+	char *fw_build_timestamp = NULL;
+
 	if (!penv || !dev) {
 		icnss_pr_err("Platform driver not initialized\n");
 		return -EINVAL;
@@ -3198,6 +3200,8 @@
 	info->board_id = penv->board_info.board_id;
 	info->soc_id = penv->soc_info.soc_id;
 	info->fw_version = penv->fw_version_info.fw_version;
+	fw_build_timestamp = penv->fw_version_info.fw_build_timestamp;
+	fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01] = '\0';
 	strlcpy(info->fw_build_timestamp,
 		penv->fw_version_info.fw_build_timestamp,
 		QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1);
diff --git a/drivers/soc/qcom/ipc_router_hsic_xprt.c b/drivers/soc/qcom/ipc_router_hsic_xprt.c
deleted file mode 100644
index 937c9f7..0000000
--- a/drivers/soc/qcom/ipc_router_hsic_xprt.c
+++ /dev/null
@@ -1,784 +0,0 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * IPC ROUTER HSIC XPRT module.
- */
-#define DEBUG
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/of.h>
-#include <linux/ipc_router_xprt.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <soc/qcom/subsystem_restart.h>
-
-#include <mach/ipc_bridge.h>
-
-static int msm_ipc_router_hsic_xprt_debug_mask;
-module_param_named(debug_mask, msm_ipc_router_hsic_xprt_debug_mask,
-		   int, 0664);
-
-#if defined(DEBUG)
-#define D(x...) do { \
-if (msm_ipc_router_hsic_xprt_debug_mask) \
-	pr_info(x); \
-} while (0)
-#else
-#define D(x...) do { } while (0)
-#endif
-
-#define NUM_HSIC_XPRTS 1
-#define XPRT_NAME_LEN 32
-
-/**
- * msm_ipc_router_hsic_xprt - IPC Router's HSIC XPRT structure
- * @list: IPC router's HSIC XPRTs list.
- * @ch_name: Name of the HSIC endpoint exported by ipc_bridge driver.
- * @xprt_name: Name of the XPRT to be registered with IPC Router.
- * @driver: Platform drivers register by this XPRT.
- * @xprt: IPC Router XPRT structure to contain HSIC XPRT specific info.
- * @pdev: Platform device registered by IPC Bridge function driver.
- * @hsic_xprt_wq: Workqueue to queue read & other XPRT related works.
- * @read_work: Read Work to perform read operation from HSIC's ipc_bridge.
- * @in_pkt: Pointer to any partially read packet.
- * @ss_reset_lock: Lock to protect access to the ss_reset flag.
- * @ss_reset: flag used to check SSR state.
- * @sft_close_complete: Variable to indicate completion of SSR handling
- *                      by IPC Router.
- * @xprt_version: IPC Router header version supported by this XPRT.
- * @xprt_option: XPRT specific options to be handled by IPC Router.
- */
-struct msm_ipc_router_hsic_xprt {
-	struct list_head list;
-	char ch_name[XPRT_NAME_LEN];
-	char xprt_name[XPRT_NAME_LEN];
-	struct platform_driver driver;
-	struct msm_ipc_router_xprt xprt;
-	struct platform_device *pdev;
-	struct workqueue_struct *hsic_xprt_wq;
-	struct delayed_work read_work;
-	struct rr_packet *in_pkt;
-	struct mutex ss_reset_lock;
-	int ss_reset;
-	struct completion sft_close_complete;
-	unsigned int xprt_version;
-	unsigned int xprt_option;
-};
-
-struct msm_ipc_router_hsic_xprt_work {
-	struct msm_ipc_router_xprt *xprt;
-	struct work_struct work;
-};
-
-static void hsic_xprt_read_data(struct work_struct *work);
-
-/**
- * msm_ipc_router_hsic_xprt_config - Config. Info. of each HSIC XPRT
- * @ch_name: Name of the HSIC endpoint exported by ipc_bridge driver.
- * @xprt_name: Name of the XPRT to be registered with IPC Router.
- * @hsic_pdev_id: ID to differentiate among multiple ipc_bridge endpoints.
- * @link_id: Network Cluster ID to which this XPRT belongs to.
- * @xprt_version: IPC Router header version supported by this XPRT.
- */
-struct msm_ipc_router_hsic_xprt_config {
-	char ch_name[XPRT_NAME_LEN];
-	char xprt_name[XPRT_NAME_LEN];
-	int hsic_pdev_id;
-	uint32_t link_id;
-	unsigned int xprt_version;
-};
-
-struct msm_ipc_router_hsic_xprt_config hsic_xprt_cfg[] = {
-	{"ipc_bridge", "ipc_rtr_ipc_bridge1", 1, 1, 3},
-};
-
-#define MODULE_NAME "ipc_router_hsic_xprt"
-#define IPC_ROUTER_HSIC_XPRT_WAIT_TIMEOUT 3000
-static int ipc_router_hsic_xprt_probe_done;
-static struct delayed_work ipc_router_hsic_xprt_probe_work;
-static DEFINE_MUTEX(hsic_remote_xprt_list_lock_lha1);
-static LIST_HEAD(hsic_remote_xprt_list);
-
-/**
- * find_hsic_xprt_list() - Find xprt item specific to an HSIC endpoint
- * @name: Name of the platform device to find in list
- *
- * @return: pointer to msm_ipc_router_hsic_xprt if matching endpoint is found,
- *		else NULL.
- *
- * This function is used to find specific xprt item from the global xprt list
- */
-static struct msm_ipc_router_hsic_xprt *
-		find_hsic_xprt_list(const char *name)
-{
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-
-	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
-	list_for_each_entry(hsic_xprtp, &hsic_remote_xprt_list, list) {
-		if (!strcmp(name, hsic_xprtp->ch_name)) {
-			mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
-			return hsic_xprtp;
-		}
-	}
-	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
-	return NULL;
-}
-
-/**
- * ipc_router_hsic_set_xprt_version() - Set IPC Router header version
- *                                          in the transport
- * @xprt: Reference to the transport structure.
- * @version: The version to be set in transport.
- */
-static void ipc_router_hsic_set_xprt_version(
-	struct msm_ipc_router_xprt *xprt, unsigned int version)
-{
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-
-	if (!xprt)
-		return;
-	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
-	hsic_xprtp->xprt_version = version;
-}
-
-/**
- * msm_ipc_router_hsic_get_xprt_version() - Get IPC Router header version
- *                                          supported by the XPRT
- * @xprt: XPRT for which the version information is required.
- *
- * @return: IPC Router header version supported by the XPRT.
- */
-static int msm_ipc_router_hsic_get_xprt_version(
-	struct msm_ipc_router_xprt *xprt)
-{
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-
-	if (!xprt)
-		return -EINVAL;
-	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
-
-	return (int)hsic_xprtp->xprt_version;
-}
-
-/**
- * msm_ipc_router_hsic_get_xprt_option() - Get XPRT options
- * @xprt: XPRT for which the option information is required.
- *
- * @return: Options supported by the XPRT.
- */
-static int msm_ipc_router_hsic_get_xprt_option(
-	struct msm_ipc_router_xprt *xprt)
-{
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-
-	if (!xprt)
-		return -EINVAL;
-	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
-
-	return (int)hsic_xprtp->xprt_option;
-}
-
-/**
- * msm_ipc_router_hsic_remote_write_avail() - Get available write space
- * @xprt: XPRT for which the available write space info. is required.
- *
- * @return: Write space in bytes on success, 0 on SSR.
- */
-static int msm_ipc_router_hsic_remote_write_avail(
-	struct msm_ipc_router_xprt *xprt)
-{
-	struct ipc_bridge_platform_data *pdata;
-	int write_avail;
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
-		container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
-
-	mutex_lock(&hsic_xprtp->ss_reset_lock);
-	if (hsic_xprtp->ss_reset || !hsic_xprtp->pdev) {
-		write_avail = 0;
-	} else {
-		pdata = hsic_xprtp->pdev->dev.platform_data;
-		write_avail = pdata->max_write_size;
-	}
-	mutex_unlock(&hsic_xprtp->ss_reset_lock);
-	return write_avail;
-}
-
-/**
- * msm_ipc_router_hsic_remote_write() - Write to XPRT
- * @data: Data to be written to the XPRT.
- * @len: Length of the data to be written.
- * @xprt: XPRT to which the data has to be written.
- *
- * @return: Data Length on success, standard Linux error codes on failure.
- */
-static int msm_ipc_router_hsic_remote_write(void *data,
-		uint32_t len, struct msm_ipc_router_xprt *xprt)
-{
-	struct rr_packet *pkt = (struct rr_packet *)data;
-	struct sk_buff *skb;
-	struct ipc_bridge_platform_data *pdata;
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-	int ret;
-	uint32_t bytes_written = 0;
-	uint32_t bytes_to_write;
-	unsigned char *tx_data;
-
-	if (!pkt || pkt->length != len || !xprt) {
-		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
-	mutex_lock(&hsic_xprtp->ss_reset_lock);
-	if (hsic_xprtp->ss_reset) {
-		IPC_RTR_ERR("%s: Trying to write on a reset link\n", __func__);
-		mutex_unlock(&hsic_xprtp->ss_reset_lock);
-		return -ENETRESET;
-	}
-
-	if (!hsic_xprtp->pdev) {
-		IPC_RTR_ERR("%s: Trying to write on a closed link\n", __func__);
-		mutex_unlock(&hsic_xprtp->ss_reset_lock);
-		return -ENODEV;
-	}
-
-	pdata = hsic_xprtp->pdev->dev.platform_data;
-	if (!pdata || !pdata->write) {
-		IPC_RTR_ERR("%s on a uninitialized link\n", __func__);
-		mutex_unlock(&hsic_xprtp->ss_reset_lock);
-		return -EFAULT;
-	}
-
-	skb = skb_peek(pkt->pkt_fragment_q);
-	if (!skb) {
-		IPC_RTR_ERR("%s SKB is NULL\n", __func__);
-		mutex_unlock(&hsic_xprtp->ss_reset_lock);
-		return -EINVAL;
-	}
-	D("%s: About to write %d bytes\n", __func__, len);
-
-	while (bytes_written < len) {
-		bytes_to_write = min_t(uint32_t, (skb->len - bytes_written),
-				       pdata->max_write_size);
-		tx_data = skb->data + bytes_written;
-		ret = pdata->write(hsic_xprtp->pdev, tx_data, bytes_to_write);
-		if (ret < 0) {
-			IPC_RTR_ERR("%s: Error writing data %d\n",
-				    __func__, ret);
-			break;
-		}
-		if (ret != bytes_to_write)
-			IPC_RTR_ERR("%s: Partial write %d < %d, retrying...\n",
-				    __func__, ret, bytes_to_write);
-		bytes_written += bytes_to_write;
-	}
-	if (bytes_written == len) {
-		ret = bytes_written;
-	} else if (ret > 0 && bytes_written != len) {
-		IPC_RTR_ERR("%s: Fault writing data %d != %d\n",
-			    __func__, bytes_written, len);
-		ret = -EFAULT;
-	}
-	D("%s: Finished writing %d bytes\n", __func__, len);
-	mutex_unlock(&hsic_xprtp->ss_reset_lock);
-	return ret;
-}
-
-/**
- * msm_ipc_router_hsic_remote_close() - Close the XPRT
- * @xprt: XPRT which needs to be closed.
- *
- * @return: 0 on success, standard Linux error codes on failure.
- */
-static int msm_ipc_router_hsic_remote_close(
-	struct msm_ipc_router_xprt *xprt)
-{
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-	struct ipc_bridge_platform_data *pdata;
-
-	if (!xprt)
-		return -EINVAL;
-	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
-
-	mutex_lock(&hsic_xprtp->ss_reset_lock);
-	hsic_xprtp->ss_reset = 1;
-	mutex_unlock(&hsic_xprtp->ss_reset_lock);
-	flush_workqueue(hsic_xprtp->hsic_xprt_wq);
-	destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
-	pdata = hsic_xprtp->pdev->dev.platform_data;
-	if (pdata && pdata->close)
-		pdata->close(hsic_xprtp->pdev);
-	hsic_xprtp->pdev = NULL;
-	return 0;
-}
-
-/**
- * hsic_xprt_read_data() - Read work to read from the XPRT
- * @work: Read work to be executed.
- *
- * This function is a read work item queued on a XPRT specific workqueue.
- * The work parameter contains information regarding the XPRT on which this
- * read work has to be performed. The work item keeps reading from the HSIC
- * endpoint, until the endpoint returns an error.
- */
-static void hsic_xprt_read_data(struct work_struct *work)
-{
-	int bytes_to_read;
-	int bytes_read;
-	int skb_size;
-	struct sk_buff *skb = NULL;
-	struct ipc_bridge_platform_data *pdata;
-	struct delayed_work *rwork = to_delayed_work(work);
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
-		container_of(rwork, struct msm_ipc_router_hsic_xprt, read_work);
-
-	while (1) {
-		mutex_lock(&hsic_xprtp->ss_reset_lock);
-		if (hsic_xprtp->ss_reset) {
-			mutex_unlock(&hsic_xprtp->ss_reset_lock);
-			break;
-		}
-		pdata = hsic_xprtp->pdev->dev.platform_data;
-		mutex_unlock(&hsic_xprtp->ss_reset_lock);
-		while (!hsic_xprtp->in_pkt) {
-			hsic_xprtp->in_pkt = create_pkt(NULL);
-			if (hsic_xprtp->in_pkt)
-				break;
-			IPC_RTR_ERR("%s: packet allocation failure\n",
-								__func__);
-			msleep(100);
-		}
-		D("%s: Allocated rr_packet\n", __func__);
-
-		bytes_to_read = 0;
-		skb_size = pdata->max_read_size;
-		do {
-			do {
-				skb = alloc_skb(skb_size, GFP_KERNEL);
-				if (skb)
-					break;
-				IPC_RTR_ERR("%s: Couldn't alloc SKB\n",
-					    __func__);
-				msleep(100);
-			} while (!skb);
-			bytes_read = pdata->read(hsic_xprtp->pdev, skb->data,
-						 pdata->max_read_size);
-			if (bytes_read < 0) {
-				IPC_RTR_ERR("%s: Error %d @ read operation\n",
-					    __func__, bytes_read);
-				kfree_skb(skb);
-				goto out_read_data;
-			}
-			if (!bytes_to_read) {
-				bytes_to_read = ipc_router_peek_pkt_size(
-						skb->data);
-				if (bytes_to_read < 0) {
-					IPC_RTR_ERR("%s: Invalid size %d\n",
-						__func__, bytes_to_read);
-					kfree_skb(skb);
-					goto out_read_data;
-				}
-			}
-			bytes_to_read -= bytes_read;
-			skb_put(skb, bytes_read);
-			skb_queue_tail(hsic_xprtp->in_pkt->pkt_fragment_q, skb);
-			hsic_xprtp->in_pkt->length += bytes_read;
-			skb_size = min_t(uint32_t, pdata->max_read_size,
-					 (uint32_t)bytes_to_read);
-		} while (bytes_to_read > 0);
-
-		D("%s: Packet size read %d\n",
-		  __func__, hsic_xprtp->in_pkt->length);
-		msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
-			IPC_ROUTER_XPRT_EVENT_DATA, (void *)hsic_xprtp->in_pkt);
-		release_pkt(hsic_xprtp->in_pkt);
-		hsic_xprtp->in_pkt = NULL;
-	}
-out_read_data:
-	release_pkt(hsic_xprtp->in_pkt);
-	hsic_xprtp->in_pkt = NULL;
-}
-
-/**
- * hsic_xprt_sft_close_done() - Completion of XPRT reset
- * @xprt: XPRT on which the reset operation is complete.
- *
- * This function is used by IPC Router to signal this HSIC XPRT Abstraction
- * Layer(XAL) that the reset of XPRT is completely handled by IPC Router.
- */
-static void hsic_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
-{
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
-		container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
-
-	complete_all(&hsic_xprtp->sft_close_complete);
-}
-
-/**
- * msm_ipc_router_hsic_remote_remove() - Remove an HSIC endpoint
- * @pdev: Platform device corresponding to HSIC endpoint.
- *
- * @return: 0 on success, standard Linux error codes on error.
- *
- * This function is called when the underlying ipc_bridge driver unregisters
- * a platform device, mapped to an HSIC endpoint, during SSR.
- */
-static int msm_ipc_router_hsic_remote_remove(struct platform_device *pdev)
-{
-	struct ipc_bridge_platform_data *pdata;
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-
-	hsic_xprtp = find_hsic_xprt_list(pdev->name);
-	if (!hsic_xprtp) {
-		IPC_RTR_ERR("%s No device with name %s\n",
-					__func__, pdev->name);
-		return -ENODEV;
-	}
-
-	mutex_lock(&hsic_xprtp->ss_reset_lock);
-	hsic_xprtp->ss_reset = 1;
-	mutex_unlock(&hsic_xprtp->ss_reset_lock);
-	flush_workqueue(hsic_xprtp->hsic_xprt_wq);
-	destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
-	init_completion(&hsic_xprtp->sft_close_complete);
-	msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
-				   IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
-	D("%s: Notified IPC Router of %s CLOSE\n",
-	  __func__, hsic_xprtp->xprt.name);
-	wait_for_completion(&hsic_xprtp->sft_close_complete);
-	hsic_xprtp->pdev = NULL;
-	pdata = pdev->dev.platform_data;
-	if (pdata && pdata->close)
-		pdata->close(pdev);
-	return 0;
-}
-
-/**
- * msm_ipc_router_hsic_remote_probe() - Probe an HSIC endpoint
- * @pdev: Platform device corresponding to HSIC endpoint.
- *
- * @return: 0 on success, standard Linux error codes on error.
- *
- * This function is called when the underlying ipc_bridge driver registers
- * a platform device, mapped to an HSIC endpoint.
- */
-static int msm_ipc_router_hsic_remote_probe(struct platform_device *pdev)
-{
-	int rc;
-	struct ipc_bridge_platform_data *pdata;
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-
-	pdata = pdev->dev.platform_data;
-	if (!pdata || !pdata->open || !pdata->read ||
-	    !pdata->write || !pdata->close) {
-		IPC_RTR_ERR("%s: pdata or pdata->operations is NULL\n",
-								__func__);
-		return -EINVAL;
-	}
-
-	hsic_xprtp = find_hsic_xprt_list(pdev->name);
-	if (!hsic_xprtp) {
-		IPC_RTR_ERR("%s No device with name %s\n",
-						__func__, pdev->name);
-		return -ENODEV;
-	}
-
-	hsic_xprtp->hsic_xprt_wq =
-		create_singlethread_workqueue(pdev->name);
-	if (!hsic_xprtp->hsic_xprt_wq) {
-		IPC_RTR_ERR("%s: WQ creation failed for %s\n",
-			__func__, pdev->name);
-		return -EFAULT;
-	}
-
-	rc = pdata->open(pdev);
-	if (rc < 0) {
-		IPC_RTR_ERR("%s: Channel open failed for %s.%d\n",
-			__func__, pdev->name, pdev->id);
-		destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
-		return rc;
-	}
-	hsic_xprtp->pdev = pdev;
-	mutex_lock(&hsic_xprtp->ss_reset_lock);
-	hsic_xprtp->ss_reset = 0;
-	mutex_unlock(&hsic_xprtp->ss_reset_lock);
-	msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
-				   IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
-	D("%s: Notified IPC Router of %s OPEN\n",
-	  __func__, hsic_xprtp->xprt.name);
-	queue_delayed_work(hsic_xprtp->hsic_xprt_wq,
-			   &hsic_xprtp->read_work, 0);
-	return 0;
-}
-
-/**
- * msm_ipc_router_hsic_driver_register() - register HSIC XPRT drivers
- *
- * @hsic_xprtp: pointer to IPC router hsic xprt structure.
- *
- * @return: 0 on success, standard Linux error codes on error.
- *
- * This function is called when a new XPRT is added to register platform
- * drivers for new XPRT.
- */
-static int msm_ipc_router_hsic_driver_register(
-			struct msm_ipc_router_hsic_xprt *hsic_xprtp)
-{
-	int ret;
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp_item;
-
-	hsic_xprtp_item = find_hsic_xprt_list(hsic_xprtp->ch_name);
-
-	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
-	list_add(&hsic_xprtp->list, &hsic_remote_xprt_list);
-	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
-
-	if (!hsic_xprtp_item) {
-		hsic_xprtp->driver.driver.name = hsic_xprtp->ch_name;
-		hsic_xprtp->driver.driver.owner = THIS_MODULE;
-		hsic_xprtp->driver.probe = msm_ipc_router_hsic_remote_probe;
-		hsic_xprtp->driver.remove = msm_ipc_router_hsic_remote_remove;
-
-		ret = platform_driver_register(&hsic_xprtp->driver);
-		if (ret) {
-			IPC_RTR_ERR(
-			"%s: Failed to register platform driver[%s]\n",
-					__func__, hsic_xprtp->ch_name);
-			return ret;
-		}
-	} else {
-		IPC_RTR_ERR("%s Already driver registered %s\n",
-					__func__, hsic_xprtp->ch_name);
-	}
-
-	return 0;
-}
-
-/**
- * msm_ipc_router_hsic_config_init() - init HSIC xprt configs
- *
- * @hsic_xprt_config: pointer to HSIC xprt configurations.
- *
- * @return: 0 on success, standard Linux error codes on error.
- *
- * This function is called to initialize the HSIC XPRT pointer with
- * the HSIC XPRT configurations either from device tree or static arrays.
- */
-static int msm_ipc_router_hsic_config_init(
-		struct msm_ipc_router_hsic_xprt_config *hsic_xprt_config)
-{
-	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
-
-	hsic_xprtp = kzalloc(sizeof(struct msm_ipc_router_hsic_xprt),
-							GFP_KERNEL);
-	if (IS_ERR_OR_NULL(hsic_xprtp)) {
-		IPC_RTR_ERR("%s: kzalloc() failed for hsic_xprtp id:%s\n",
-				__func__, hsic_xprt_config->ch_name);
-		return -ENOMEM;
-	}
-
-	hsic_xprtp->xprt.link_id = hsic_xprt_config->link_id;
-	hsic_xprtp->xprt_version = hsic_xprt_config->xprt_version;
-
-	strlcpy(hsic_xprtp->ch_name, hsic_xprt_config->ch_name,
-					XPRT_NAME_LEN);
-
-	strlcpy(hsic_xprtp->xprt_name, hsic_xprt_config->xprt_name,
-						XPRT_NAME_LEN);
-	hsic_xprtp->xprt.name = hsic_xprtp->xprt_name;
-
-	hsic_xprtp->xprt.set_version =
-		ipc_router_hsic_set_xprt_version;
-	hsic_xprtp->xprt.get_version =
-		msm_ipc_router_hsic_get_xprt_version;
-	hsic_xprtp->xprt.get_option =
-		 msm_ipc_router_hsic_get_xprt_option;
-	hsic_xprtp->xprt.read_avail = NULL;
-	hsic_xprtp->xprt.read = NULL;
-	hsic_xprtp->xprt.write_avail =
-		msm_ipc_router_hsic_remote_write_avail;
-	hsic_xprtp->xprt.write = msm_ipc_router_hsic_remote_write;
-	hsic_xprtp->xprt.close = msm_ipc_router_hsic_remote_close;
-	hsic_xprtp->xprt.sft_close_done = hsic_xprt_sft_close_done;
-	hsic_xprtp->xprt.priv = NULL;
-
-	hsic_xprtp->in_pkt = NULL;
-	INIT_DELAYED_WORK(&hsic_xprtp->read_work, hsic_xprt_read_data);
-	mutex_init(&hsic_xprtp->ss_reset_lock);
-	hsic_xprtp->ss_reset = 0;
-	hsic_xprtp->xprt_option = 0;
-
-	msm_ipc_router_hsic_driver_register(hsic_xprtp);
-	return 0;
-
-}
-
-/**
- * parse_devicetree() - parse device tree binding
- *
- * @node: pointer to device tree node
- * @hsic_xprt_config: pointer to HSIC XPRT configurations
- *
- * @return: 0 on success, -ENODEV on failure.
- */
-static int parse_devicetree(struct device_node *node,
-		struct msm_ipc_router_hsic_xprt_config *hsic_xprt_config)
-{
-	int ret;
-	int link_id;
-	int version;
-	char *key;
-	const char *ch_name;
-	const char *remote_ss;
-
-	key = "qcom,ch-name";
-	ch_name = of_get_property(node, key, NULL);
-	if (!ch_name)
-		goto error;
-	strlcpy(hsic_xprt_config->ch_name, ch_name, XPRT_NAME_LEN);
-
-	key = "qcom,xprt-remote";
-	remote_ss = of_get_property(node, key, NULL);
-	if (!remote_ss)
-		goto error;
-
-	key = "qcom,xprt-linkid";
-	ret = of_property_read_u32(node, key, &link_id);
-	if (ret)
-		goto error;
-	hsic_xprt_config->link_id = link_id;
-
-	key = "qcom,xprt-version";
-	ret = of_property_read_u32(node, key, &version);
-	if (ret)
-		goto error;
-	hsic_xprt_config->xprt_version = version;
-
-	scnprintf(hsic_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
-			remote_ss, hsic_xprt_config->ch_name);
-
-	return 0;
-
-error:
-	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
-	return -ENODEV;
-}
-
-/**
- * msm_ipc_router_hsic_xprt_probe() - Probe an HSIC xprt
- * @pdev: Platform device corresponding to HSIC xprt.
- *
- * @return: 0 on success, standard Linux error codes on error.
- *
- * This function is called when the underlying device tree driver registers
- * a platform device, mapped to an HSIC transport.
- */
-static int msm_ipc_router_hsic_xprt_probe(
-				struct platform_device *pdev)
-{
-	int ret;
-	struct msm_ipc_router_hsic_xprt_config hsic_xprt_config;
-
-	if (pdev && pdev->dev.of_node) {
-		mutex_lock(&hsic_remote_xprt_list_lock_lha1);
-		ipc_router_hsic_xprt_probe_done = 1;
-		mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
-
-		ret = parse_devicetree(pdev->dev.of_node,
-						&hsic_xprt_config);
-		if (ret) {
-			IPC_RTR_ERR("%s: Failed to parse device tree\n",
-								__func__);
-			return ret;
-		}
-
-		ret = msm_ipc_router_hsic_config_init(
-						&hsic_xprt_config);
-		if (ret) {
-			IPC_RTR_ERR(" %s init failed\n", __func__);
-			return ret;
-		}
-	}
-	return ret;
-}
-
-/**
- * ipc_router_hsic_xprt_probe_worker() - probe worker for non DT configurations
- *
- * @work: work item to process
- *
- * This function is called by schedule_delay_work after 3sec and check if
- * device tree probe is done or not. If device tree probe fails the default
- * configurations read from static array.
- */
-static void ipc_router_hsic_xprt_probe_worker(struct work_struct *work)
-{
-	int i, ret;
-
-	if (WARN_ON(ARRAY_SIZE(hsic_xprt_cfg) != NUM_HSIC_XPRTS))
-		return;
-
-	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
-	if (!ipc_router_hsic_xprt_probe_done) {
-		mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
-		for (i = 0; i < ARRAY_SIZE(hsic_xprt_cfg); i++) {
-			ret = msm_ipc_router_hsic_config_init(
-							&hsic_xprt_cfg[i]);
-			if (ret)
-				IPC_RTR_ERR(" %s init failed config idx %d\n",
-								__func__, i);
-		}
-		mutex_lock(&hsic_remote_xprt_list_lock_lha1);
-	}
-	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
-}
-
-static const struct of_device_id msm_ipc_router_hsic_xprt_match_table[] = {
-	{ .compatible = "qcom,ipc_router_hsic_xprt" },
-	{},
-};
-
-static struct platform_driver msm_ipc_router_hsic_xprt_driver = {
-	.probe = msm_ipc_router_hsic_xprt_probe,
-	.driver = {
-		.name = MODULE_NAME,
-		.owner = THIS_MODULE,
-		.of_match_table = msm_ipc_router_hsic_xprt_match_table,
-	 },
-};
-
-static int __init msm_ipc_router_hsic_xprt_init(void)
-{
-	int rc;
-
-	rc = platform_driver_register(&msm_ipc_router_hsic_xprt_driver);
-	if (rc) {
-		IPC_RTR_ERR(
-		"%s: msm_ipc_router_hsic_xprt_driver register failed %d\n",
-								__func__, rc);
-		return rc;
-	}
-
-	INIT_DELAYED_WORK(&ipc_router_hsic_xprt_probe_work,
-					ipc_router_hsic_xprt_probe_worker);
-	schedule_delayed_work(&ipc_router_hsic_xprt_probe_work,
-			msecs_to_jiffies(IPC_ROUTER_HSIC_XPRT_WAIT_TIMEOUT));
-	return 0;
-}
-
-module_init(msm_ipc_router_hsic_xprt_init);
-MODULE_DESCRIPTION("IPC Router HSIC XPRT");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ipc_router_usb_xprt.c b/drivers/soc/qcom/ipc_router_usb_xprt.c
new file mode 100644
index 0000000..0c6fda6
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_usb_xprt.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright (c) 2013-2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER USB XPRT module.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include <linux/usb/ipc_bridge.h>
+
+static int msm_ipc_router_usb_xprt_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_usb_xprt_debug_mask,
+		   int, 0664);
+
+#define D(x...) do { \
+if (msm_ipc_router_usb_xprt_debug_mask) \
+	pr_info(x); \
+} while (0)
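+
+/*
+ * Debug logging is off by default. Assuming this file builds as the
+ * ipc_router_usb_xprt module, it can be enabled at runtime with e.g.:
+ *   echo 1 > /sys/module/ipc_router_usb_xprt/parameters/debug_mask
+ */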
+
+#define NUM_USB_XPRTS 1
+#define XPRT_NAME_LEN 32
+
+/**
+ * msm_ipc_router_usb_xprt - IPC Router's USB XPRT structure
+ * @list: IPC router's USB XPRTs list.
+ * @ch_name: Name of the USB endpoint exported by the ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @driver: Platform driver registered by this XPRT.
+ * @xprt: IPC Router XPRT structure to contain USB XPRT specific info.
+ * @pdev: Platform device registered by IPC Bridge function driver.
+ * @usb_xprt_wq: Workqueue used to queue the read work and other
+ *               XPRT-related work items.
+ * @read_work: Delayed work that performs read operations on the USB
+ *             ipc_bridge endpoint.
+ * @in_pkt: Pointer to any partially read packet.
+ * @ss_reset_lock: Lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *                      by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ */
+struct msm_ipc_router_usb_xprt {
+	struct list_head list;
+	char ch_name[XPRT_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	struct platform_driver driver;
+	struct msm_ipc_router_xprt xprt;
+	struct platform_device *pdev;
+	struct workqueue_struct *usb_xprt_wq;
+	struct delayed_work read_work;
+	struct rr_packet *in_pkt;
+	struct mutex ss_reset_lock;
+	int ss_reset;
+	struct completion sft_close_complete;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+};
+
+struct msm_ipc_router_usb_xprt_work {
+	struct msm_ipc_router_xprt *xprt;
+	struct work_struct work;
+};
+
+static void usb_xprt_read_data(struct work_struct *work);
+
+/**
+ * msm_ipc_router_usb_xprt_config - Configuration info for each USB XPRT
+ * @ch_name: Name of the USB endpoint exported by the ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @usb_pdev_id: ID to differentiate among multiple ipc_bridge endpoints.
+ * @link_id: Network cluster ID to which this XPRT belongs.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ */
+struct msm_ipc_router_usb_xprt_config {
+	char ch_name[XPRT_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	int usb_pdev_id;
+	uint32_t link_id;
+	unsigned int xprt_version;
+};
+
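+/*
+ * Default (non-device-tree) configuration, used by the delayed probe
+ * worker below when no matching device tree node has been probed.
+ * Fields map to struct msm_ipc_router_usb_xprt_config in declaration
+ * order: ch_name, xprt_name, usb_pdev_id, link_id, xprt_version.
+ */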
+static struct msm_ipc_router_usb_xprt_config usb_xprt_cfg[] = {
+	{"ipc_bridge", "ipc_rtr_ipc_bridge1", 1, 2, 3},
+};
+
+#define MODULE_NAME "ipc_router_usb_xprt"
+#define IPC_ROUTER_USB_XPRT_WAIT_TIMEOUT 3000
+static int ipc_router_usb_xprt_probe_done;
+static struct delayed_work ipc_router_usb_xprt_probe_work;
+static DEFINE_MUTEX(usb_remote_xprt_list_lock_lha1);
+static LIST_HEAD(usb_remote_xprt_list);
+
+/**
+ * find_usb_xprt_list() - Find the xprt item specific to a USB endpoint
+ * @name: Name of the platform device to find in list
+ *
+ * @return: pointer to msm_ipc_router_usb_xprt if matching endpoint is found,
+ *		else NULL.
+ *
+ * This function is used to find a specific xprt item in the global xprt list.
+ */
+static struct msm_ipc_router_usb_xprt *
+		find_usb_xprt_list(const char *name)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+
+	mutex_lock(&usb_remote_xprt_list_lock_lha1);
+	list_for_each_entry(usb_xprtp, &usb_remote_xprt_list, list) {
+		if (!strcmp(name, usb_xprtp->ch_name)) {
+			mutex_unlock(&usb_remote_xprt_list_lock_lha1);
+			return usb_xprtp;
+		}
+	}
+	mutex_unlock(&usb_remote_xprt_list_lock_lha1);
+	return NULL;
+}
+
+/**
+ * ipc_router_usb_set_xprt_version() - Set IPC Router header version
+ *                                          in the transport
+ * @xprt: Reference to the transport structure.
+ * @version: The version to be set in transport.
+ */
+static void ipc_router_usb_set_xprt_version(
+	struct msm_ipc_router_xprt *xprt, unsigned int version)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+
+	if (!xprt)
+		return;
+	usb_xprtp = container_of(xprt, struct msm_ipc_router_usb_xprt, xprt);
+	usb_xprtp->xprt_version = version;
+}
+
+/**
+ * msm_ipc_router_usb_get_xprt_version() - Get IPC Router header version
+ *                                          supported by the XPRT
+ * @xprt: XPRT for which the version information is required.
+ *
+ * @return: IPC Router header version supported by the XPRT.
+ */
+static int msm_ipc_router_usb_get_xprt_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	usb_xprtp = container_of(xprt, struct msm_ipc_router_usb_xprt, xprt);
+
+	return (int)usb_xprtp->xprt_version;
+}
+
+/**
+ * msm_ipc_router_usb_get_xprt_option() - Get XPRT options
+ * @xprt: XPRT for which the option information is required.
+ *
+ * @return: Options supported by the XPRT.
+ */
+static int msm_ipc_router_usb_get_xprt_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	usb_xprtp = container_of(xprt, struct msm_ipc_router_usb_xprt, xprt);
+
+	return (int)usb_xprtp->xprt_option;
+}
+
+/**
+ * msm_ipc_router_usb_remote_write_avail() - Get available write space
+ * @xprt: XPRT for which the available write space info. is required.
+ *
+ * @return: Write space in bytes on success, 0 on SSR.
+ */
+static int msm_ipc_router_usb_remote_write_avail(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_bridge_platform_data *pdata;
+	int write_avail;
+	struct msm_ipc_router_usb_xprt *usb_xprtp =
+		container_of(xprt, struct msm_ipc_router_usb_xprt, xprt);
+
+	mutex_lock(&usb_xprtp->ss_reset_lock);
+	if (usb_xprtp->ss_reset || !usb_xprtp->pdev) {
+		write_avail = 0;
+	} else {
+		pdata = usb_xprtp->pdev->dev.platform_data;
+		write_avail = pdata->max_write_size;
+	}
+	mutex_unlock(&usb_xprtp->ss_reset_lock);
+	return write_avail;
+}
+
+/**
+ * msm_ipc_router_usb_remote_write() - Write to XPRT
+ * @data: Data to be written to the XPRT.
+ * @len: Length of the data to be written.
+ * @xprt: XPRT to which the data has to be written.
+ *
+ * @return: Data Length on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_usb_remote_write(void *data,
+		uint32_t len, struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *skb;
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+	int ret;
+	uint32_t bytes_written = 0;
+	uint32_t bytes_to_write;
+	unsigned char *tx_data;
+
+	if (!pkt || pkt->length != len || !xprt) {
+		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	usb_xprtp = container_of(xprt, struct msm_ipc_router_usb_xprt, xprt);
+	mutex_lock(&usb_xprtp->ss_reset_lock);
+	if (usb_xprtp->ss_reset) {
+		IPC_RTR_ERR("%s: Trying to write on a reset link\n", __func__);
+		mutex_unlock(&usb_xprtp->ss_reset_lock);
+		return -ENETRESET;
+	}
+
+	if (!usb_xprtp->pdev) {
+		IPC_RTR_ERR("%s: Trying to write on a closed link\n", __func__);
+		mutex_unlock(&usb_xprtp->ss_reset_lock);
+		return -ENODEV;
+	}
+
+	pdata = usb_xprtp->pdev->dev.platform_data;
+	if (!pdata || !pdata->write) {
+		IPC_RTR_ERR("%s on an uninitialized link\n", __func__);
+		mutex_unlock(&usb_xprtp->ss_reset_lock);
+		return -EFAULT;
+	}
+
+	skb = skb_peek(pkt->pkt_fragment_q);
+	if (!skb) {
+		IPC_RTR_ERR("%s SKB is NULL\n", __func__);
+		mutex_unlock(&usb_xprtp->ss_reset_lock);
+		return -EINVAL;
+	}
+
+	if (len > pdata->max_write_size)
+		pr_warn("%s: Data size exceeds max write size %d\n",
+					__func__, pdata->max_write_size);
+
+	D("%s: About to write %d bytes\n", __func__, len);
+
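+	/*
+	 * Write the packet out in chunks of at most max_write_size bytes.
+	 * A short write by the bridge driver is only logged here; the loop
+	 * still advances by the full chunk size.
+	 */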
+	while (bytes_written < len) {
+		bytes_to_write = min_t(uint32_t, (skb->len - bytes_written),
+				       pdata->max_write_size);
+		tx_data = skb->data + bytes_written;
+		ret = pdata->write(usb_xprtp->pdev, tx_data, bytes_to_write);
+		if (ret < 0) {
+			IPC_RTR_ERR("%s: Error writing data %d\n",
+				    __func__, ret);
+			break;
+		}
+		if (ret != bytes_to_write)
+			IPC_RTR_ERR("%s: Partial write %d < %d, retrying...\n",
+				    __func__, ret, bytes_to_write);
+		bytes_written += bytes_to_write;
+	}
+	if (bytes_written == len) {
+		ret = bytes_written;
+	} else if (ret > 0 && bytes_written != len) {
+		IPC_RTR_ERR("%s: Fault writing data %d != %d\n",
+			    __func__, bytes_written, len);
+		ret = -EFAULT;
+	}
+	D("%s: Finished writing %d bytes\n", __func__, len);
+	mutex_unlock(&usb_xprtp->ss_reset_lock);
+	return ret;
+}
+
+/**
+ * msm_ipc_router_usb_remote_close() - Close the XPRT
+ * @xprt: XPRT which needs to be closed.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_usb_remote_close(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+	struct ipc_bridge_platform_data *pdata;
+
+	if (!xprt)
+		return -EINVAL;
+	usb_xprtp = container_of(xprt, struct msm_ipc_router_usb_xprt, xprt);
+
+	mutex_lock(&usb_xprtp->ss_reset_lock);
+	usb_xprtp->ss_reset = 1;
+	mutex_unlock(&usb_xprtp->ss_reset_lock);
+	flush_workqueue(usb_xprtp->usb_xprt_wq);
+	destroy_workqueue(usb_xprtp->usb_xprt_wq);
+	pdata = usb_xprtp->pdev->dev.platform_data;
+	if (pdata && pdata->close)
+		pdata->close(usb_xprtp->pdev);
+	usb_xprtp->pdev = NULL;
+	return 0;
+}
+
+/**
+ * usb_xprt_read_data() - Read work to read from the XPRT
+ * @work: Read work to be executed.
+ *
+ * This function is a read work item queued on an XPRT-specific workqueue.
+ * The work parameter contains information regarding the XPRT on which this
+ * read work has to be performed. The work item keeps reading from the USB
+ * endpoint, until the endpoint returns an error.
+ */
+static void usb_xprt_read_data(struct work_struct *work)
+{
+	int bytes_to_read;
+	int bytes_read;
+	int skb_size;
+	struct sk_buff *skb = NULL;
+	struct ipc_bridge_platform_data *pdata;
+	struct delayed_work *rwork = to_delayed_work(work);
+	struct msm_ipc_router_usb_xprt *usb_xprtp =
+		container_of(rwork, struct msm_ipc_router_usb_xprt, read_work);
+
+	while (1) {
+		mutex_lock(&usb_xprtp->ss_reset_lock);
+		if (usb_xprtp->ss_reset) {
+			mutex_unlock(&usb_xprtp->ss_reset_lock);
+			break;
+		}
+		pdata = usb_xprtp->pdev->dev.platform_data;
+		mutex_unlock(&usb_xprtp->ss_reset_lock);
+		while (!usb_xprtp->in_pkt) {
+			usb_xprtp->in_pkt = create_pkt(NULL);
+			if (usb_xprtp->in_pkt)
+				break;
+			IPC_RTR_ERR("%s: packet allocation failure\n",
+								__func__);
+			msleep(100);
+		}
+		D("%s: Allocated rr_packet\n", __func__);
+
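+		/*
+		 * Read one IPC Router packet: the first read is passed to
+		 * ipc_router_peek_pkt_size() to learn the total packet
+		 * length, and reads continue until the whole packet has
+		 * been queued on in_pkt->pkt_fragment_q.
+		 */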
+		bytes_to_read = 0;
+		skb_size = pdata->max_read_size;
+		do {
+			do {
+				skb = alloc_skb(skb_size, GFP_KERNEL);
+				if (skb)
+					break;
+				IPC_RTR_ERR("%s: Couldn't alloc SKB\n",
+					    __func__);
+				msleep(100);
+			} while (!skb);
+			bytes_read = pdata->read(usb_xprtp->pdev, skb->data,
+						 pdata->max_read_size);
+			if (bytes_read < 0) {
+				IPC_RTR_ERR("%s: Error %d @ read operation\n",
+					    __func__, bytes_read);
+				kfree_skb(skb);
+				goto out_read_data;
+			}
+			if (!bytes_to_read) {
+				bytes_to_read = ipc_router_peek_pkt_size(
+						skb->data);
+				if (bytes_to_read < 0) {
+					IPC_RTR_ERR("%s: Invalid size %d\n",
+						__func__, bytes_to_read);
+					kfree_skb(skb);
+					goto out_read_data;
+				}
+			}
+			bytes_to_read -= bytes_read;
+			skb_put(skb, bytes_read);
+			skb_queue_tail(usb_xprtp->in_pkt->pkt_fragment_q, skb);
+			usb_xprtp->in_pkt->length += bytes_read;
+			skb_size = min_t(uint32_t, pdata->max_read_size,
+					 (uint32_t)bytes_to_read);
+		} while (bytes_to_read > 0);
+
+		D("%s: Packet size read %d\n",
+		  __func__, usb_xprtp->in_pkt->length);
+		msm_ipc_router_xprt_notify(&usb_xprtp->xprt,
+			IPC_ROUTER_XPRT_EVENT_DATA, (void *)usb_xprtp->in_pkt);
+		release_pkt(usb_xprtp->in_pkt);
+		usb_xprtp->in_pkt = NULL;
+	}
+out_read_data:
+	release_pkt(usb_xprtp->in_pkt);
+	usb_xprtp->in_pkt = NULL;
+}
+
+/**
+ * usb_xprt_sft_close_done() - Completion of XPRT reset
+ * @xprt: XPRT on which the reset operation is complete.
+ *
+ * This function is used by IPC Router to signal this USB XPRT Abstraction
+ * Layer (XAL) that the reset of the XPRT has been completely handled by
+ * IPC Router.
+ */
+static void usb_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp =
+		container_of(xprt, struct msm_ipc_router_usb_xprt, xprt);
+
+	complete_all(&usb_xprtp->sft_close_complete);
+}
+
+/**
+ * msm_ipc_router_usb_remote_remove() - Remove a USB endpoint
+ * @pdev: Platform device corresponding to USB endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver unregisters
+ * a platform device, mapped to a USB endpoint, during SSR.
+ */
+static int msm_ipc_router_usb_remote_remove(struct platform_device *pdev)
+{
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+
+	usb_xprtp = find_usb_xprt_list(pdev->name);
+	if (!usb_xprtp) {
+		IPC_RTR_ERR("%s No device with name %s\n",
+					__func__, pdev->name);
+		return -ENODEV;
+	}
+
+	mutex_lock(&usb_xprtp->ss_reset_lock);
+	usb_xprtp->ss_reset = 1;
+	mutex_unlock(&usb_xprtp->ss_reset_lock);
+	flush_workqueue(usb_xprtp->usb_xprt_wq);
+	destroy_workqueue(usb_xprtp->usb_xprt_wq);
+	init_completion(&usb_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(&usb_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	  __func__, usb_xprtp->xprt.name);
+	wait_for_completion(&usb_xprtp->sft_close_complete);
+	usb_xprtp->pdev = NULL;
+	pdata = pdev->dev.platform_data;
+	if (pdata && pdata->close)
+		pdata->close(pdev);
+	return 0;
+}
+
+/**
+ * msm_ipc_router_usb_remote_probe() - Probe a USB endpoint
+ * @pdev: Platform device corresponding to USB endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver registers
+ * a platform device, mapped to a USB endpoint.
+ */
+static int msm_ipc_router_usb_remote_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata || !pdata->open || !pdata->read ||
+	    !pdata->write || !pdata->close) {
+		IPC_RTR_ERR("%s: pdata or pdata->operations is NULL\n",
+								__func__);
+		return -EINVAL;
+	}
+
+	usb_xprtp = find_usb_xprt_list(pdev->name);
+	if (!usb_xprtp) {
+		IPC_RTR_ERR("%s No device with name %s\n",
+						__func__, pdev->name);
+		return -ENODEV;
+	}
+
+	usb_xprtp->usb_xprt_wq =
+		create_singlethread_workqueue(pdev->name);
+	if (!usb_xprtp->usb_xprt_wq) {
+		IPC_RTR_ERR("%s: WQ creation failed for %s\n",
+			__func__, pdev->name);
+		return -EFAULT;
+	}
+
+	rc = pdata->open(pdev);
+	if (rc < 0) {
+		IPC_RTR_ERR("%s: Channel open failed for %s.%d\n",
+			__func__, pdev->name, pdev->id);
+		destroy_workqueue(usb_xprtp->usb_xprt_wq);
+		return rc;
+	}
+	usb_xprtp->pdev = pdev;
+	mutex_lock(&usb_xprtp->ss_reset_lock);
+	usb_xprtp->ss_reset = 0;
+	mutex_unlock(&usb_xprtp->ss_reset_lock);
+	msm_ipc_router_xprt_notify(&usb_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	  __func__, usb_xprtp->xprt.name);
+	queue_delayed_work(usb_xprtp->usb_xprt_wq,
+			   &usb_xprtp->read_work, 0);
+	return 0;
+}
+
+/**
+ * msm_ipc_router_usb_driver_register() - register USB XPRT drivers
+ *
+ * @usb_xprtp: pointer to IPC router usb xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added, to register a platform
+ * driver for the new XPRT.
+ */
+static int msm_ipc_router_usb_driver_register(
+			struct msm_ipc_router_usb_xprt *usb_xprtp)
+{
+	int ret;
+	struct msm_ipc_router_usb_xprt *usb_xprtp_item;
+
+	usb_xprtp_item = find_usb_xprt_list(usb_xprtp->ch_name);
+
+	mutex_lock(&usb_remote_xprt_list_lock_lha1);
+	list_add(&usb_xprtp->list, &usb_remote_xprt_list);
+	mutex_unlock(&usb_remote_xprt_list_lock_lha1);
+
+	if (!usb_xprtp_item) {
+		usb_xprtp->driver.driver.name = usb_xprtp->ch_name;
+		usb_xprtp->driver.driver.owner = THIS_MODULE;
+		usb_xprtp->driver.probe = msm_ipc_router_usb_remote_probe;
+		usb_xprtp->driver.remove = msm_ipc_router_usb_remote_remove;
+
+		ret = platform_driver_register(&usb_xprtp->driver);
+		if (ret) {
+			IPC_RTR_ERR(
+			"%s: Failed to register platform driver[%s]\n",
+					__func__, usb_xprtp->ch_name);
+			return ret;
+		}
+	} else {
+		IPC_RTR_ERR("%s Already driver registered %s\n",
+					__func__, usb_xprtp->ch_name);
+	}
+
+	return 0;
+}
+
+/**
+ * msm_ipc_router_usb_config_init() - init USB xprt configs
+ *
+ * @usb_xprt_config: pointer to USB xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the USB XPRT structure with the
+ * USB XPRT configuration, read either from the device tree or from the
+ * static array.
+ */
+static int msm_ipc_router_usb_config_init(
+		struct msm_ipc_router_usb_xprt_config *usb_xprt_config)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+
+	usb_xprtp = kzalloc(sizeof(struct msm_ipc_router_usb_xprt),
+							GFP_KERNEL);
+	if (!usb_xprtp) {
+		IPC_RTR_ERR("%s: kzalloc() failed for usb_xprtp id:%s\n",
+				__func__, usb_xprt_config->ch_name);
+		return -ENOMEM;
+	}
+
+	usb_xprtp->xprt.link_id = usb_xprt_config->link_id;
+	usb_xprtp->xprt_version = usb_xprt_config->xprt_version;
+
+	strlcpy(usb_xprtp->ch_name, usb_xprt_config->ch_name,
+					XPRT_NAME_LEN);
+
+	strlcpy(usb_xprtp->xprt_name, usb_xprt_config->xprt_name,
+						XPRT_NAME_LEN);
+	usb_xprtp->xprt.name = usb_xprtp->xprt_name;
+
+	usb_xprtp->xprt.set_version =
+		ipc_router_usb_set_xprt_version;
+	usb_xprtp->xprt.get_version =
+		msm_ipc_router_usb_get_xprt_version;
+	usb_xprtp->xprt.get_option =
+		 msm_ipc_router_usb_get_xprt_option;
+	usb_xprtp->xprt.read_avail = NULL;
+	usb_xprtp->xprt.read = NULL;
+	usb_xprtp->xprt.write_avail =
+		msm_ipc_router_usb_remote_write_avail;
+	usb_xprtp->xprt.write = msm_ipc_router_usb_remote_write;
+	usb_xprtp->xprt.close = msm_ipc_router_usb_remote_close;
+	usb_xprtp->xprt.sft_close_done = usb_xprt_sft_close_done;
+	usb_xprtp->xprt.priv = NULL;
+
+	usb_xprtp->in_pkt = NULL;
+	INIT_DELAYED_WORK(&usb_xprtp->read_work, usb_xprt_read_data);
+	mutex_init(&usb_xprtp->ss_reset_lock);
+	usb_xprtp->ss_reset = 0;
+	usb_xprtp->xprt_option = 0;
+
+	msm_ipc_router_usb_driver_register(usb_xprtp);
+	return 0;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @usb_xprt_config: pointer to USB XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct msm_ipc_router_usb_xprt_config *usb_xprt_config)
+{
+	int ret;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *remote_ss;
+
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(usb_xprt_config->ch_name, ch_name, XPRT_NAME_LEN);
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	usb_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	usb_xprt_config->xprt_version = version;
+
+	scnprintf(usb_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
+			remote_ss, usb_xprt_config->ch_name);
+
+	return 0;
+
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
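+
+/*
+ * Illustrative device tree node (not taken from a shipping platform)
+ * matching the properties parsed above and the compatible string
+ * registered below:
+ *
+ *	ipc_router_usb_xprt {
+ *		compatible = "qcom,ipc-router-usb-xprt";
+ *		qcom,ch-name = "ipc_bridge";
+ *		qcom,xprt-remote = "external-modem";
+ *		qcom,xprt-linkid = <2>;
+ *		qcom,xprt-version = <3>;
+ *	};
+ */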
+
+/**
+ * msm_ipc_router_usb_xprt_probe() - Probe a USB xprt
+ * @pdev: Platform device corresponding to USB xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a USB transport.
+ */
+static int msm_ipc_router_usb_xprt_probe(
+				struct platform_device *pdev)
+{
+	int ret;
+	struct msm_ipc_router_usb_xprt_config usb_xprt_config;
+
+	if (pdev && pdev->dev.of_node) {
+		mutex_lock(&usb_remote_xprt_list_lock_lha1);
+		ipc_router_usb_xprt_probe_done = 1;
+		mutex_unlock(&usb_remote_xprt_list_lock_lha1);
+
+		ret = parse_devicetree(pdev->dev.of_node,
+						&usb_xprt_config);
+		if (ret) {
+			IPC_RTR_ERR("%s: Failed to parse device tree\n",
+								__func__);
+			return ret;
+		}
+
+		ret = msm_ipc_router_usb_config_init(
+						&usb_xprt_config);
+		if (ret) {
+			IPC_RTR_ERR(" %s init failed\n", __func__);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipc_router_usb_xprt_probe_worker() - probe worker for non DT configurations
+ *
+ * @work: work item to process
+ *
+ * This function is scheduled with schedule_delayed_work() and runs after
+ * 3 seconds to check whether the device tree probe has completed. If the
+ * device tree probe has not run, the default configurations are read from
+ * the static array.
+ */
+static void ipc_router_usb_xprt_probe_worker(struct work_struct *work)
+{
+	int i, ret;
+
+	if (WARN_ON(ARRAY_SIZE(usb_xprt_cfg) != NUM_USB_XPRTS))
+		return;
+
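+	/*
+	 * If no device tree node registered this transport within the wait
+	 * timeout, fall back to the static usb_xprt_cfg table. The
+	 * probe_done flag is checked under the list lock, but the config
+	 * init itself runs with the lock dropped.
+	 */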
+	mutex_lock(&usb_remote_xprt_list_lock_lha1);
+	if (!ipc_router_usb_xprt_probe_done) {
+		mutex_unlock(&usb_remote_xprt_list_lock_lha1);
+		for (i = 0; i < ARRAY_SIZE(usb_xprt_cfg); i++) {
+			ret = msm_ipc_router_usb_config_init(
+							&usb_xprt_cfg[i]);
+			if (ret)
+				IPC_RTR_ERR(" %s init failed config idx %d\n",
+								__func__, i);
+		}
+		mutex_lock(&usb_remote_xprt_list_lock_lha1);
+	}
+	mutex_unlock(&usb_remote_xprt_list_lock_lha1);
+}
+
+static const struct of_device_id msm_ipc_router_usb_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc-router-usb-xprt" },
+	{},
+};
+
+static struct platform_driver msm_ipc_router_usb_xprt_driver = {
+	.probe = msm_ipc_router_usb_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ipc_router_usb_xprt_match_table,
+	 },
+};
+
+static int __init msm_ipc_router_usb_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_ipc_router_usb_xprt_driver);
+	if (rc) {
+		IPC_RTR_ERR(
+		"%s: msm_ipc_router_usb_xprt_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	INIT_DELAYED_WORK(&ipc_router_usb_xprt_probe_work,
+					ipc_router_usb_xprt_probe_worker);
+	schedule_delayed_work(&ipc_router_usb_xprt_probe_work,
+			msecs_to_jiffies(IPC_ROUTER_USB_XPRT_WAIT_TIMEOUT));
+	return 0;
+}
+
+static void __exit msm_ipc_router_usb_xprt_exit(void)
+{
+	struct msm_ipc_router_usb_xprt *usb_xprtp;
+	struct msm_ipc_router_usb_xprt *temp_usb_xprtp;
+
+	list_for_each_entry_safe(usb_xprtp, temp_usb_xprtp,
+					&usb_remote_xprt_list, list) {
+		kfree(usb_xprtp);
+	}
+
+	platform_driver_unregister(&msm_ipc_router_usb_xprt_driver);
+}
+
+module_init(msm_ipc_router_usb_xprt_init);
+module_exit(msm_ipc_router_usb_xprt_exit);
+MODULE_DESCRIPTION("IPC Router USB XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 5912ff2..5459f35 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -140,7 +140,7 @@
 
 	dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
 
-	if (msm_dump_data_add_minidump(entry))
+	if (msm_dump_data_add_minidump(entry) < 0)
 		pr_err("Failed to add entry in Minidump table\n");
 
 	return 0;
diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c
index 87e1700..ff4a79c 100644
--- a/drivers/soc/qcom/minidump_log.c
+++ b/drivers/soc/qcom/minidump_log.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,7 +37,7 @@
 	md_entry.virt_addr = (uintptr_t) (*log_bufp);
 	md_entry.phys_addr = virt_to_phys(*log_bufp);
 	md_entry.size = *log_buf_lenp;
-	if (msm_minidump_add_region(&md_entry))
+	if (msm_minidump_add_region(&md_entry) < 0)
 		pr_err("Failed to add logbuf in Minidump\n");
 }
 
@@ -53,7 +53,7 @@
 	ksec_entry.virt_addr = (uintptr_t)_sdata;
 	ksec_entry.phys_addr = virt_to_phys(_sdata);
 	ksec_entry.size = roundup((__bss_stop - _sdata), 4);
-	if (msm_minidump_add_region(&ksec_entry))
+	if (msm_minidump_add_region(&ksec_entry) < 0)
 		pr_err("Failed to add data section in Minidump\n");
 
 	/* Add percpu static sections */
@@ -66,7 +66,7 @@
 		ksec_entry.virt_addr = (uintptr_t)start;
 		ksec_entry.phys_addr = per_cpu_ptr_to_phys(start);
 		ksec_entry.size = static_size;
-		if (msm_minidump_add_region(&ksec_entry))
+		if (msm_minidump_add_region(&ksec_entry) < 0)
 			pr_err("Failed to add percpu sections in Minidump\n");
 	}
 }
@@ -87,14 +87,14 @@
 	ksp_entry.virt_addr = sp;
 	ksp_entry.phys_addr = virt_to_phys((uintptr_t *)sp);
 	ksp_entry.size = THREAD_SIZE;
-	if (msm_minidump_add_region(&ksp_entry))
+	if (msm_minidump_add_region(&ksp_entry) < 0)
 		pr_err("Failed to add stack of cpu %d in Minidump\n", cpu);
 
 	scnprintf(ktsk_entry.name, sizeof(ktsk_entry.name), "KTASK%d", cpu);
 	ktsk_entry.virt_addr = (u64)current;
 	ktsk_entry.phys_addr = virt_to_phys((uintptr_t *)current);
 	ktsk_entry.size = sizeof(struct task_struct);
-	if (msm_minidump_add_region(&ktsk_entry))
+	if (msm_minidump_add_region(&ktsk_entry) < 0)
 		pr_err("Failed to add current task %d in Minidump\n", cpu);
 }
 
diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c
index 3644dd6..309af6c 100644
--- a/drivers/soc/qcom/msm_minidump.c
+++ b/drivers/soc/qcom/msm_minidump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,7 +40,7 @@
 	struct md_ss_toc	*md_ss_toc;
 	struct md_global_toc	*md_gbl_toc;
 	struct md_ss_region	*md_regions;
-	struct md_region        entry[MAX_NUM_ENTRIES];
+	struct md_region	entry[MAX_NUM_ENTRIES];
 };
 
 /**
@@ -169,17 +169,10 @@
 }
 EXPORT_SYMBOL(msm_minidump_enabled);
 
-int msm_minidump_add_region(const struct md_region *entry)
+static inline int validate_region(const struct md_region *entry)
 {
-	u32 entries;
-	struct md_region *mdr;
-	int ret = 0;
-
-	if (!entry)
-		return -EINVAL;
-
-	if ((strlen(entry->name) > MAX_NAME_LENGTH) ||
-		md_check_name(entry->name) || !entry->virt_addr) {
+	if (!entry || (strlen(entry->name) > MAX_NAME_LENGTH) ||
+		!entry->virt_addr) {
 		pr_err("Invalid entry details\n");
 		return -EINVAL;
 	}
@@ -188,6 +181,55 @@
 		pr_err("size should be 4 byte aligned\n");
 		return -EINVAL;
 	}
+	return 0;
+}
+
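+/*
+ * Update an already registered region. @regno is the region index
+ * returned by msm_minidump_add_region() when the region was registered.
+ */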
+int msm_minidump_update_region(int regno, const struct md_region *entry)
+{
+	struct md_region *mdr;
+	struct md_ss_region *mdssr;
+	struct elfhdr *hdr = minidump_elfheader.ehdr;
+	struct elf_shdr *shdr;
+	struct elf_phdr *phdr;
+
+	if (validate_region(entry) || (regno >= MAX_NUM_ENTRIES))
+		return -EINVAL;
+
+	if (!md_check_name(entry->name)) {
+		pr_err("Region:[%s] does not exist to update.\n", entry->name);
+		return -ENOMEM;
+	}
+
+	mdr = &minidump_table.entry[regno];
+	mdr->virt_addr = entry->virt_addr;
+	mdr->phys_addr = entry->phys_addr;
+
+	mdssr = &minidump_table.md_regions[regno + 1];
+	mdssr->region_base_address = entry->phys_addr;
+
+	shdr = elf_section(hdr, regno + 4);
+	phdr = elf_program(hdr, regno + 1);
+
+	shdr->sh_addr = (elf_addr_t)entry->virt_addr;
+	phdr->p_vaddr = entry->virt_addr;
+	phdr->p_paddr = entry->phys_addr;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_minidump_update_region);
+
+int msm_minidump_add_region(const struct md_region *entry)
+{
+	u32 entries;
+	struct md_region *mdr;
+
+	if (validate_region(entry))
+		return -EINVAL;
+
+	if (md_check_name(entry->name)) {
+		pr_err("Region name [%s] already registered\n", entry->name);
+		return -EEXIST;
+	}
 
 	spin_lock(&mdt_lock);
 	entries = minidump_table.num_regions;
@@ -215,7 +257,7 @@
 
 	spin_unlock(&mdt_lock);
 
-	return ret;
+	return entries;
 }
 EXPORT_SYMBOL(msm_minidump_add_region);
 
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
index 80589de..a1c30ba 100644
--- a/drivers/soc/qcom/rpmh_master_stat.c
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -58,7 +58,8 @@
 	POWER_UP_END,
 	POWER_DOWN_END,
 	POWER_UP_START,
-	NUM_UNIT,
+	ALT_UNIT,
+	NUM_UNIT = ALT_UNIT,
 };
 
 struct msm_rpmh_master_data {
@@ -96,6 +97,7 @@
 
 static struct msm_rpmh_master_stats apss_master_stats;
 static void __iomem *rpmh_unit_base;
+static uint32_t use_alt_unit;
 
 static DEFINE_MUTEX(rpmh_stats_mutex);
 
@@ -178,6 +180,17 @@
 		return;
 
 	for (i = POWER_DOWN_END; i < NUM_UNIT; i++) {
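+		/*
+		 * When qcom,use-alt-unit selects this unit, its value is
+		 * read from the ALT_UNIT data register pair instead of
+		 * the unit's own register slot.
+		 */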
+		if (i == use_alt_unit) {
+			profile_unit[i].value = readl_relaxed(
+						rpmh_unit_base + GET_ADDR(
+						REG_DATA_LO, ALT_UNIT));
+			profile_unit[i].value |= ((uint64_t)
+						readl_relaxed(
+						rpmh_unit_base + GET_ADDR(
+						REG_DATA_HI, ALT_UNIT)) << 32);
+			continue;
+		}
+
 		profile_unit[i].valid = readl_relaxed(rpmh_unit_base +
 						GET_ADDR(REG_VALID, i));
 
@@ -233,6 +246,12 @@
 		goto fail_sysfs;
 	}
 
+	ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,use-alt-unit",
+					&use_alt_unit);
+	if (ret)
+		use_alt_unit = -1;
+
 	rpmh_unit_base = of_iomap(pdev->dev.of_node, 0);
 	if (!rpmh_unit_base) {
 		pr_err("Failed to get rpmh_unit_base\n");
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index f6a28c0..e78a49a 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -602,6 +602,9 @@
 	/* SDM710 ID */
 	[360] = {MSM_CPU_SDM710, "SDM710"},
 
+	/* SDMNOBELIUM ID */
+	[393] = {MSM_CPU_SDMNOBELIUM, "SDMNOBELIUM"},
+
 	/* SXR1120 ID */
 	[370] = {MSM_CPU_SXR1120, "SXR1120"},
 
@@ -627,6 +630,9 @@
 	[308] = {MSM_CPU_8917, "MSM8217"},
 	[309] = {MSM_CPU_8917, "MSM8617"},
 
+	/* MSM8940 IDs */
+	[313] = {MSM_CPU_8940, "MSM8940"},
+
 	/* SDM429 and SDM439 ID*/
 	[353] = {MSM_CPU_SDM439, "SDM439"},
 	[354] = {MSM_CPU_SDM429, "SDM429"},
@@ -1554,6 +1560,10 @@
 		dummy_socinfo.id = 360;
 		strlcpy(dummy_socinfo.build_id, "sdm710 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdmnobelium()) {
+		dummy_socinfo.id = 393;
+		strlcpy(dummy_socinfo.build_id, "sdmnobelium - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sda670()) {
 		dummy_socinfo.id = 337;
 		strlcpy(dummy_socinfo.build_id, "sda670 - ",
@@ -1586,10 +1596,18 @@
 		dummy_socinfo.id = 303;
 		strlcpy(dummy_socinfo.build_id, "msm8917 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8940()) {
+		dummy_socinfo.id = 313;
+		strlcpy(dummy_socinfo.build_id, "msm8940 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdm450()) {
 		dummy_socinfo.id = 338;
 		strlcpy(dummy_socinfo.build_id, "sdm450 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sda450()) {
+		dummy_socinfo.id = 351;
+		strlcpy(dummy_socinfo.build_id, "sda450 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdm632()) {
 		dummy_socinfo.id = 349;
 		strlcpy(dummy_socinfo.build_id, "sdm632 - ",
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index f20eda2..4aac534 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -61,7 +61,7 @@
 
 #define setup_timeout(dest_ss, source_ss, comm_type) \
 	_setup_timeout(dest_ss, source_ss, comm_type)
-#define cancel_timeout(subsys) del_timer(&subsys->timeout_data.timer)
+#define cancel_timeout(subsys) del_timer_sync(&subsys->timeout_data.timer)
 #define init_subsys_timer(subsys) _init_subsys_timer(subsys)
 
 /* Timeout values */
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index f4c7779..ea4b5a5 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,7 @@
 static void sysmon_clnt_svc_exit(struct work_struct *work);
 
 static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = {
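+	/*
+	 * Notification types without an explicit mapping below remain
+	 * SSCTL_SSR_EVENT_INVALID; is_ssctl_event() uses this to reject
+	 * them.
+	 */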
+	[0 ... SUBSYS_NOTIF_TYPE_COUNT - 1] = SSCTL_SSR_EVENT_INVALID,
 	[SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP,
 	[SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP,
 	[SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
@@ -147,6 +148,11 @@
 	}
 }
 
+static bool is_ssctl_event(enum subsys_notif_type notif)
+{
+	return notif_map[notif] != SSCTL_SSR_EVENT_INVALID;
+}
+
 static void sysmon_clnt_svc_arrive(struct work_struct *work)
 {
 	int rc;
@@ -318,8 +324,8 @@
 	const char *dest_ss = dest_desc->name;
 	int ret;
 
-	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL
-		|| dest_ss == NULL)
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT ||
+	    !is_ssctl_event(notif) || event_ss == NULL || dest_ss == NULL)
 		return -EINVAL;
 
 	mutex_lock(&sysmon_list_lock);
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index b924bc8..30672a37 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -885,7 +885,7 @@
 	md_entry.virt_addr = (uintptr_t)wdog_dd;
 	md_entry.phys_addr = virt_to_phys(wdog_dd);
 	md_entry.size = sizeof(*wdog_dd);
-	if (msm_minidump_add_region(&md_entry))
+	if (msm_minidump_add_region(&md_entry) < 0)
 		pr_info("Failed to add Watchdog data in Minidump\n");
 
 	return 0;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 0d8f43a..1905d20 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -215,7 +215,7 @@
 	pdata = &dspi->pdata;
 
 	/* program delay transfers if tx_delay is non zero */
-	if (spicfg->wdelay)
+	if (spicfg && spicfg->wdelay)
 		spidat1 |= SPIDAT1_WDEL;
 
 	/*
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index a67b0ff..db3b6e9 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -715,21 +715,6 @@
 		return PTR_ERR(dspi->regmap);
 	}
 
-	dspi_init(dspi);
-	dspi->irq = platform_get_irq(pdev, 0);
-	if (dspi->irq < 0) {
-		dev_err(&pdev->dev, "can't get platform irq\n");
-		ret = dspi->irq;
-		goto out_master_put;
-	}
-
-	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
-			pdev->name, dspi);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
-		goto out_master_put;
-	}
-
 	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
 	if (IS_ERR(dspi->clk)) {
 		ret = PTR_ERR(dspi->clk);
@@ -740,6 +725,21 @@
 	if (ret)
 		goto out_master_put;
 
+	dspi_init(dspi);
+	dspi->irq = platform_get_irq(pdev, 0);
+	if (dspi->irq < 0) {
+		dev_err(&pdev->dev, "can't get platform irq\n");
+		ret = dspi->irq;
+		goto out_clk_put;
+	}
+
+	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
+			pdev->name, dspi);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+		goto out_clk_put;
+	}
+
 	master->max_speed_hz =
 		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
 
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 22884ae..8dfaea8 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -913,7 +913,7 @@
 	u32 m_cmd = 0;
 	u32 m_param = 0;
 	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
-	u32 trans_len = 0;
+	u32 trans_len = 0, fifo_size = 0;
 
 	if (xfer->bits_per_word != mas->cur_word_len) {
 		spi_setup_word_len(mas, mode, xfer->bits_per_word);
@@ -977,7 +977,9 @@
 		mas->rx_rem_bytes = xfer->len;
 	}
 
-	if (trans_len > (mas->tx_fifo_depth * mas->tx_fifo_width)) {
+	fifo_size =
+		(mas->tx_fifo_depth * mas->tx_fifo_width / mas->cur_word_len);
+	if (trans_len > fifo_size) {
 		if (mas->cur_xfer_mode != SE_DMA) {
 			mas->cur_xfer_mode = SE_DMA;
 			geni_se_select_mode(mas->base, mas->cur_xfer_mode);
@@ -1251,12 +1253,12 @@
 	mas->rx_rem_bytes -= rx_bytes;
 }
 
-static irqreturn_t geni_spi_irq(int irq, void *dev)
+static irqreturn_t geni_spi_irq(int irq, void *data)
 {
-	struct spi_geni_master *mas = dev;
+	struct spi_geni_master *mas = data;
 	u32 m_irq = 0;
 
-	if (pm_runtime_status_suspended(dev)) {
+	if (pm_runtime_status_suspended(mas->dev)) {
 		GENI_SE_DBG(mas->ipc, false, mas->dev,
 				"%s: device is suspended\n", __func__);
 		goto exit_geni_spi_irq;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a816f07..093c9cf 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -597,11 +597,13 @@
 
 	ret = wait_event_interruptible_timeout(rspi->wait,
 					       rspi->dma_callbacked, HZ);
-	if (ret > 0 && rspi->dma_callbacked)
+	if (ret > 0 && rspi->dma_callbacked) {
 		ret = 0;
-	else if (!ret) {
-		dev_err(&rspi->master->dev, "DMA timeout\n");
-		ret = -ETIMEDOUT;
+	} else {
+		if (!ret) {
+			dev_err(&rspi->master->dev, "DMA timeout\n");
+			ret = -ETIMEDOUT;
+		}
 		if (tx)
 			dmaengine_terminate_all(rspi->master->dma_tx);
 		if (rx)
@@ -1313,12 +1315,36 @@
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+	return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+	return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS	&rspi_pm_ops
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
 	.probe =	rspi_probe,
 	.remove =	rspi_remove,
 	.id_table =	spi_driver_ids,
 	.driver		= {
 		.name = "renesas_spi",
+		.pm = DEV_PM_OPS,
 		.of_match_table = of_match_ptr(rspi_of_match),
 	},
 };
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index cbf02eb..711ea52 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -373,7 +373,8 @@
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-	sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+	sh_msiof_write(p, STR,
+		       sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1275,12 +1276,37 @@
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+	return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+	return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+			 sh_msiof_spi_resume);
+#define DEV_PM_OPS	&sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
 	.probe		= sh_msiof_spi_probe,
 	.remove		= sh_msiof_spi_remove,
 	.id_table	= spi_driver_ids,
 	.driver		= {
 		.name		= "spi_sh_msiof",
+		.pm		= DEV_PM_OPS,
 		.of_match_table = of_match_ptr(sh_msiof_match),
 	},
 };
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 85c91f5..af2880d 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@
 		goto exit_free_master;
 	}
 
+	/* disabled clock may cause interrupt storm upon request */
+	tspi->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(tspi->clk)) {
+		ret = PTR_ERR(tspi->clk);
+		dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+		goto exit_free_master;
+	}
+	ret = clk_prepare(tspi->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+		goto exit_free_master;
+	}
+	ret = clk_enable(tspi->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+		goto exit_free_master;
+	}
+
 	spi_irq = platform_get_irq(pdev, 0);
 	tspi->irq = spi_irq;
 	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
 					tspi->irq);
-		goto exit_free_master;
-	}
-
-	tspi->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(tspi->clk)) {
-		dev_err(&pdev->dev, "can not get clock\n");
-		ret = PTR_ERR(tspi->clk);
-		goto exit_free_irq;
+		goto exit_clk_disable;
 	}
 
 	tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@
 	tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
 	free_irq(spi_irq, tspi);
+exit_clk_disable:
+	clk_disable(tspi->clk);
 exit_free_master:
 	spi_master_put(master);
 	return ret;
@@ -1150,6 +1163,8 @@
 
 	free_irq(tspi->irq, tspi);
 
+	clk_disable(tspi->clk);
+
 	if (tspi->tx_dma_chan)
 		tegra_slink_deinit_dma_param(tspi, false);
 
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 0f0b7ba..a26b289 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -685,19 +685,26 @@
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
 		type.type |= bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_RISING)
-			type.polarity_high |= bit_mask_irq;
+			type.polarity_high |=  bit_mask_irq;
+		else
+			type.polarity_high &= ~bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_FALLING)
-			type.polarity_low  |= bit_mask_irq;
+			type.polarity_low  |=  bit_mask_irq;
+		else
+			type.polarity_low  &= ~bit_mask_irq;
 	} else {
 		if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
 		    (flow_type & (IRQF_TRIGGER_LOW)))
 			return -EINVAL;
 
 		type.type &= ~bit_mask_irq; /* level trig */
-		if (flow_type & IRQF_TRIGGER_HIGH)
-			type.polarity_high |= bit_mask_irq;
-		else
-			type.polarity_low  |= bit_mask_irq;
+		if (flow_type & IRQF_TRIGGER_HIGH) {
+			type.polarity_high |=  bit_mask_irq;
+			type.polarity_low  &= ~bit_mask_irq;
+		} else {
+			type.polarity_low  |=  bit_mask_irq;
+			type.polarity_high &= ~bit_mask_irq;
+		}
 	}
 
 	qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index f4ffac4..5af176b 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -383,6 +383,12 @@
 		goto out;
 	}
 
+	/* requested mapping size larger than object size */
+	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* requested protection bits must match our allowed protection mask */
 	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
 		     calc_vm_prot_bits(PROT_MASK, 0))) {
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index eb73b8b..a34829c 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -17,6 +17,7 @@
  */
 
 #include <linux/atomic.h>
+#include <linux/device.h>
 #include <linux/err.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
@@ -498,8 +499,8 @@
 	return ERR_PTR(-EINVAL);
 }
 
-static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
-						      int id)
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+					       int id)
 {
 	struct ion_handle *handle;
 
@@ -510,20 +511,7 @@
 	return ERR_PTR(-EINVAL);
 }
 
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
-					int id)
-{
-	struct ion_handle *handle;
-
-	mutex_lock(&client->lock);
-	handle = ion_handle_get_by_id_nolock(client, id);
-	mutex_unlock(&client->lock);
-
-	return handle;
-}
-
-static bool ion_handle_validate(struct ion_client *client,
-				struct ion_handle *handle)
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
 {
 	WARN_ON(!mutex_is_locked(&client->lock));
 	return idr_find(&client->idr, handle->id) == handle;
@@ -684,8 +672,7 @@
 }
 EXPORT_SYMBOL(ion_alloc);
 
-static void ion_free_nolock(struct ion_client *client,
-			    struct ion_handle *handle)
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
 {
 	bool valid_handle;
 
@@ -731,15 +718,17 @@
 }
 EXPORT_SYMBOL(ion_free);
 
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-	     ion_phys_addr_t *addr, size_t *len)
+static int __ion_phys(struct ion_client *client, struct ion_handle *handle,
+		      ion_phys_addr_t *addr, size_t *len, bool lock_client)
 {
 	struct ion_buffer *buffer;
 	int ret;
 
-	mutex_lock(&client->lock);
+	if (lock_client)
+		mutex_lock(&client->lock);
 	if (!ion_handle_validate(client, handle)) {
-		mutex_unlock(&client->lock);
+		if (lock_client)
+			mutex_unlock(&client->lock);
 		return -EINVAL;
 	}
 
@@ -747,16 +736,30 @@
 
 	if (!buffer->heap->ops->phys) {
 		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
-		       __func__, buffer->heap->name, buffer->heap->type);
-		mutex_unlock(&client->lock);
+			__func__, buffer->heap->name, buffer->heap->type);
+		if (lock_client)
+			mutex_unlock(&client->lock);
 		return -ENODEV;
 	}
 	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
-	mutex_unlock(&client->lock);
+	if (lock_client)
+		mutex_unlock(&client->lock);
 	return ret;
 }
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len)
+{
+	return __ion_phys(client, handle, addr, len, true);
+}
 EXPORT_SYMBOL(ion_phys);
 
+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+		    ion_phys_addr_t *addr, size_t *len)
+{
+	return __ion_phys(client, handle, addr, len, false);
+}
+
 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
 	void *vaddr;
@@ -1398,24 +1401,28 @@
 	.kunmap = ion_dma_buf_kunmap,
 };
 
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
-						struct ion_handle *handle)
+static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
+					   struct ion_handle *handle,
+					   bool lock_client)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct ion_buffer *buffer;
 	struct dma_buf *dmabuf;
 	bool valid_handle;
 
-	mutex_lock(&client->lock);
+	if (lock_client)
+		mutex_lock(&client->lock);
 	valid_handle = ion_handle_validate(client, handle);
 	if (!valid_handle) {
 		WARN(1, "%s: invalid handle passed to share.\n", __func__);
-		mutex_unlock(&client->lock);
+		if (lock_client)
+			mutex_unlock(&client->lock);
 		return ERR_PTR(-EINVAL);
 	}
 	buffer = handle->buffer;
 	ion_buffer_get(buffer);
-	mutex_unlock(&client->lock);
+	if (lock_client)
+		mutex_unlock(&client->lock);
 
 	exp_info.ops = &dma_buf_ops;
 	exp_info.size = buffer->size;
@@ -1430,14 +1437,21 @@
 
 	return dmabuf;
 }
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+				  struct ion_handle *handle)
+{
+	return __ion_share_dma_buf(client, handle, true);
+}
 EXPORT_SYMBOL(ion_share_dma_buf);
 
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+static int __ion_share_dma_buf_fd(struct ion_client *client,
+				  struct ion_handle *handle, bool lock_client)
 {
 	struct dma_buf *dmabuf;
 	int fd;
 
-	dmabuf = ion_share_dma_buf(client, handle);
+	dmabuf = __ion_share_dma_buf(client, handle, lock_client);
 	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
 
@@ -1446,10 +1460,21 @@
 		dma_buf_put(dmabuf);
 	return fd;
 }
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+	return __ion_share_dma_buf_fd(client, handle, true);
+}
 EXPORT_SYMBOL(ion_share_dma_buf_fd);
 
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
-				      struct dma_buf *dmabuf)
+int ion_share_dma_buf_fd_nolock(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	return __ion_share_dma_buf_fd(client, handle, false);
+}
+
+static struct ion_handle *__ion_import_dma_buf(struct ion_client *client,
+				      struct dma_buf *dmabuf, bool lock_client)
 {
 	struct ion_buffer *buffer;
 	struct ion_handle *handle;
@@ -1464,34 +1489,48 @@
 	}
 	buffer = dmabuf->priv;
 
-	mutex_lock(&client->lock);
+	if (lock_client)
+		mutex_lock(&client->lock);
 	/* if a handle exists for this buffer just take a reference to it */
 	handle = ion_handle_lookup(client, buffer);
 	if (!IS_ERR(handle)) {
 		handle = ion_handle_get_check_overflow(handle);
-		mutex_unlock(&client->lock);
+		if (lock_client)
+			mutex_unlock(&client->lock);
 		goto end;
 	}
 
 	handle = ion_handle_create(client, buffer);
 	if (IS_ERR(handle)) {
-		mutex_unlock(&client->lock);
+		if (lock_client)
+			mutex_unlock(&client->lock);
 		goto end;
 	}
 
 	ret = ion_handle_add(client, handle);
-	mutex_unlock(&client->lock);
+	if (lock_client)
+		mutex_unlock(&client->lock);
 	if (ret) {
-		ion_handle_put(handle);
+		if (lock_client)
+			ion_handle_put(handle);
+		else
+			ion_handle_put_nolock(handle);
 		handle = ERR_PTR(ret);
 	}
 
 end:
 	return handle;
 }
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+						struct dma_buf *dmabuf)
+{
+	return __ion_import_dma_buf(client, dmabuf, true);
+}
 EXPORT_SYMBOL(ion_import_dma_buf);
 
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
+static struct ion_handle *__ion_import_dma_buf_fd(struct ion_client *client,
+							int fd, bool lock_client)
 {
 	struct dma_buf *dmabuf;
 	struct ion_handle *handle;
@@ -1500,12 +1539,22 @@
 	if (IS_ERR(dmabuf))
 		return ERR_CAST(dmabuf);
 
-	handle = ion_import_dma_buf(client, dmabuf);
+	handle = __ion_import_dma_buf(client, dmabuf, lock_client);
 	dma_buf_put(dmabuf);
 	return handle;
 }
+
+struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
+{
+	return __ion_import_dma_buf_fd(client, fd, true);
+}
 EXPORT_SYMBOL(ion_import_dma_buf_fd);
 
+struct ion_handle *ion_import_dma_buf_fd_nolock(struct ion_client *client, int fd)
+{
+	return __ion_import_dma_buf_fd(client, fd, false);
+}
+
 static int ion_sync_for_device(struct ion_client *client, int fd)
 {
 	struct dma_buf *dmabuf;
@@ -1610,7 +1659,7 @@
 	{
 		struct ion_handle *handle;
 
-		handle = ion_handle_get_by_id(client, data.handle.handle);
+		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
 		if (IS_ERR(handle))
 			return PTR_ERR(handle);
 		data.fd.fd = ion_share_dma_buf_fd(client, handle);
@@ -2143,3 +2192,18 @@
 			data->heaps[i].size);
 	}
 }
+
+void lock_client(struct ion_client *client)
+{
+	mutex_lock(&client->lock);
+}
+
+void unlock_client(struct ion_client *client)
+{
+	mutex_unlock(&client->lock);
+}
+
+struct ion_buffer *get_buffer(struct ion_handle *handle)
+{
+	return handle->buffer;
+}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index d991b02..831c334 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -4,7 +4,7 @@
  * Copyright (C) Linaro 2012
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
  *
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -386,14 +386,37 @@
 	return ret;
 }
 
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+				       struct ion_buffer *buffer)
+{
+	if (!is_buffer_hlos_assigned(buffer)) {
+		pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
+			__func__);
+		return NULL;
+	}
+	return ion_cma_map_kernel(heap, buffer);
+}
+
+static int ion_secure_cma_map_user(struct ion_heap *mapper,
+				   struct ion_buffer *buffer,
+				   struct vm_area_struct *vma)
+{
+	if (!is_buffer_hlos_assigned(buffer)) {
+		pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
+			__func__);
+		return -EINVAL;
+	}
+	return ion_cma_mmap(mapper, buffer, vma);
+}
+
 static struct ion_heap_ops ion_secure_cma_ops = {
 	.allocate = ion_secure_cma_allocate,
 	.free = ion_secure_cma_free,
 	.map_dma = ion_cma_heap_map_dma,
 	.unmap_dma = ion_cma_heap_unmap_dma,
 	.phys = ion_cma_phys,
-	.map_user = ion_cma_mmap,
-	.map_kernel = ion_cma_map_kernel,
+	.map_user = ion_secure_cma_map_user,
+	.map_kernel = ion_secure_cma_map_kernel,
 	.unmap_kernel = ion_cma_unmap_kernel,
 	.print_debug = ion_cma_print_debug,
 };
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
index b2eac28..69ec5d2 100644
--- a/drivers/staging/android/ion/ion_cma_secure_heap.c
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) Linaro 2012
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -346,8 +346,8 @@
 	kfree(chunk);
 }
 
-static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
-					 int max_nr)
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
 {
 	struct list_head *entry, *_n;
 	unsigned long drained_size = 0, skipped_size = 0;
@@ -371,6 +371,7 @@
 	}
 
 	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+	return drained_size;
 }
 
 int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
@@ -388,6 +389,7 @@
 static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
 					     struct shrink_control *sc)
 {
+	unsigned long freed;
 	struct ion_cma_secure_heap *sheap = container_of(shrinker,
 					struct ion_cma_secure_heap, shrinker);
 	int nr_to_scan = sc->nr_to_scan;
@@ -400,11 +402,11 @@
 	if (!mutex_trylock(&sheap->chunk_lock))
 		return -EAGAIN;
 
-	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+	freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
 
 	mutex_unlock(&sheap->chunk_lock);
 
-	return atomic_read(&sheap->total_pool_size);
+	return freed;
 }
 
 static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
@@ -558,10 +560,11 @@
 	struct ion_secure_cma_buffer_info *info;
 	int ret;
 	unsigned long alloc_size = len;
-	struct ion_secure_cma_non_contig_info *nc_info, *temp;
+	struct ion_secure_cma_non_contig_info *nc_info;
 	unsigned long ncelems = 0;
 	struct scatterlist *sg;
 	unsigned long total_allocated = 0;
+	unsigned long total_added_to_pool = 0;
 
 	dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
 
@@ -599,6 +602,7 @@
 				kfree(nc_info);
 				continue;
 			}
+			total_added_to_pool += alloc_size;
 			ret = ion_secure_cma_alloc_from_pool(sheap,
 							     &nc_info->phys,
 							     alloc_size);
@@ -648,12 +652,13 @@
 err2:
 	mutex_unlock(&sheap->alloc_lock);
 err1:
-	list_for_each_entry_safe(nc_info, temp, &info->non_contig_list,
-				 entry) {
-		list_del(&nc_info->entry);
-		kfree(nc_info);
-	}
+	__ion_secure_cma_free_non_contig(sheap, info);
 	kfree(info->table);
+	/*
+	 * A concurrent allocation may have entered this path even though
+	 * the remaining heap space was insufficient, so shrink back what
+	 * this attempt added to the pool.
+	 */
+	__ion_secure_cma_shrink_pool(sheap, total_added_to_pool);
 err:
 	kfree(info);
 	return ION_CMA_ALLOCATE_FAILED;
@@ -667,6 +672,8 @@
 	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
 	struct ion_secure_cma_buffer_info *buf = NULL;
 	unsigned long allow_non_contig = flags & ION_FLAG_ALLOW_NON_CONTIG;
+	struct ion_cma_secure_heap *sheap =
+			container_of(heap, struct ion_cma_secure_heap, heap);
 
 	if (!secure_allocation &&
 	    !ion_heap_allow_secure_allocation(heap->type)) {
@@ -690,6 +697,9 @@
 	if (!allow_non_contig)
 		buf = __ion_secure_cma_allocate(heap, buffer, len, align,
 						flags);
+	else if (len > (sheap->heap_size - atomic_read(&sheap->total_allocated)
+			- atomic_read(&sheap->total_leaked)))
+		return -ENOMEM;
 	else
 		buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len,
 							   align, flags);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 38d4175..5709434 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -2,7 +2,7 @@
  * drivers/staging/android/ion/ion_page_pool.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -65,6 +65,9 @@
 		list_add_tail(&page->lru, &pool->low_items);
 		pool->low_count++;
 	}
+
+	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+			    (1 << (PAGE_SHIFT + pool->order)));
 	mutex_unlock(&pool->mutex);
 	return 0;
 }
@@ -84,6 +87,8 @@
 	}
 
 	list_del(&page->lru);
+	mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES,
+			    -(1 << (PAGE_SHIFT + pool->order)));
 	return page;
 }
 
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 775c666..3f662df 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -2,7 +2,7 @@
  * drivers/staging/android/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -542,11 +542,39 @@
 		   enum ion_heap_type type, void *data,
 		   int (*f)(struct ion_heap *heap, void *data));
 
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
-					int id);
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+					       int id);
 
 int ion_handle_put(struct ion_handle *handle);
 
 void show_ion_usage(struct ion_device *dev);
 
+int ion_share_dma_buf_fd_nolock(struct ion_client *client,
+				struct ion_handle *handle);
+
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle);
+
+void lock_client(struct ion_client *client);
+
+void unlock_client(struct ion_client *client);
+
+struct ion_buffer *get_buffer(struct ion_handle *handle);
+
+/**
+ * This function is the same as ion_free(), but does not take client->lock.
+ */
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * This function is the same as ion_phys(), but does not take client->lock.
+ */
+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+		    ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * This function is the same as ion_import_dma_buf() except that it does
+ * not take client->lock.
+ */
+struct ion_handle *ion_import_dma_buf_fd_nolock(struct ion_client *client, int fd);
+
 #endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 3b27252..5c71628 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -102,6 +102,11 @@
 	return total << PAGE_SHIFT;
 }
 
+static int ion_heap_is_system_heap_type(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM);
+}
+
 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      struct ion_buffer *buffer,
 				      unsigned long order,
@@ -357,6 +362,13 @@
 	int vmid = get_secure_vmid(buffer->flags);
 	struct device *dev = heap->priv;
 
+	if (ion_heap_is_system_heap_type(buffer->heap->type) &&
+	    is_secure_vmid_valid(vmid)) {
+		pr_info("%s: System heap doesn't support secure allocations\n",
+			__func__);
+		return -EINVAL;
+	}
+
 	if (align > PAGE_SIZE)
 		return -EINVAL;
 
@@ -469,7 +481,7 @@
 
 err_free_sg2:
 	/* We failed to zero buffers. Bypass pool */
-	buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
 
 	if (vmid > 0)
 		ion_system_secure_heap_unassign_sg(table, vmid);
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
index 5bf484b..cf86ea2 100644
--- a/drivers/staging/android/ion/ion_system_secure_heap.c
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016,2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -171,14 +171,15 @@
 	sys_heap->ops->free(&buffer);
 }
 
-static void process_one_shrink(struct ion_heap *sys_heap,
+static void process_one_shrink(struct ion_system_secure_heap *secure_heap,
+			       struct ion_heap *sys_heap,
 			       struct prefetch_info *info)
 {
 	struct ion_buffer buffer;
 	size_t pool_size, size;
 	int ret;
 
-	buffer.heap = sys_heap;
+	buffer.heap = &secure_heap->heap;
 	buffer.flags = info->vmid;
 
 	pool_size = ion_system_heap_secure_page_pool_total(sys_heap,
@@ -193,6 +194,7 @@
 	}
 
 	buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+	buffer.heap = sys_heap;
 	sys_heap->ops->free(&buffer);
 }
 
@@ -212,7 +214,7 @@
 		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 
 		if (info->shrink)
-			process_one_shrink(sys_heap, info);
+			process_one_shrink(secure_heap, sys_heap, info);
 		else
 			process_one_prefetch(sys_heap, info);
 
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 656eb49..49600ba 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -171,7 +171,7 @@
 	ion_phys_addr_t buff_phys_start = 0;
 	size_t buf_length = 0;
 
-	ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+	ret = ion_phys_nolock(client, handle, &buff_phys_start, &buf_length);
 	if (ret)
 		return -EINVAL;
 
@@ -289,9 +289,10 @@
 	int i;
 	unsigned int len = 0;
 	void (*op)(const void *, size_t);
+	struct ion_buffer *buffer;
 
-
-	table = ion_sg_table(client, handle);
+	buffer = get_buffer(handle);
+	table = buffer->sg_table;
 	if (IS_ERR_OR_NULL(table))
 		return PTR_ERR(table);
 
@@ -340,10 +341,18 @@
 	unsigned long flags;
 	struct sg_table *table;
 	struct page *page;
+	struct ion_buffer *buffer;
 
-	ret = ion_handle_get_flags(client, handle, &flags);
-	if (ret)
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to %s.\n",
+		       __func__, __func__);
 		return -EINVAL;
+	}
+
+	buffer = get_buffer(handle);
+	mutex_lock(&buffer->lock);
+	flags = buffer->flags;
+	mutex_unlock(&buffer->lock);
 
 	if (!ION_IS_CACHED(flags))
 		return 0;
@@ -351,7 +360,7 @@
 	if (!is_buffer_hlos_assigned(ion_handle_buffer(handle)))
 		return 0;
 
-	table = ion_sg_table(client, handle);
+	table = buffer->sg_table;
 
 	if (IS_ERR_OR_NULL(table))
 		return PTR_ERR(table);
@@ -371,7 +380,13 @@
 int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
 			void *vaddr, unsigned long len, unsigned int cmd)
 {
-	return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+	int ret;
+
+	lock_client(client);
+	ret = ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+	unlock_client(client);
+
+	return ret;
 }
 EXPORT_SYMBOL(msm_ion_do_cache_op);
 
@@ -380,7 +395,13 @@
 		void *vaddr, unsigned int offset, unsigned long len,
 		unsigned int cmd)
 {
-	return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+	int ret;
+
+	lock_client(client);
+	ret = ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+	unlock_client(client);
+
+	return ret;
 }
 EXPORT_SYMBOL(msm_ion_do_cache_offset_op);
 
@@ -790,20 +811,23 @@
 		int ret;
 		struct mm_struct *mm = current->active_mm;
 
+		lock_client(client);
 		if (data.flush_data.handle > 0) {
-			handle = ion_handle_get_by_id(
+			handle = ion_handle_get_by_id_nolock(
 					client, (int)data.flush_data.handle);
 			if (IS_ERR(handle)) {
 				pr_info("%s: Could not find handle: %d\n",
 					__func__, (int)data.flush_data.handle);
+				unlock_client(client);
 				return PTR_ERR(handle);
 			}
 		} else {
-			handle = ion_import_dma_buf_fd(client,
-						       data.flush_data.fd);
+			handle = ion_import_dma_buf_fd_nolock(client,
+							   data.flush_data.fd);
 			if (IS_ERR(handle)) {
 				pr_info("%s: Could not import handle: %pK\n",
 					__func__, handle);
+				unlock_client(client);
 				return -EINVAL;
 			}
 		}
@@ -826,8 +850,9 @@
 		}
 		up_read(&mm->mmap_sem);
 
-		ion_free(client, handle);
+		ion_free_nolock(client, handle);
 
+		unlock_client(client);
 		if (ret < 0)
 			return ret;
 		break;
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 18c5312..0fa85d5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -5407,11 +5407,11 @@
 	/* Digital I/O (PFI) subdevice */
 	s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
 	s->type		= COMEDI_SUBD_DIO;
-	s->subdev_flags	= SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
 	s->maxdata	= 1;
 	if (devpriv->is_m_series) {
 		s->n_chan	= 16;
 		s->insn_bits	= ni_pfi_insn_bits;
+		s->subdev_flags	= SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
 
 		ni_writew(dev, s->state, NI_M_PFI_DO_REG);
 		for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
@@ -5420,6 +5420,7 @@
 		}
 	} else {
 		s->n_chan	= 10;
+		s->subdev_flags	= SDF_INTERNAL;
 	}
 	s->insn_config	= ni_pfi_insn_config;
 
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index ea9a0c2..4ff2931 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1299,11 +1299,6 @@
 		goto failed2;
 	}
 
-	LASSERT(cmid->device);
-	CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
-	       libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
-	       &dev->ibd_ifip, cmid->device->name);
-
 	return;
 
  failed2:
@@ -3005,8 +3000,19 @@
 		} else {
 			rc = rdma_resolve_route(
 				cmid, *kiblnd_tunables.kib_timeout * 1000);
-			if (!rc)
+			if (!rc) {
+				struct kib_net *net = peer->ibp_ni->ni_data;
+				struct kib_dev *dev = net->ibn_dev;
+
+				CDEBUG(D_NET, "%s: connection bound to "
+				       "%s:%pI4h:%s\n",
+				       libcfs_nid2str(peer->ibp_nid),
+				       dev->ibd_ifname,
+				       &dev->ibd_ifip, cmid->device->name);
+
 				return 0;
+			}
+
 			/* Can't initiate route resolution */
 			CERROR("Can't resolve route for %s: %d\n",
 			       libcfs_nid2str(peer->ibp_nid), rc);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index d18ab3f..9addcdb 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -1489,8 +1489,10 @@
 		return ERR_CAST(res);
 
 	lock = ldlm_lock_new(res);
-	if (!lock)
+	if (!lock) {
+		ldlm_resource_putref(res);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	lock->l_req_mode = mode;
 	lock->l_ast_data = data;
@@ -1533,6 +1535,8 @@
 	return ERR_PTR(rc);
 }
 
+
+
 /**
  * Enqueue (request) a lock.
  * On the client this is called from ldlm_cli_enqueue_fini
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index e070adb..57121fd 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -103,7 +103,11 @@
 	__u64 valid;
 	int rc;
 
-	if (flags == XATTR_REPLACE) {
+	/* When setxattr() is called with a size of 0 the value is
+	 * unconditionally replaced by "". When removexattr() is
+	 * called we get a NULL value and XATTR_REPLACE for flags.
+	 */
+	if (!value && flags == XATTR_REPLACE) {
 		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
 		valid = OBD_MD_FLXATTRRM;
 	} else {
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index c16927a..395c7a2 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -24,6 +23,8 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mc.h>
 
+#include <asm/cacheflush.h>
+
 #include "iss_video.h"
 #include "iss.h"
 
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36..9e63bdf 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4976,7 +4976,7 @@
 			goto SD_Execute_Write_Cmd_Failed;
 		}
 
-		rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+		retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			goto SD_Execute_Write_Cmd_Failed;
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 1de02bb..647f6be 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -1247,7 +1247,7 @@
 			reg = 0;
 			rtsx_read_register(chip, XD_CTL, &reg);
 			if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
-				wait_timeout(100);
+				mdelay(100);
 
 				if (detect_card_cd(chip,
 					XD_CARD) != STATUS_SUCCESS) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 98f75e5..f0d9730 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
-	int j = DIV_ROUND_UP(len, 2), rc;
-
-	rc = hex2bin(dst, src, j);
-	if (rc < 0)
-		pr_debug("CHAP string contains non hex digit symbols\n");
-
-	dst[j] = '\0';
-	return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
-	int i;
-
-	for (i = 0; i < src_len; i++) {
-		sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
-	}
-}
-
 static void chap_gen_challenge(
 	struct iscsi_conn *conn,
 	int caller,
@@ -59,7 +38,7 @@
 	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
 
 	get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
-	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+	bin2hex(challenge_asciihex, chap->challenge,
 				CHAP_CHALLENGE_LENGTH);
 	/*
 	 * Set CHAP_C, and copy the generated challenge into c_str.
@@ -240,9 +219,16 @@
 		pr_err("Could not find CHAP_R.\n");
 		goto out;
 	}
+	if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+		pr_err("Malformed CHAP_R\n");
+		goto out;
+	}
+	if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+		pr_err("Malformed CHAP_R\n");
+		goto out;
+	}
 
 	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
-	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
 	tfm = crypto_alloc_shash("md5", 0, 0);
 	if (IS_ERR(tfm)) {
@@ -286,7 +272,7 @@
 		goto out;
 	}
 
-	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+	bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
 	pr_debug("[server] MD5 Server Digest: %s\n", response);
 
 	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -341,9 +327,7 @@
 		pr_err("Could not find CHAP_C.\n");
 		goto out;
 	}
-	pr_debug("[server] Got CHAP_C=%s\n", challenge);
-	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
-				strlen(challenge));
+	challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
 	if (!challenge_len) {
 		pr_err("Unable to convert incoming challenge\n");
 		goto out;
@@ -352,6 +336,11 @@
 		pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
 		goto out;
 	}
+	if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+		pr_err("Malformed CHAP_C\n");
+		goto out;
+	}
+	pr_debug("[server] Got CHAP_C=%s\n", challenge);
 	/*
 	 * During mutual authentication, the CHAP_C generated by the
 	 * initiator must not match the original CHAP_C generated by
@@ -405,7 +394,7 @@
 	/*
 	 * Convert response from binary hex to ascii hext.
 	 */
-	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+	bin2hex(response, digest, MD5_SIGNATURE_SIZE);
 	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
 			response);
 	*nr_out_len += 1;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 9ccd5da..d2f82aa 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -333,8 +333,7 @@
 		pr_err("idr_alloc() for sess_idr failed\n");
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		kfree(sess);
-		return -ENOMEM;
+		goto free_sess;
 	}
 
 	sess->creation_time = get_jiffies_64();
@@ -350,20 +349,28 @@
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
 		pr_err("Unable to allocate memory for"
 				" struct iscsi_sess_ops.\n");
-		kfree(sess);
-		return -ENOMEM;
+		goto remove_idr;
 	}
 
 	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
 	if (IS_ERR(sess->se_sess)) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		kfree(sess->sess_ops);
-		kfree(sess);
-		return -ENOMEM;
+		goto free_ops;
 	}
 
 	return 0;
+
+free_ops:
+	kfree(sess->sess_ops);
+remove_idr:
+	spin_lock_bh(&sess_idr_lock);
+	idr_remove(&sess_idr, sess->session_index);
+	spin_unlock_bh(&sess_idr_lock);
+free_sess:
+	kfree(sess);
+	conn->sess = NULL;
+	return -ENOMEM;
 }
 
 static int iscsi_login_zero_tsih_s2(
@@ -1152,13 +1159,13 @@
 				   ISCSI_LOGIN_STATUS_INIT_ERR);
 	if (!zero_tsih || !conn->sess)
 		goto old_sess_out;
-	if (conn->sess->se_sess)
-		transport_free_session(conn->sess->se_sess);
-	if (conn->sess->session_index != 0) {
-		spin_lock_bh(&sess_idr_lock);
-		idr_remove(&sess_idr, conn->sess->session_index);
-		spin_unlock_bh(&sess_idr_lock);
-	}
+
+	transport_free_session(conn->sess->se_sess);
+
+	spin_lock_bh(&sess_idr_lock);
+	idr_remove(&sess_idr, conn->sess->session_index);
+	spin_unlock_bh(&sess_idr_lock);
+
 	kfree(conn->sess->sess_ops);
 	kfree(conn->sess);
 	conn->sess = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 63e1dcc..761b065 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -637,8 +637,7 @@
 		none = strstr(buf1, NONE);
 		if (none)
 			goto out;
-		strncat(buf1, ",", strlen(","));
-		strncat(buf1, NONE, strlen(NONE));
+		strlcat(buf1, "," NONE, sizeof(buf1));
 		if (iscsi_update_param_value(param, buf1) < 0)
 			return -EINVAL;
 	}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6f3eccf..e738b46 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -316,6 +316,7 @@
 {
 	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
 	unsigned char buf[PR_REG_ISID_LEN];
+	unsigned long flags;
 
 	se_sess->se_tpg = se_tpg;
 	se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -352,7 +353,7 @@
 			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
 		}
 
-		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 		/*
 		 * The se_nacl->nacl_sess pointer will be set to the
 		 * last active I_T Nexus for each struct se_node_acl.
@@ -361,7 +362,7 @@
 
 		list_add_tail(&se_sess->sess_acl_list,
 			      &se_nacl->acl_sess_list);
-		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 	}
 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 02f93f4..76e163e 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -339,7 +339,7 @@
 	unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
 	struct cpufreq_cooling_device *cpufreq_dev;
 
-	if (event != CPUFREQ_ADJUST)
+	if (event != CPUFREQ_INCOMPATIBLE)
 		return NOTIFY_DONE;
 
 	mutex_lock(&cooling_list_lock);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index c662cd7..fe811d7 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -396,10 +396,13 @@
 
 	mutex_lock(&tz->lock);
 
-	if (mode == THERMAL_DEVICE_ENABLED)
+	if (mode == THERMAL_DEVICE_ENABLED) {
 		tz->polling_delay = data->polling_delay;
-	else
+		tz->passive_delay = data->passive_delay;
+	} else {
 		tz->polling_delay = 0;
+		tz->passive_delay = 0;
+	}
 
 	mutex_unlock(&tz->lock);
 
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
index f01658d..1fa8ca4 100644
--- a/drivers/thermal/qcom/qti_virtual_sensor.c
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -102,6 +102,16 @@
 				"cpuss1-usr"},
 		.logic = VIRT_MAXIMUM,
 	},
+	{
+		.virt_zone_name = "penta-cpu-max-step",
+		.num_sensors = 5,
+		.sensor_names = {"apc1-cpu0-usr",
+				"apc1-cpu1-usr",
+				"apc1-cpu2-usr",
+				"apc1-cpu3-usr",
+				"cpuss-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
 };
 
 int qti_virtual_sensor_register(struct device *dev)
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 5d345cc..9fe601d 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -219,6 +219,8 @@
 #define QPNP_BTM_Mn_DATA1(n)			((n * 2) + 0xa1)
 #define QPNP_BTM_CHANNELS			8
 
+#define QPNP_ADC_WAKEUP_SRC_TIMEOUT_MS          2000
+
 /* QPNP ADC TM HC end */
 
 struct qpnp_adc_thr_info {
@@ -273,7 +275,6 @@
 	bool				adc_tm_initialized;
 	bool				adc_tm_recalib_check;
 	int				max_channels_available;
-	atomic_t			wq_cnt;
 	struct qpnp_vadc_chip		*vadc_dev;
 	struct workqueue_struct		*high_thr_wq;
 	struct workqueue_struct		*low_thr_wq;
@@ -361,6 +362,7 @@
 	[SCALE_R_ABSOLUTE] = {qpnp_adc_absolute_rthr},
 	[SCALE_QRD_SKUH_RBATT_THERM] = {qpnp_adc_qrd_skuh_btm_scaler},
 	[SCALE_QRD_SKUT1_RBATT_THERM] = {qpnp_adc_qrd_skut1_btm_scaler},
+	[SCALE_QRD_215_RBATT_THERM] = {qpnp_adc_qrd_215_btm_scaler},
 };
 
 static int32_t qpnp_adc_tm_read_reg(struct qpnp_adc_tm_chip *chip,
@@ -1889,7 +1891,6 @@
 {
 	struct qpnp_adc_tm_sensor *adc_tm = container_of(work,
 		struct qpnp_adc_tm_sensor, work);
-	struct qpnp_adc_tm_chip *chip = adc_tm->chip;
 
 	if (adc_tm->thermal_node) {
 		pr_debug("notifying uspace client\n");
@@ -1900,8 +1901,6 @@
 		else
 			notify_clients(adc_tm);
 	}
-
-	atomic_dec(&chip->wq_cnt);
 }
 
 static int qpnp_adc_tm_recalib_request_check(struct qpnp_adc_tm_chip *chip,
@@ -2145,11 +2144,8 @@
 		return rc;
 	}
 
-	if (!queue_work(chip->sensor[sensor_num].req_wq,
-				&chip->sensor[sensor_num].work)) {
-		/* The item is already queued, reduce the count */
-		atomic_dec(&chip->wq_cnt);
-	}
+	queue_work(chip->sensor[sensor_num].req_wq,
+		&chip->sensor[sensor_num].work);
 
 	return rc;
 }
@@ -2256,11 +2252,8 @@
 		return rc;
 	}
 
-	if (!queue_work(chip->sensor[sensor_num].req_wq,
-				&chip->sensor[sensor_num].work)) {
-		/* The item is already queued, reduce the count */
-		atomic_dec(&chip->wq_cnt);
-	}
+	queue_work(chip->sensor[sensor_num].req_wq,
+				&chip->sensor[sensor_num].work);
 
 	return rc;
 }
@@ -2324,8 +2317,6 @@
 
 fail:
 	mutex_unlock(&chip->adc->adc_lock);
-	if (rc < 0)
-		atomic_dec(&chip->wq_cnt);
 
 	return rc;
 }
@@ -2378,10 +2369,6 @@
 fail:
 	mutex_unlock(&chip->adc->adc_lock);
 
-	if (rc < 0 || (!chip->th_info.adc_tm_high_enable &&
-					!chip->th_info.adc_tm_low_enable))
-		atomic_dec(&chip->wq_cnt);
-
 	return rc;
 }
 
@@ -2491,7 +2478,6 @@
 		}
 	}
 
-	atomic_inc(&chip->wq_cnt);
 	queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work);
 
 	return IRQ_HANDLED;
@@ -2600,7 +2586,6 @@
 		}
 	}
 
-	atomic_inc(&chip->wq_cnt);
 	queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
 
 	return IRQ_HANDLED;
@@ -2729,14 +2714,16 @@
 	}
 
 	if (sensor_low_notify_num) {
-		if (queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work))
-			atomic_inc(&chip->wq_cnt);
+		pm_wakeup_event(chip->dev,
+				QPNP_ADC_WAKEUP_SRC_TIMEOUT_MS);
+		queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
 	}
 
 	if (sensor_high_notify_num) {
-		if (queue_work(chip->high_thr_wq,
-				&chip->trigger_high_thr_work))
-			atomic_inc(&chip->wq_cnt);
+		pm_wakeup_event(chip->dev,
+				QPNP_ADC_WAKEUP_SRC_TIMEOUT_MS);
+		queue_work(chip->high_thr_wq,
+				&chip->trigger_high_thr_work);
 	}
 
 	return IRQ_HANDLED;
@@ -3224,7 +3211,6 @@
 
 	INIT_WORK(&chip->trigger_high_thr_work, qpnp_adc_tm_high_thr_work);
 	INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
-	atomic_set(&chip->wq_cnt, 0);
 
 	if (!chip->adc_tm_hc) {
 		rc = qpnp_adc_tm_initial_setup(chip);
@@ -3331,11 +3317,18 @@
 static int qpnp_adc_tm_suspend_noirq(struct device *dev)
 {
 	struct qpnp_adc_tm_chip *chip = dev_get_drvdata(dev);
+	struct device_node *node = dev->of_node, *child;
+	int i = 0;
 
-	if (atomic_read(&chip->wq_cnt) != 0) {
-		pr_err(
-			"Aborting suspend, adc_tm notification running while suspending\n");
-		return -EBUSY;
+	flush_workqueue(chip->high_thr_wq);
+	flush_workqueue(chip->low_thr_wq);
+
+	for_each_child_of_node(node, child) {
+		if (chip->sensor[i].req_wq) {
+			pr_debug("flushing queue for sensor %d\n", i);
+			flush_workqueue(chip->sensor[i].req_wq);
+		}
+		i++;
 	}
 	return 0;
 }
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index a45810b..c974cb5 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -598,6 +598,7 @@
 		threshold_code = temp_to_code(data, temp);
 
 		rising_threshold = readl(data->base + rising_reg_offset);
+		rising_threshold &= ~(0xff << j * 8);
 		rising_threshold |= (threshold_code << j * 8);
 		writel(rising_threshold, data->base + rising_reg_offset);
 
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 9510305..741e966 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -41,6 +41,9 @@
 	  If unsure, say Y, or else you won't be able to do much with your new
 	  shiny Linux system :-)
 
+config TTY_FLUSH_LOCAL_ECHO
+	bool
+
 config CONSOLE_TRANSLATIONS
 	depends on VT
 	default y
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 5107993..1fc5d5b 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -332,7 +332,6 @@
 	udbg_putc = udbg_opal_putc;
 	udbg_getc = udbg_opal_getc;
 	udbg_getc_poll = udbg_opal_getc_poll;
-	tb_ticks_per_usec = 0x200; /* Make udelay not suck */
 }
 
 void __init hvc_opal_init_early(void)
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 0475f96..442a3130 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -128,6 +128,10 @@
 
 #define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work);
+#endif
+
 static inline size_t read_cnt(struct n_tty_data *ldata)
 {
 	return ldata->read_head - ldata->read_tail;
@@ -751,6 +755,16 @@
 			tail++;
 	}
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	if (ldata->echo_commit != tail) {
+		if (!tty->delayed_work) {
+			INIT_DELAYED_WORK(&tty->echo_delayed_work, continue_process_echoes);
+			schedule_delayed_work(&tty->echo_delayed_work, 1);
+		}
+		tty->delayed_work = 1;
+	}
+#endif
+
  not_yet_stored:
 	ldata->echo_tail = tail;
 	return old_space - space;
@@ -817,6 +831,20 @@
 	mutex_unlock(&ldata->output_lock);
 }
 
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, echo_delayed_work.work);
+	struct n_tty_data *ldata = tty->disc_data;
+
+	mutex_lock(&ldata->output_lock);
+	tty->delayed_work = 0;
+	__process_echoes(tty);
+	mutex_unlock(&ldata->output_lock);
+}
+#endif
+
 /**
  *	add_echo_byte	-	add a byte to the echo buffer
  *	@c: unicode byte to echo
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 2b90738..171130a 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -106,16 +106,19 @@
 static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
 {
 	struct tty_struct *to = tty->link;
+	unsigned long flags;
 
 	if (tty->stopped)
 		return 0;
 
 	if (c > 0) {
+		spin_lock_irqsave(&to->port->lock, flags);
 		/* Stuff the data into the input queue of the other end */
 		c = tty_insert_flip_string(to->port, buf, c);
 		/* And shovel */
 		if (c)
 			tty_flip_buffer_push(to->port);
+		spin_unlock_irqrestore(&to->port->lock, flags);
 	}
 	return c;
 }
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index b0cc47c..e8e8973 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1913,7 +1913,7 @@
 	ByteIO_t UPCIRingInd = 0;
 
 	if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
-	    pci_enable_device(dev))
+	    pci_enable_device(dev) || i >= NUM_BOARDS)
 		return 0;
 
 	rcktpt_io_addr[i] = pci_resource_start(dev, 0);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 3eb01a71..3177264 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -235,7 +235,7 @@
 	unsigned int rate;
 	int ret;
 
-	if (IS_ERR(d->clk) || !old)
+	if (IS_ERR(d->clk))
 		goto out;
 
 	clk_disable_unprepare(d->clk);
@@ -626,6 +626,7 @@
 	{ "APMC0D08", 0},
 	{ "AMD0020", 0 },
 	{ "AMDI0020", 0 },
+	{ "BRCM2032", 0 },
 	{ "HISI0031", 0 },
 	{ },
 };
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 5d9038a..5b54439 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -83,8 +83,7 @@
 		.name		= "16550A",
 		.fifo_size	= 16,
 		.tx_loadsz	= 16,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
-				  UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
 		.rxtrig_bytes	= {1, 4, 8, 14},
 		.flags		= UART_CAP_FIFO,
 	},
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 933c268..8106353 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -637,8 +637,10 @@
 	    (link->has_func_id) &&
 	    (link->socket->pcmcia_pfc == 0) &&
 	    ((link->func_id == CISTPL_FUNCID_MULTI) ||
-	     (link->func_id == CISTPL_FUNCID_SERIAL)))
-		pcmcia_loop_config(link, serial_check_for_multi, info);
+	     (link->func_id == CISTPL_FUNCID_SERIAL))) {
+		if (pcmcia_loop_config(link, serial_check_for_multi, info))
+			goto failed;
+	}
 
 	/*
 	 * Apply any multi-port quirk.
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d3e3d42..0040c29f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1068,8 +1068,8 @@
 	/* Get the address of the host memory buffer.
 	 */
 	bdp = pinfo->rx_cur;
-	while (bdp->cbd_sc & BD_SC_EMPTY)
-		;
+	if (bdp->cbd_sc & BD_SC_EMPTY)
+		return NO_POLL_CHAR;
 
 	/* If the buffer address is in the CPM DPRAM, don't
 	 * convert it.
@@ -1104,7 +1104,11 @@
 		poll_chars = 0;
 	}
 	if (poll_chars <= 0) {
-		poll_chars = poll_wait_key(poll_buf, pinfo);
+		int ret = poll_wait_key(poll_buf, pinfo);
+
+		if (ret == NO_POLL_CHAR)
+			return ret;
+		poll_chars = ret;
 		pollp = poll_buf;
 	}
 	poll_chars--;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 937f5e1..e2ec049 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -833,7 +833,8 @@
 	struct circ_buf *ring = &sport->rx_ring;
 	int ret, nent;
 	int bits, baud;
-	struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+	struct tty_port *port = &sport->port.state->port;
+	struct tty_struct *tty = port->tty;
 	struct ktermios *termios = &tty->termios;
 
 	baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index b24edf6..0d82be1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2197,6 +2197,14 @@
 				ret);
 			return ret;
 		}
+
+		ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
+				       dev_name(&pdev->dev), sport);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+				ret);
+			return ret;
+		}
 	} else {
 		ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
 				       dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 69fa4dc..99b40d9 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -428,6 +428,12 @@
 	struct msm_port *msm_port = UART_TO_MSM(port);
 	struct msm_dma *dma = &msm_port->tx_dma;
 
+	/* No need to start tx when system suspended. */
+	if (port->suspended) {
+		printk_deferred("port suspended!\n");
+		return;
+	}
+
 	/* Already started in DMA mode */
 	if (dma->count)
 		return;
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 45b57c2..401c983 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -327,8 +327,10 @@
 	if ((termios->c_cflag & CREAD) == 0)
 		port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
 
-	if (old)
+	if (old) {
 		tty_termios_copy_hw(termios, old);
+		termios->c_cflag |= CS8;
+	}
 
 	baud = uart_get_baud_rate(port, termios, old, 0, 460800);
 	uart_update_timeout(port, termios->c_cflag, baud);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 839cee4..17c2ee2 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -175,6 +175,7 @@
 {
 	struct uart_port *uport = uart_port_check(state);
 	unsigned long page;
+	unsigned long flags = 0;
 	int retval = 0;
 
 	if (uport->type == PORT_UNKNOWN)
@@ -189,15 +190,18 @@
 	 * Initialise and allocate the transmit and temporary
 	 * buffer.
 	 */
-	if (!state->xmit.buf) {
-		/* This is protected by the per port mutex */
-		page = get_zeroed_page(GFP_KERNEL);
-		if (!page)
-			return -ENOMEM;
+	page = get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
 
+	uart_port_lock(state, flags);
+	if (!state->xmit.buf) {
 		state->xmit.buf = (unsigned char *) page;
 		uart_circ_clear(&state->xmit);
+	} else {
+		free_page(page);
 	}
+	uart_port_unlock(uport, flags);
 
 	retval = uport->ops->startup(uport);
 	if (retval == 0) {
@@ -256,6 +260,7 @@
 {
 	struct uart_port *uport = uart_port_check(state);
 	struct tty_port *port = &state->port;
+	unsigned long flags = 0;
 
 	/*
 	 * Set the TTY IO error marker
@@ -288,10 +293,12 @@
 	/*
 	 * Free the transmit buffer page.
 	 */
+	uart_port_lock(state, flags);
 	if (state->xmit.buf) {
 		free_page((unsigned long)state->xmit.buf);
 		state->xmit.buf = NULL;
 	}
+	uart_port_unlock(uport, flags);
 }
 
 /**
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 789c814..4305524 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1475,6 +1475,7 @@
 static int tty_reopen(struct tty_struct *tty)
 {
 	struct tty_driver *driver = tty->driver;
+	int retval;
 
 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
 	    driver->subtype == PTY_TYPE_MASTER)
@@ -1488,10 +1489,14 @@
 
 	tty->count++;
 
-	if (!tty->ldisc)
-		return tty_ldisc_reinit(tty, tty->termios.c_line);
+	if (tty->ldisc)
+		return 0;
 
-	return 0;
+	retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+	if (retval)
+		tty->count--;
+
+	return retval;
 }
 
 /**
@@ -1658,6 +1663,10 @@
 
 	put_pid(tty->pgrp);
 	put_pid(tty->session);
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	if (tty->echo_delayed_work.work.func)
+		cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
 	free_tty_struct(tty);
 }
 
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index f62c598..638eb9b 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -31,6 +31,8 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kbd_kern.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_diacr.h>
@@ -703,6 +705,8 @@
 		if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
 			ret = -ENXIO;
 		else {
+			vsa.console = array_index_nospec(vsa.console,
+							 MAX_NR_CONSOLES + 1);
 			vsa.console--;
 			console_lock();
 			ret = vc_allocate(vsa.console);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 208bc52..f0a9ea2 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -841,8 +841,6 @@
 	if (ret)
 		goto err_uio_dev_add_attributes;
 
-	info->uio_dev = idev;
-
 	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
 		/*
 		 * Note that we deliberately don't use devm_request_irq
@@ -858,6 +856,7 @@
 			goto err_request_irq;
 	}
 
+	info->uio_dev = idev;
 	return 0;
 
 err_request_irq:
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 08bef18..25ae9b9 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -705,20 +705,9 @@
 	}
 
 	if (acm->susp_count) {
-		if (acm->putbuffer) {
-			/* now to preserve order */
-			usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
-			acm->putbuffer = NULL;
-		}
 		usb_anchor_urb(wb->urb, &acm->delayed);
 		spin_unlock_irqrestore(&acm->write_lock, flags);
 		return count;
-	} else {
-		if (acm->putbuffer) {
-			/* at this point there is no good way to handle errors */
-			acm_start_wb(acm, acm->putbuffer);
-			acm->putbuffer = NULL;
-		}
 	}
 
 	stat = acm_start_wb(acm, wb);
@@ -729,66 +718,6 @@
 	return count;
 }
 
-static void acm_tty_flush_chars(struct tty_struct *tty)
-{
-	struct acm *acm = tty->driver_data;
-	struct acm_wb *cur;
-	int err;
-	unsigned long flags;
-
-	spin_lock_irqsave(&acm->write_lock, flags);
-
-	cur = acm->putbuffer;
-	if (!cur) /* nothing to do */
-		goto out;
-
-	acm->putbuffer = NULL;
-	err = usb_autopm_get_interface_async(acm->control);
-	if (err < 0) {
-		cur->use = 0;
-		acm->putbuffer = cur;
-		goto out;
-	}
-
-	if (acm->susp_count)
-		usb_anchor_urb(cur->urb, &acm->delayed);
-	else
-		acm_start_wb(acm, cur);
-out:
-	spin_unlock_irqrestore(&acm->write_lock, flags);
-	return;
-}
-
-static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
-{
-	struct acm *acm = tty->driver_data;
-	struct acm_wb *cur;
-	int wbn;
-	unsigned long flags;
-
-overflow:
-	cur = acm->putbuffer;
-	if (!cur) {
-		spin_lock_irqsave(&acm->write_lock, flags);
-		wbn = acm_wb_alloc(acm);
-		if (wbn >= 0) {
-			cur = &acm->wb[wbn];
-			acm->putbuffer = cur;
-		}
-		spin_unlock_irqrestore(&acm->write_lock, flags);
-		if (!cur)
-			return 0;
-	}
-
-	if (cur->len == acm->writesize) {
-		acm_tty_flush_chars(tty);
-		goto overflow;
-	}
-
-	cur->buf[cur->len++] = ch;
-	return 1;
-}
-
 static int acm_tty_write_room(struct tty_struct *tty)
 {
 	struct acm *acm = tty->driver_data;
@@ -1785,6 +1714,9 @@
 	{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
 	.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
 	},
+	{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+	},
 
 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
 	.driver_info = CLEAR_HALT_CONDITIONS,
@@ -1937,8 +1869,6 @@
 	.cleanup =		acm_tty_cleanup,
 	.hangup =		acm_tty_hangup,
 	.write =		acm_tty_write,
-	.put_char =		acm_tty_put_char,
-	.flush_chars =		acm_tty_flush_chars,
 	.write_room =		acm_tty_write_room,
 	.ioctl =		acm_tty_ioctl,
 	.throttle =		acm_tty_throttle,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1f1eabf..b30ac5f 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -94,7 +94,6 @@
 	unsigned long read_urbs_free;
 	struct urb *read_urbs[ACM_NR];
 	struct acm_rb read_buffers[ACM_NR];
-	struct acm_wb *putbuffer;			/* for acm_tty_put_char() */
 	int rx_buflimit;
 	spinlock_t read_lock;
 	int write_used;					/* number of non-empty write buffers */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 893ebae..988240e 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1450,10 +1450,13 @@
 	struct async *as = NULL;
 	struct usb_ctrlrequest *dr = NULL;
 	unsigned int u, totlen, isofrmlen;
-	int i, ret, is_in, num_sgs = 0, ifnum = -1;
+	int i, ret, num_sgs = 0, ifnum = -1;
 	int number_of_packets = 0;
 	unsigned int stream_id = 0;
 	void *buf;
+	bool is_in;
+	bool allow_short = false;
+	bool allow_zero = false;
 	unsigned long mask =	USBDEVFS_URB_SHORT_NOT_OK |
 				USBDEVFS_URB_BULK_CONTINUATION |
 				USBDEVFS_URB_NO_FSBR |
@@ -1487,6 +1490,8 @@
 	u = 0;
 	switch (uurb->type) {
 	case USBDEVFS_URB_TYPE_CONTROL:
+		if (is_in)
+			allow_short = true;
 		if (!usb_endpoint_xfer_control(&ep->desc))
 			return -EINVAL;
 		/* min 8 byte setup packet */
@@ -1527,6 +1532,10 @@
 		break;
 
 	case USBDEVFS_URB_TYPE_BULK:
+		if (!is_in)
+			allow_zero = true;
+		else
+			allow_short = true;
 		switch (usb_endpoint_type(&ep->desc)) {
 		case USB_ENDPOINT_XFER_CONTROL:
 		case USB_ENDPOINT_XFER_ISOC:
@@ -1547,6 +1556,10 @@
 		if (!usb_endpoint_xfer_int(&ep->desc))
 			return -EINVAL;
  interrupt_urb:
+		if (!is_in)
+			allow_zero = true;
+		else
+			allow_short = true;
 		break;
 
 	case USBDEVFS_URB_TYPE_ISO:
@@ -1691,16 +1704,21 @@
 	u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
 	if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
 		u |= URB_ISO_ASAP;
-	if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+	if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
 		u |= URB_SHORT_NOT_OK;
 	if (uurb->flags & USBDEVFS_URB_NO_FSBR)
 		u |= URB_NO_FSBR;
-	if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+	if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
 		u |= URB_ZERO_PACKET;
 	if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
 		u |= URB_NO_INTERRUPT;
 	as->urb->transfer_flags = u;
 
+	if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+		dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+	if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+		dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
 	as->urb->transfer_buffer_length = uurb->buffer_length;
 	as->urb->setup_packet = (unsigned char *)dr;
 	dr = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5532246..7dae981 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -509,7 +509,6 @@
 	struct device *dev;
 	struct usb_device *udev;
 	int retval = 0;
-	int lpm_disable_error = -ENODEV;
 
 	if (!iface)
 		return -ENODEV;
@@ -530,16 +529,6 @@
 
 	iface->condition = USB_INTERFACE_BOUND;
 
-	/* See the comment about disabling LPM in usb_probe_interface(). */
-	if (driver->disable_hub_initiated_lpm) {
-		lpm_disable_error = usb_unlocked_disable_lpm(udev);
-		if (lpm_disable_error) {
-			dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
-					__func__, driver->name);
-			return -ENOMEM;
-		}
-	}
-
 	/* Claimed interfaces are initially inactive (suspended) and
 	 * runtime-PM-enabled, but only if the driver has autosuspend
 	 * support.  Otherwise they are marked active, to prevent the
@@ -558,9 +547,20 @@
 	if (device_is_registered(dev))
 		retval = device_bind_driver(dev);
 
-	/* Attempt to re-enable USB3 LPM, if the disable was successful. */
-	if (!lpm_disable_error)
-		usb_unlocked_enable_lpm(udev);
+	if (retval) {
+		dev->driver = NULL;
+		usb_set_intfdata(iface, NULL);
+		iface->needs_remote_wakeup = 0;
+		iface->condition = USB_INTERFACE_UNBOUND;
+
+		/*
+		 * Unbound interfaces are always runtime-PM-disabled
+		 * and runtime-PM-suspended
+		 */
+		if (driver->supports_autosuspend)
+			pm_runtime_disable(dev);
+		pm_runtime_set_suspended(dev);
+	}
 
 	return retval;
 }
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 7859d73..7af23b2 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -528,8 +528,6 @@
 				event == PM_EVENT_RESTORE);
 		if (retval) {
 			dev_err(dev, "PCI post-resume error %d!\n", retval);
-			if (hcd->shared_hcd)
-				usb_hc_died(hcd->shared_hcd);
 			usb_hc_died(hcd);
 		}
 	}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 771efc9..7f0811f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1150,10 +1150,14 @@
 
 		if (!udev || udev->state == USB_STATE_NOTATTACHED) {
 			/* Tell hub_wq to disconnect the device or
-			 * check for a new connection
+			 * check for a new connection or over current condition.
+			 * Based on USB2.0 Spec Section 11.12.5,
+			 * C_PORT_OVER_CURRENT could be set while
+			 * PORT_OVER_CURRENT is not. So check for any of them.
 			 */
 			if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-			    (portstatus & USB_PORT_STAT_OVERCURRENT))
+			    (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+			    (portchange & USB_PORT_STAT_C_OVERCURRENT))
 				set_bit(port1, hub->change_bits);
 
 		} else if (portstatus & USB_PORT_STAT_ENABLE) {
@@ -3370,6 +3374,10 @@
 	while (delay_ms < 2000) {
 		if (status || *portstatus & USB_PORT_STAT_CONNECTION)
 			break;
+		if (!port_is_power_on(hub, *portstatus)) {
+			status = -ENODEV;
+			break;
+		}
 		msleep(20);
 		delay_ms += 20;
 		status = hub_port_status(hub, *port1, portstatus, portchange);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 9016a9b..255fecc 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1279,6 +1279,11 @@
  * is submitted that needs that bandwidth.  Some other operating systems
  * allocate bandwidth early, when a configuration is chosen.
  *
+ * xHCI reserves bandwidth and configures the alternate setting in
+ * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
+ * may be disabled. Drivers cannot rely on any particular alternate
+ * setting being in effect after a failure.
+ *
  * This call is synchronous, and may not be used in an interrupt context.
  * Also, drivers must not change altsettings while urbs are scheduled for
  * endpoints in that interface; all such urbs must first be completed
@@ -1314,6 +1319,12 @@
 			 alternate);
 		return -EINVAL;
 	}
+	/*
+	 * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
+	 * including freeing dropped endpoint ring buffers.
+	 * Make sure the interface endpoints are flushed before that
+	 */
+	usb_disable_interface(dev, iface, false);
 
 	/* Make sure we have enough bandwidth for this alternate interface.
 	 * Remove the current alt setting and add the new alt setting.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 40ce175..37a5e07 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -37,6 +37,10 @@
 	/* CBM - Flash disk */
 	{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
+	{ USB_DEVICE(0x0218, 0x0201), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* WORLDE easy key (easykey.25) MIDI controller  */
 	{ USB_DEVICE(0x0218, 0x0401), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -231,6 +235,10 @@
 	/* Corsair K70 RGB */
 	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
 
+	/* Corsair Strafe */
+	{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+	  USB_QUIRK_DELAY_CTRL_MSG },
+
 	/* Corsair Strafe RGB */
 	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
 	  USB_QUIRK_DELAY_CTRL_MSG },
@@ -255,6 +263,9 @@
 	{ USB_DEVICE(0x2040, 0x7200), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
+	/* DJI CineSSD */
+	{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* INTEL VALUE SSD */
 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index bb2a4fe..82dfc60 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -91,6 +91,8 @@
 	struct usb_interface_cache *intf_cache = NULL;
 	int i;
 
+	if (!config)
+		return NULL;
 	for (i = 0; i < config->desc.bNumInterfaces; i++) {
 		if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
 				== iface_num) {
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 09921ef..3ae27b6 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3948,9 +3948,11 @@
 	}
 
 	ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-	if (ret)
+	if (ret) {
+		dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+					   hsotg->ctrl_req);
 		return ret;
-
+	}
 	dwc2_hsotg_dump(hsotg);
 
 	return 0;
@@ -3963,6 +3965,7 @@
 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
 {
 	usb_del_gadget_udc(&hsotg->gadget);
+	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
 
 	return 0;
 }
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 0a0cf15..984d6aa 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2544,34 +2544,29 @@
 
 #define DWC2_USB_DMA_ALIGN 4
 
-struct dma_aligned_buffer {
-	void *kmalloc_ptr;
-	void *old_xfer_buffer;
-	u8 data[0];
-};
-
 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
 {
-	struct dma_aligned_buffer *temp;
+	void *stored_xfer_buffer;
 
 	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
 		return;
 
-	temp = container_of(urb->transfer_buffer,
-		struct dma_aligned_buffer, data);
+	/* Restore urb->transfer_buffer from the end of the allocated area */
+	memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+	       urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
 
 	if (usb_urb_dir_in(urb))
-		memcpy(temp->old_xfer_buffer, temp->data,
+		memcpy(stored_xfer_buffer, urb->transfer_buffer,
 		       urb->transfer_buffer_length);
-	urb->transfer_buffer = temp->old_xfer_buffer;
-	kfree(temp->kmalloc_ptr);
+	kfree(urb->transfer_buffer);
+	urb->transfer_buffer = stored_xfer_buffer;
 
 	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
 }
 
 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 {
-	struct dma_aligned_buffer *temp, *kmalloc_ptr;
+	void *kmalloc_ptr;
 	size_t kmalloc_size;
 
 	if (urb->num_sgs || urb->sg ||
@@ -2579,22 +2574,29 @@
 	    !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
 		return 0;
 
-	/* Allocate a buffer with enough padding for alignment */
+	/*
+	 * Allocate a buffer with enough padding to store the original
+	 * transfer_buffer pointer. The allocation is guaranteed to be
+	 * properly aligned for DMA.
+	 */
 	kmalloc_size = urb->transfer_buffer_length +
-		sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+		sizeof(urb->transfer_buffer);
 
 	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
 	if (!kmalloc_ptr)
 		return -ENOMEM;
 
-	/* Position our struct dma_aligned_buffer such that data is aligned */
-	temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
-	temp->kmalloc_ptr = kmalloc_ptr;
-	temp->old_xfer_buffer = urb->transfer_buffer;
+	/*
+	 * Store the original urb->transfer_buffer pointer at the end of the
+	 * allocation so it can be restored later
+	 */
+	memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+	       &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
 	if (usb_urb_dir_out(urb))
-		memcpy(temp->data, urb->transfer_buffer,
+		memcpy(kmalloc_ptr, urb->transfer_buffer,
 		       urb->transfer_buffer_length);
-	urb->transfer_buffer = temp->data;
+	urb->transfer_buffer = kmalloc_ptr;
 
 	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
 
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 906f223..8066fa9 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -922,9 +922,8 @@
 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
 	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
 					  DWC2_HC_XFER_COMPLETE, NULL);
-	if (!len) {
+	if (!len && !qtd->isoc_split_offset) {
 		qtd->complete_split = 0;
-		qtd->isoc_split_offset = 0;
 		return 0;
 	}
 
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d53dc92..359c09a 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2329,7 +2329,14 @@
 		return -EBUSY;
 	}
 
-	if (!mdwc->in_host_mode && (mdwc->vbus_active && !mdwc->suspend)) {
+	/*
+	 * Check whether a remote wakeup has been received and is pending
+	 * before proceeding with the suspend routine during bus suspend.
+	 */
+
+	if (!mdwc->in_host_mode && (mdwc->vbus_active &&
+		(mdwc->otg_state == OTG_STATE_B_SUSPEND ||
+		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) && !mdwc->suspend)) {
 		dev_dbg(mdwc->dev,
 			"Received wakeup event before the core suspend\n");
 		mutex_unlock(&mdwc->suspend_resume_mutex);
@@ -3728,6 +3735,7 @@
 	if (cpu_to_affin)
 		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
 put_dwc3:
+	platform_device_put(mdwc->dwc3);
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
 
@@ -3781,6 +3789,7 @@
 
 	if (mdwc->hs_phy)
 		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+	platform_device_put(mdwc->dwc3);
 	of_platform_depopulate(&pdev->dev);
 
 	pm_runtime_disable(mdwc->dev);
@@ -3862,6 +3871,11 @@
 					mdwc->core_clk_rate_hs);
 				mdwc->max_rh_port_speed = USB_SPEED_HIGH;
 			} else {
+				clk_set_rate(mdwc->core_clk,
+						mdwc->core_clk_rate);
+				dev_dbg(mdwc->dev,
+					"set ss core clk rate %ld\n",
+					mdwc->core_clk_rate);
 				mdwc->max_rh_port_speed = USB_SPEED_SUPER;
 			}
 
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index a3e2200..58526932 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -132,8 +132,9 @@
 
 	of_platform_depopulate(dev);
 
-	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+	pm_runtime_set_suspended(dev);
 
 	return 0;
 }
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e8e5c32..70b3a66 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3632,6 +3632,13 @@
 	/* Endpoint IRQ, handle it and return early */
 	if (event->type.is_devspec == 0) {
 		/* depevt */
+		/* If a remote-wakeup attempt by the device failed, the core
+		 * will not raise a wakeup event after resume. Handle it here
+		 * on the endpoint event, which indicates the bus has resumed.
+		 */
+		if (dwc->b_suspend &&
+		    dwc3_get_link_state(dwc) == DWC3_LINK_STATE_U0)
+			dwc3_gadget_wakeup_interrupt(dwc, false);
 		return dwc3_endpoint_interrupt(dwc, &event->depevt);
 	}
 
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 90cbb61..3563583 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -179,6 +179,9 @@
 config USB_F_SUBSET
 	tristate
 
+config USB_RNDIS
+	tristate
+
 config USB_F_RNDIS
 	tristate
 
@@ -242,6 +245,9 @@
 config USB_F_QDSS
 	tristate
 
+config USB_F_IPC
+	tristate
+
 # this first set of drivers all depend on bulk-capable hardware.
 
 config USB_CONFIGFS
@@ -324,6 +330,7 @@
 	depends on RNDIS_IPA
 	depends on NET
 	select USB_U_ETHER
+	select USB_RNDIS
 	select USB_F_QCRNDIS
 
 config USB_CONFIGFS_RNDIS
@@ -331,6 +338,7 @@
 	depends on USB_CONFIGFS
 	depends on NET
 	select USB_U_ETHER
+	select USB_RNDIS
 	select USB_F_RNDIS
 	help
 	   Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
@@ -569,6 +577,7 @@
 	bool "USB GSI function"
 	select USB_F_GSI
 	select USB_U_ETHER
+	select USB_RNDIS
 	depends on USB_CONFIGFS
 	help
 	  Generic function driver to support h/w acceleration to IPA over GSI.
@@ -580,6 +589,17 @@
 	help
           USB QDSS function driver to get hwtracing related data over USB.
 
+config USB_CONFIGFS_F_IPC
+	bool "USB IPC function"
+	select USB_F_IPC
+	depends on USB_CONFIGFS && !USB_QCOM_IPC_BRIDGE
+	help
+	  The IPC function driver enables support for an IPC message port over
+	  USB. This driver and the host-mode ipc_bridge driver are mutually
+	  exclusive, and the IPC router USB transport layer can interface with
+	  only one of them on a given platform. Hence, enable this config only
+	  if the host driver is not being compiled.
+
 choice
 	tristate "USB Gadget Drivers"
 	default USB_ETH
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 703fb24..2e300a3 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -3541,6 +3541,9 @@
 		hw_device_state(udc->ep0out.qh.dma);
 	} else {
 		hw_device_state(0);
+		spin_unlock_irqrestore(udc->lock, flags);
+		_gadget_stop_activity(&udc->gadget);
+		spin_lock_irqsave(udc->lock, flags);
 	}
 	spin_unlock_irqrestore(udc->lock, flags);
 
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 8bdbf5f..ca9e2ce 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1884,6 +1884,8 @@
 		 */
 		if (w_value && !f->get_alt)
 			break;
+
+		spin_lock(&cdev->lock);
 		value = f->set_alt(f, w_index, w_value);
 		if (value == USB_GADGET_DELAYED_STATUS) {
 			DBG(cdev,
@@ -1893,6 +1895,7 @@
 			DBG(cdev, "delayed_status count %d\n",
 					cdev->delayed_status);
 		}
+		spin_unlock(&cdev->lock);
 		break;
 	case USB_REQ_GET_INTERFACE:
 		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
@@ -2511,7 +2514,13 @@
 				f->func_wakeup_pending = 0;
 			}
 
-			if (gadget->speed != USB_SPEED_SUPER && f->resume)
+			/*
+			 * Call the function's resume callback irrespective of
+			 * speed. Each function needs to retain the USB3 function
+			 * suspend state throughout the device suspend entry
+			 * and exit process.
+			 */
+			if (f->resume)
 				f->resume(f);
 		}
 
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index b8fa93e..f814472 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -16,6 +16,7 @@
 usb_f_obex-y			:= f_obex.o
 obj-$(CONFIG_USB_F_OBEX)	+= usb_f_obex.o
 obj-$(CONFIG_USB_U_ETHER)	+= u_ether.o
+obj-$(CONFIG_USB_RNDIS)		+= rndis.o
 usb_f_ncm-y			:= f_ncm.o
 obj-$(CONFIG_USB_F_NCM)		+= usb_f_ncm.o
 usb_f_ecm-y			:= f_ecm.o
@@ -26,7 +27,7 @@
 obj-$(CONFIG_USB_F_EEM)		+= usb_f_eem.o
 usb_f_ecm_subset-y		:= f_subset.o
 obj-$(CONFIG_USB_F_SUBSET)	+= usb_f_ecm_subset.o
-usb_f_rndis-y			:= f_rndis.o rndis.o
+usb_f_rndis-y			:= f_rndis.o
 obj-$(CONFIG_USB_F_RNDIS)	+= usb_f_rndis.o
 usb_f_mass_storage-y		:= f_mass_storage.o storage_common.o
 obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o
@@ -60,7 +61,7 @@
 obj-$(CONFIG_USB_F_CDEV)	+= usb_f_cdev.o
 usb_f_ccid-y			:= f_ccid.o
 obj-$(CONFIG_USB_F_CCID)   	+= usb_f_ccid.o
-usb_f_gsi-y			:= f_gsi.o rndis.o
+usb_f_gsi-y			:= f_gsi.o
 obj-$(CONFIG_USB_F_GSI)         += usb_f_gsi.o
 usb_f_qdss-y			:= f_qdss.o u_qdss.o
 obj-$(CONFIG_USB_F_QDSS)        += usb_f_qdss.o
@@ -68,3 +69,5 @@
 obj-$(CONFIG_USB_F_QCRNDIS)	+= usb_f_qcrndis.o
 usb_f_rmnet_bam-y		:= f_rmnet.o u_ctrl_qti.o u_bam_dmux.o u_data_ipa.o
 obj-$(CONFIG_USB_F_RMNET_BAM)	+= usb_f_rmnet_bam.o
+usb_f_ipc-y			:= f_ipc.o
+obj-$(CONFIG_USB_F_IPC)		+= usb_f_ipc.o
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index afa62e8..5453df1 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -37,6 +37,7 @@
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/debugfs.h>
 #include <linux/cdev.h>
 #include <linux/spinlock.h>
 #include <linux/usb/gadget.h>
@@ -134,6 +135,9 @@
 	unsigned long		nbytes_to_host;
 	unsigned long           nbytes_to_port_bridge;
 	unsigned long		nbytes_from_port_bridge;
+
+	/* To test remote wakeup using debugfs */
+	u8 debugfs_rw_enable;
 };
 
 struct f_cdev_opts {
@@ -143,6 +147,12 @@
 	u8 port_num;
 };
 
+struct usb_cser_debugfs {
+	struct dentry *debugfs_root;
+};
+
+static struct usb_cser_debugfs debugfs;
+
 static int major, minors;
 struct class *fcdev_classp;
 static DEFINE_IDA(chardev_ida);
@@ -155,6 +165,7 @@
 static void usb_cser_disconnect(struct f_cdev *port);
 static struct f_cdev *f_cdev_alloc(char *func_name, int portno);
 static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req);
+static void usb_cser_debugfs_exit(void);
 
 static struct usb_interface_descriptor cser_interface_desc = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
@@ -530,6 +541,32 @@
 	return rc;
 }
 
+static int usb_cser_func_suspend(struct usb_function *f, u8 options)
+{
+	bool func_wakeup_allowed;
+
+	func_wakeup_allowed =
+		((options & FUNC_SUSPEND_OPT_RW_EN_MASK) != 0);
+
+	f->func_wakeup_allowed = func_wakeup_allowed;
+	if (options & FUNC_SUSPEND_OPT_SUSP_MASK) {
+		if (!f->func_is_suspended)
+			f->func_is_suspended = true;
+	} else {
+		if (f->func_is_suspended)
+			f->func_is_suspended = false;
+	}
+	return 0;
+}
+
+static int usb_cser_get_status(struct usb_function *f)
+{
+	bool remote_wakeup_en_status = f->func_wakeup_allowed ? 1 : 0;
+
+	return (remote_wakeup_en_status << FUNC_WAKEUP_ENABLE_SHIFT) |
+		(1 << FUNC_WAKEUP_CAPABLE_SHIFT);
+}
+
 static void usb_cser_disable(struct usb_function *f)
 {
 	struct f_cdev	*port = func_to_port(f);
@@ -850,6 +887,7 @@
 		cdev_del(&opts->port->fcdev_cdev);
 	}
 	usb_cser_chardev_deinit();
+	usb_cser_debugfs_exit();
 	kfree(opts->func_name);
 	kfree(opts->port);
 	kfree(opts);
@@ -1570,6 +1608,119 @@
 	.compat_ioctl = f_cdev_ioctl,
 };
 
+static ssize_t cser_rw_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct f_cdev *port = s->private;
+	u8 input;
+	struct cserial *cser;
+	struct usb_function *func;
+	struct usb_gadget *gadget;
+	int ret;
+
+	cser = &port->port_usb;
+	if (!cser) {
+		pr_err("cser is NULL\n");
+		return -EINVAL;
+	}
+
+	if (!port->is_connected) {
+		pr_debug("port disconnected\n");
+		return -ENODEV;
+	}
+
+	func = &cser->func;
+	if (!func) {
+		pr_err("func is NULL\n");
+		return -EINVAL;
+	}
+
+	if (ubuf == NULL) {
+		pr_debug("buffer is Null.\n");
+		goto err;
+	}
+
+	ret = kstrtou8_from_user(ubuf, count, 0, &input);
+	if (ret) {
+		pr_err("Invalid value. err:%d\n", ret);
+		goto err;
+	}
+
+	if (port->debugfs_rw_enable == !!input) {
+		if (!!input)
+			pr_debug("RW already enabled\n");
+		else
+			pr_debug("RW already disabled\n");
+		goto err;
+	}
+
+	port->debugfs_rw_enable = !!input;
+	if (port->debugfs_rw_enable) {
+		gadget = cser->func.config->cdev->gadget;
+		if (gadget->speed == USB_SPEED_SUPER &&
+			func->func_is_suspended) {
+			pr_debug("Calling usb_func_wakeup\n");
+			ret = usb_func_wakeup(func);
+		} else {
+			pr_debug("Calling usb_gadget_wakeup");
+			ret = usb_gadget_wakeup(gadget);
+		}
+
+		if ((ret == -EBUSY) || (ret == -EAGAIN))
+			pr_debug("RW delayed due to LPM exit.");
+		else if (ret)
+			pr_err("wakeup failed. ret=%d.", ret);
+	} else {
+		pr_debug("RW disabled.");
+	}
+err:
+	return count;
+}
+
+static int usb_cser_rw_show(struct seq_file *s, void *unused)
+{
+	struct f_cdev *port = s->private;
+
+	if (!port) {
+		pr_err("port is null\n");
+		return 0;
+	}
+
+	seq_printf(s, "%d\n", port->debugfs_rw_enable);
+
+	return 0;
+}
+
+static int debug_cdev_rw_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, usb_cser_rw_show, inode->i_private);
+}
+
+static const struct file_operations cser_rem_wakeup_fops = {
+	.open = debug_cdev_rw_open,
+	.read = seq_read,
+	.write = cser_rw_write,
+	.owner = THIS_MODULE,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static void usb_cser_debugfs_init(struct f_cdev *port)
+{
+	debugfs.debugfs_root = debugfs_create_dir(port->name, NULL);
+	if (IS_ERR(debugfs.debugfs_root))
+		return;
+
+	debugfs_create_file("remote_wakeup", 0600,
+			debugfs.debugfs_root, port, &cser_rem_wakeup_fops);
+}
+
+static void usb_cser_debugfs_exit(void)
+{
+	debugfs_remove_recursive(debugfs.debugfs_root);
+}
+
 static struct f_cdev *f_cdev_alloc(char *func_name, int portno)
 {
 	int ret;
@@ -1637,6 +1788,8 @@
 		goto err_create_dev;
 	}
 
+	usb_cser_debugfs_init(port);
+
 	pr_info("port_name:%s (%pK) portno:(%d)\n",
 			port->name, port, port->port_num);
 	return port;
@@ -1899,6 +2052,8 @@
 	port->port_usb.func.set_alt = usb_cser_set_alt;
 	port->port_usb.func.disable = usb_cser_disable;
 	port->port_usb.func.setup = usb_cser_setup;
+	port->port_usb.func.func_suspend = usb_cser_func_suspend;
+	port->port_usb.func.get_status = usb_cser_get_status;
 	port->port_usb.func.free_func = usb_cser_free_func;
 
 	return &port->port_usb.func;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6fe4fc5..15c67c1 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3733,7 +3733,7 @@
 
 	ffs_log("exit");
 
-	return USB_GADGET_DELAYED_STATUS;
+	return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 0065c06..726b1aa 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1083,9 +1083,11 @@
 			ipa_resume_work_handler(d_port);
 			d_port->sm_state = STATE_CONNECTED;
 		} else if (event == EVT_DISCONNECTED) {
+			usb_gadget_autopm_get(d_port->gadget);
 			ipa_disconnect_work_handler(d_port);
 			d_port->sm_state = STATE_INITIALIZED;
 			log_event_dbg("%s: ST_SUS_EVT_DIS", __func__);
+			usb_gadget_autopm_put_async(d_port->gadget);
 		}
 		break;
 	default:
@@ -1393,7 +1395,7 @@
 						ctrl_device);
 	struct f_gsi *gsi;
 	struct gsi_ctrl_pkt *cpkt;
-	struct ep_info info;
+	struct ep_info info = { {0} };
 	int val, ret = 0;
 	unsigned long flags;
 
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 5231213..683d6c5 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -39,7 +39,7 @@
 #define GSI_GPS_CTRL_NAME "gps"
 
 #define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
-#define GSI_MAX_CTRL_PKT_SIZE 4096
+#define GSI_MAX_CTRL_PKT_SIZE 8192
 #define GSI_CTRL_DTR (1 << 0)
 
 #define GSI_NUM_IN_RNDIS_BUFFERS 50
diff --git a/drivers/usb/gadget/function/f_ipc.c b/drivers/usb/gadget/function/f_ipc.c
new file mode 100644
index 0000000..a79a559
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ipc.c
@@ -0,0 +1,842 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/usb/ipc_bridge.h>
+
+#define MAX_INST_NAME_LEN	40
+
+#define IPC_BRIDGE_MAX_READ_SZ	(8 * 1024)
+#define IPC_BRIDGE_MAX_WRITE_SZ	(8 * 1024)
+
+/* for configfs support */
+struct ipc_opts {
+	struct usb_function_instance func_inst;
+	struct ipc_context *ctxt;
+};
+
+static inline struct ipc_opts *to_ipc_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct ipc_opts,
+			    func_inst.group);
+}
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            =	sizeof(intf_desc),
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xFF,
+	.bInterfaceSubClass =	0xFF,
+	.bInterfaceProtocol =	0x30,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(512),
+	.bInterval        =	0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(512),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor ss_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_in_comp_desc = {
+	.bLength =		sizeof(ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor ss_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_out_comp_desc = {
+	.bLength =		sizeof(ss_bulk_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *fs_ipc_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	NULL,
+};
+static struct usb_descriptor_header *hs_ipc_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_ipc_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_comp_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_comp_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string ipc_string_defs[] = {
+	[0].s = "IPC",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ipc_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		ipc_string_defs,
+};
+
+static struct usb_gadget_strings *ipc_strings[] = {
+	&ipc_string_table,
+	NULL,
+};
+
+enum current_state_type {
+	IPC_DISCONNECTED,
+	IPC_CONNECTED,
+};
+
+/*
+ * struct ipc_context - USB IPC router function driver private structure
+ * @function: function structure for USB interface
+ * @out: USB OUT endpoint struct
+ * @in: USB IN endpoint struct
+ * @in_req: USB IN endpoint request
+ * @out_req: USB OUT endpoint request
+ * @lock: Spinlock to protect structure members
+ * @state_wq: Waitqueue to wait on online and disconnected states
+ * @read_done: Denote OUT endpoint request completion
+ * @write_done: Denote IN endpoint request completion
+ * @online: If true, function is ready to send and receive data
+ * @connected: If true, set_alt issued by the host
+ * @opened: If true, IPC router platform device has opened this route
+ * @cdev: USB composite device struct
+ * @pdev: Platform device to register with IPC router
+ * @func_work: Work item to register pdev with IPC router and update states
+ * @current_state: Current status of the interface
+ */
+struct ipc_context {
+	struct usb_function function;
+	struct usb_ep *out;
+	struct usb_ep *in;
+	struct usb_request *in_req;
+	struct usb_request *out_req;
+	spinlock_t lock;
+	wait_queue_head_t state_wq;
+	struct completion read_done;
+	struct completion write_done;
+	unsigned int online;
+	unsigned int connected;
+	bool opened;
+	struct usb_composite_dev *cdev;
+	struct platform_device *pdev;
+	struct work_struct func_work;
+	enum current_state_type current_state;
+
+	/* pkt counters */
+	unsigned long bytes_to_host;
+	unsigned long bytes_to_mdm;
+	unsigned int pending_writes;
+	unsigned int pending_reads;
+};
+
+static struct ipc_context *ipc_dev;
+
+static inline struct ipc_context *func_to_ipc(struct usb_function *f)
+{
+	return container_of(f, struct ipc_context, function);
+}
+
+static void ipc_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	complete(&ipc_dev->write_done);
+	ipc_dev->bytes_to_host += req->actual;
+	ipc_dev->pending_writes--;
+}
+
+/*
+ * ipc_write() - Write IPC data from IPC router
+ * @pdev: IPC router USB transport platform device
+ * @buf: Data buffer from IPC core
+ * @count: Data buffer size
+ *
+ * Enqueue a request on the IN endpoint of the interface corresponding to this
+ * channel. It returns an appropriate error code if the interface or data
+ * buffer is not configured properly. If ep_queue fails because the interface
+ * is suspended, it waits for the interface to come online or get disconnected.
+ *
+ * This function operates asynchronously. WRITE_DONE event is notified after
+ * completion of IN request.
+ */
+static int ipc_write(struct platform_device *pdev, char *buf,
+							unsigned int count)
+{
+	unsigned long flags;
+	struct usb_request *req;
+	struct usb_ep *in;
+
+	if (!ipc_dev)
+		return -ENODEV;
+	if (ipc_dev->pdev != pdev)
+		return -EINVAL;
+	if (!ipc_dev->opened)
+		return -EPERM;
+	if (count > IPC_BRIDGE_MAX_WRITE_SZ)
+		return -ENOSPC;
+
+	spin_lock_irqsave(&ipc_dev->lock, flags);
+	in = ipc_dev->in;
+	req = ipc_dev->in_req;
+	req->buf = buf;
+	req->length = count;
+	ipc_dev->pending_writes++;
+	spin_unlock_irqrestore(&ipc_dev->lock, flags);
+
+retry_write:
+	if (ipc_dev->current_state == IPC_DISCONNECTED) {
+		pr_err("%s: Interface disconnected, cannot queue req\n",
+								__func__);
+		ipc_dev->pending_writes--;
+		return -EINVAL;
+	}
+
+	if (usb_ep_queue(in, req, GFP_KERNEL)) {
+		wait_event_interruptible(ipc_dev->state_wq, ipc_dev->online ||
+				ipc_dev->current_state == IPC_DISCONNECTED);
+		pr_debug("%s: Interface ready, Retry IN request\n", __func__);
+		goto retry_write;
+	}
+
+	if (unlikely(wait_for_completion_interruptible(&ipc_dev->write_done))) {
+		usb_ep_dequeue(in, req);
+		return -EINTR;
+	}
+
+	return !req->status ? req->actual : req->status;
+}
+
+static void ipc_out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	complete(&ipc_dev->read_done);
+	ipc_dev->bytes_to_mdm += req->actual;
+	ipc_dev->pending_reads--;
+}
+
+/*
+ * ipc_read() - Read IPC data from USB
+ * @pdev: IPC router USB transport platform device
+ * @buf: Data buffer to be populated
+ * @count: Data buffer size
+ *
+ * Enqueue a request on the OUT endpoint of the interface corresponding to this
+ * channel. It returns an appropriate error code if the interface or data
+ * buffer is not configured properly. If ep_queue fails because the interface
+ * is suspended, it waits for the interface to come online or get disconnected.
+ *
+ * This function operates asynchronously. READ_DONE event is notified after
+ * completion of OUT request.
+ */
+static int ipc_read(struct platform_device *pdev, char *buf, unsigned int count)
+{
+	unsigned long flags;
+	struct usb_request *req;
+	struct usb_ep *out;
+
+	if (!ipc_dev)
+		return -ENODEV;
+	if (ipc_dev->pdev != pdev)
+		return -EINVAL;
+	if (!ipc_dev->opened)
+		return -EPERM;
+	if (count > IPC_BRIDGE_MAX_READ_SZ)
+		return -ENOSPC;
+
+	spin_lock_irqsave(&ipc_dev->lock, flags);
+	out = ipc_dev->out;
+	req = ipc_dev->out_req;
+	req->buf = buf;
+	req->length = count;
+	ipc_dev->pending_reads++;
+	spin_unlock_irqrestore(&ipc_dev->lock, flags);
+
+retry_read:
+	if (ipc_dev->current_state == IPC_DISCONNECTED) {
+		pr_err("%s: Interface disconnected, cannot queue req\n",
+							__func__);
+		ipc_dev->pending_reads--;
+		return -EINVAL;
+	}
+
+	if (usb_ep_queue(out, req, GFP_KERNEL)) {
+		wait_event_interruptible(ipc_dev->state_wq, ipc_dev->online ||
+				ipc_dev->current_state == IPC_DISCONNECTED);
+		pr_debug("%s: Interface ready, Retry OUT request\n", __func__);
+		goto retry_read;
+	}
+
+	if (unlikely(wait_for_completion_interruptible(&ipc_dev->read_done))) {
+		usb_ep_dequeue(out, req);
+		return -EINTR;
+	}
+
+	return !req->status ? req->actual : req->status;
+}
+
+static int ipc_open(struct platform_device *pdev)
+{
+	unsigned long flags;
+
+	if (ipc_dev->pdev != pdev)
+		return -EINVAL;
+
+	pr_debug("%s: Trying to open IPC bridge\n", __func__);
+	spin_lock_irqsave(&ipc_dev->lock, flags);
+	if (ipc_dev->opened) {
+		spin_unlock_irqrestore(&ipc_dev->lock, flags);
+		pr_err("%s: Bridge already opened\n", __func__);
+		return -EBUSY;
+	}
+
+	ipc_dev->opened = true;
+	spin_unlock_irqrestore(&ipc_dev->lock, flags);
+
+	return 0;
+}
+
+static void ipc_close(struct platform_device *pdev)
+{
+	unsigned long flags;
+
+	WARN_ON(ipc_dev->pdev != pdev);
+
+	pr_debug("%s: Trying to close IPC bridge\n", __func__);
+	spin_lock_irqsave(&ipc_dev->lock, flags);
+	if (!ipc_dev->opened) {
+		spin_unlock_irqrestore(&ipc_dev->lock, flags);
+		pr_err("%s: Bridge already closed\n", __func__);
+		return;
+	}
+
+	ipc_dev->opened = false;
+	spin_unlock_irqrestore(&ipc_dev->lock, flags);
+}
+
+static const struct ipc_bridge_platform_data ipc_pdata = {
+	.max_read_size = IPC_BRIDGE_MAX_READ_SZ,
+	.max_write_size = IPC_BRIDGE_MAX_WRITE_SZ,
+	.open = ipc_open,
+	.read = ipc_read,
+	.write = ipc_write,
+	.close = ipc_close,
+};
+
+static void ipc_function_work(struct work_struct *w)
+{
+	struct ipc_context *ctxt = container_of(w, struct ipc_context,
+								func_work);
+	int ret;
+
+	switch (ctxt->current_state) {
+	case IPC_DISCONNECTED:
+		if (!ctxt->connected)
+			break;
+
+		ctxt->current_state = IPC_CONNECTED;
+		ctxt->pdev = platform_device_alloc("ipc_bridge", -1);
+		if (!ctxt->pdev)
+			goto pdev_fail;
+
+		ret = platform_device_add_data(ctxt->pdev, &ipc_pdata,
+				sizeof(struct ipc_bridge_platform_data));
+		if (ret) {
+			platform_device_put(ctxt->pdev);
+			pr_err("%s: fail to add pdata\n", __func__);
+			goto pdev_fail;
+		}
+
+		ret = platform_device_add(ctxt->pdev);
+		if (ret) {
+			platform_device_put(ctxt->pdev);
+			pr_err("%s: fail to add pdev\n", __func__);
+			goto pdev_fail;
+		}
+		break;
+	case IPC_CONNECTED:
+		if (ctxt->connected)
+			break;
+
+		ctxt->current_state = IPC_DISCONNECTED;
+		wake_up(&ctxt->state_wq);
+		platform_device_unregister(ctxt->pdev);
+		break;
+	default:
+		pr_debug("%s: Unknown current state\n", __func__);
+	}
+
+	return;
+
+pdev_fail:
+	ctxt->current_state = IPC_DISCONNECTED;
+	return;
+}
+
+static int ipc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct ipc_context *ctxt = func_to_ipc(f);
+	struct usb_ep *ep;
+	int status;
+
+	pr_debug("%s: start binding\n", __func__);
+	ctxt->cdev = c->cdev;
+
+	if (ipc_string_defs[0].id == 0) {
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		ipc_string_defs[0].id = status;
+	}
+
+	intf_desc.bInterfaceNumber =  usb_interface_id(c, f);
+
+	status = -ENODEV;
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
+	if (!ep)
+		goto fail;
+	ctxt->in = ep;
+	ep->driver_data = ctxt;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
+	if (!ep)
+		goto fail;
+	ctxt->out = ep;
+	ep->driver_data = ctxt;
+
+	status = -ENOMEM;
+	ctxt->in_req = usb_ep_alloc_request(ctxt->in, GFP_KERNEL);
+	if (!ctxt->in_req)
+		goto fail;
+
+	ctxt->in_req->complete = ipc_in_complete;
+	ctxt->out_req = usb_ep_alloc_request(ctxt->out, GFP_KERNEL);
+	if (!ctxt->out_req)
+		goto fail;
+
+	ctxt->out_req->complete = ipc_out_complete;
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(fs_ipc_desc);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		hs_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_ipc_desc);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		ss_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(ss_ipc_desc);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+	if (ctxt->out_req)
+		usb_ep_free_request(ctxt->out, ctxt->out_req);
+	if (ctxt->in_req)
+		usb_ep_free_request(ctxt->in, ctxt->in_req);
+	if (ctxt->out)
+		ctxt->out->driver_data = NULL;
+	if (ctxt->in)
+		ctxt->in->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", __func__, status);
+	return status;
+}
+
+static void ipc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct ipc_context *ctxt = func_to_ipc(f);
+
+	pr_debug("%s: start unbinding\nclear_desc\n", __func__);
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+
+	usb_free_descriptors(f->fs_descriptors);
+
+	if (ctxt->out_req)
+		usb_ep_free_request(ctxt->out, ctxt->out_req);
+	if (ctxt->in_req)
+		usb_ep_free_request(ctxt->in, ctxt->in_req);
+}
+
+static int ipc_set_alt(struct usb_function *f, unsigned int intf,
+				unsigned int alt)
+{
+	struct ipc_context *ctxt = func_to_ipc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+	int rc = 0;
+
+	pr_debug("%s: ipc_dev: %pK\n", __func__, ctxt);
+	if (config_ep_by_speed(cdev->gadget, f, ctxt->in) ||
+	    config_ep_by_speed(cdev->gadget, f, ctxt->out)) {
+		ctxt->in->desc = NULL;
+		ctxt->out->desc = NULL;
+		return -EINVAL;
+	}
+
+	ctxt->in->driver_data = ctxt;
+	rc = usb_ep_enable(ctxt->in);
+	if (rc) {
+		ERROR(ctxt->cdev, "can't enable %s, result %d\n",
+						ctxt->in->name, rc);
+		return rc;
+	}
+
+	ctxt->out->driver_data = ctxt;
+	rc = usb_ep_enable(ctxt->out);
+	if (rc) {
+		ERROR(ctxt->cdev, "can't enable %s, result %d\n",
+						ctxt->out->name, rc);
+		usb_ep_disable(ctxt->in);
+		return rc;
+	}
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	ctxt->connected = 1;
+	ctxt->online = 1;
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	schedule_work(&ctxt->func_work);
+
+	return rc;
+}
+
+static void ipc_disable(struct usb_function *f)
+{
+	struct ipc_context *ctxt = func_to_ipc(f);
+	unsigned long flags;
+
+	pr_debug("%s: Disabling\n", __func__);
+	spin_lock_irqsave(&ctxt->lock, flags);
+	ctxt->online = 0;
+	ctxt->connected = 0;
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	schedule_work(&ctxt->func_work);
+
+	usb_ep_disable(ctxt->in);
+	ctxt->in->driver_data = NULL;
+
+	usb_ep_disable(ctxt->out);
+	ctxt->out->driver_data = NULL;
+}
+
+static void ipc_suspend(struct usb_function *f)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipc_dev->lock, flags);
+	ipc_dev->online = 0;
+	spin_unlock_irqrestore(&ipc_dev->lock, flags);
+}
+
+static void ipc_resume(struct usb_function *f)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipc_dev->lock, flags);
+	ipc_dev->online = 1;
+	spin_unlock_irqrestore(&ipc_dev->lock, flags);
+	wake_up(&ipc_dev->state_wq);
+}
+
+static void ipc_free(struct usb_function *f) {}
+
+static struct usb_function *ipc_bind_config(struct usb_function_instance *fi)
+{
+	struct ipc_opts *opts;
+	struct ipc_context *ctxt;
+	struct usb_function *f;
+
+	opts = container_of(fi, struct ipc_opts, func_inst);
+	ctxt = opts->ctxt;
+
+	f = &ctxt->function;
+	f->name = "ipc";
+	f->strings = ipc_strings;
+	f->bind = ipc_bind;
+	f->unbind = ipc_unbind;
+	f->set_alt = ipc_set_alt;
+	f->disable = ipc_disable;
+	f->suspend = ipc_suspend;
+	f->resume = ipc_resume;
+	f->free_func = ipc_free;
+
+	pr_debug("%s: complete\n", __func__);
+
+	return f;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static char ipc_debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char *buf = ipc_debug_buffer;
+	int temp = 0;
+	unsigned long flags;
+
+	if (ipc_dev) {
+		spin_lock_irqsave(&ipc_dev->lock, flags);
+		temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+				"endpoints: %s, %s\n"
+				"bytes to host: %lu\n"
+				"bytes to mdm:  %lu\n"
+				"pending writes:  %u\n"
+				"pending reads: %u\n",
+				ipc_dev->in->name, ipc_dev->out->name,
+				ipc_dev->bytes_to_host,
+				ipc_dev->bytes_to_mdm,
+				ipc_dev->pending_writes,
+				ipc_dev->pending_reads);
+		spin_unlock_irqrestore(&ipc_dev->lock, flags);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long flags;
+
+	if (ipc_dev) {
+		spin_lock_irqsave(&ipc_dev->lock, flags);
+		ipc_dev->bytes_to_host = 0;
+		ipc_dev->bytes_to_mdm = 0;
+		spin_unlock_irqrestore(&ipc_dev->lock, flags);
+	}
+
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations debug_fipc_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+static struct dentry *dent_ipc;
+static void fipc_debugfs_init(void)
+{
+	struct dentry *dent_ipc_status;
+
+	dent_ipc = debugfs_create_dir("usb_ipc", NULL);
+	if (!dent_ipc || IS_ERR(dent_ipc))
+		return;
+
+	dent_ipc_status = debugfs_create_file("status", 0444, dent_ipc, NULL,
+			&debug_fipc_ops);
+
+	if (!dent_ipc_status || IS_ERR(dent_ipc_status)) {
+		debugfs_remove(dent_ipc);
+		dent_ipc = NULL;
+		return;
+	}
+}
+
+static void fipc_debugfs_remove(void)
+{
+	debugfs_remove_recursive(dent_ipc);
+}
+#else
+static inline void fipc_debugfs_init(void) {}
+static inline void fipc_debugfs_remove(void) {}
+#endif
+
+static void ipc_opts_release(struct config_item *item)
+{
+	struct ipc_opts *opts = to_ipc_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations ipc_item_ops = {
+	.release	= ipc_opts_release,
+};
+
+static struct config_item_type ipc_func_type = {
+	.ct_item_ops	= &ipc_item_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+static int ipc_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct ipc_opts *opts = container_of(fi, struct ipc_opts, func_inst);
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ipc_dev = kzalloc(sizeof(*ipc_dev), GFP_KERNEL);
+	if (!ipc_dev)
+		return -ENOMEM;
+
+	spin_lock_init(&ipc_dev->lock);
+	init_waitqueue_head(&ipc_dev->state_wq);
+	init_completion(&ipc_dev->read_done);
+	init_completion(&ipc_dev->write_done);
+	INIT_WORK(&ipc_dev->func_work, ipc_function_work);
+
+	opts->ctxt = ipc_dev;
+
+	return 0;
+}
+
+static void ipc_free_inst(struct usb_function_instance *f)
+{
+	struct ipc_opts *opts = container_of(f, struct ipc_opts, func_inst);
+
+	kfree(opts->ctxt);
+	kfree(opts);
+}
+
+static struct usb_function_instance *ipc_alloc_inst(void)
+{
+	struct ipc_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = ipc_set_inst_name;
+	opts->func_inst.free_func_inst = ipc_free_inst;
+	config_group_init_type_name(&opts->func_inst.group, "",
+				    &ipc_func_type);
+
+	return &opts->func_inst;
+}
+
+static struct usb_function *ipc_alloc(struct usb_function_instance *fi)
+{
+	return ipc_bind_config(fi);
+}
+
+DECLARE_USB_FUNCTION(ipc, ipc_alloc_inst, ipc_alloc);
+
+static int __init ipc_init(void)
+{
+	int ret;
+
+	ret = usb_function_register(&ipcusb_func);
+	if (ret)
+		pr_err("%s: failed to register ipc %d\n", __func__, ret);
+
+	fipc_debugfs_init();
+
+	return ret;
+}
+
+static void __exit ipc_exit(void)
+{
+	fipc_debugfs_remove();
+	usb_function_unregister(&ipcusb_func);
+}
+
+module_init(ipc_init);
+module_exit(ipc_exit);
+
+MODULE_DESCRIPTION("IPC function driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 626e020..15dfbae 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1042,14 +1042,14 @@
 };
 
 struct cntrl_cur_lay3 {
-	__u32	dCUR;
+	__le32	dCUR;
 };
 
 struct cntrl_range_lay3 {
-	__u16	wNumSubRanges;
-	__u32	dMIN;
-	__u32	dMAX;
-	__u32	dRES;
+	__le16	wNumSubRanges;
+	__le32	dMIN;
+	__le32	dMAX;
+	__le32	dRES;
 } __packed;
 
 static inline void
@@ -1421,9 +1421,9 @@
 		memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
 		if (entity_id == USB_IN_CLK_ID)
-			c.dCUR = p_srate;
+			c.dCUR = cpu_to_le32(p_srate);
 		else if (entity_id == USB_OUT_CLK_ID)
-			c.dCUR = c_srate;
+			c.dCUR = cpu_to_le32(c_srate);
 
 		value = min_t(unsigned, w_length, sizeof c);
 		memcpy(req->buf, &c, value);
@@ -1461,15 +1461,15 @@
 
 	if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
 		if (entity_id == USB_IN_CLK_ID)
-			r.dMIN = p_srate;
+			r.dMIN = cpu_to_le32(p_srate);
 		else if (entity_id == USB_OUT_CLK_ID)
-			r.dMIN = c_srate;
+			r.dMIN = cpu_to_le32(c_srate);
 		else
 			return -EOPNOTSUPP;
 
 		r.dMAX = r.dMIN;
 		r.dRES = 0;
-		r.wNumSubRanges = 1;
+		r.wNumSubRanges = cpu_to_le16(1);
 
 		value = min_t(unsigned, w_length, sizeof r);
 		memcpy(req->buf, &r, value);
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
index 7af152b3..6712ca2 100644
--- a/drivers/usb/gadget/function/u_data_ipa.c
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -451,8 +451,9 @@
 
 	/* update IPA Parameteres here. */
 	port->ipa_params.usb_connection_speed = gadget->speed;
-	port->ipa_params.reset_pipe_after_lpm =
-				msm_dwc3_reset_ep_after_lpm(gadget);
+	if (!gadget->is_chipidea)
+		port->ipa_params.reset_pipe_after_lpm =
+			msm_dwc3_reset_ep_after_lpm(gadget);
 	port->ipa_params.skip_ep_cfg = true;
 	port->ipa_params.keep_ipa_awake = true;
 	port->ipa_params.cons_clnt_hdl = -1;
@@ -469,19 +470,29 @@
 				__func__);
 			goto out;
 		}
-
-		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+		if (!gadget->is_chipidea) {
+			sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
 				| MSM_PRODUCER | port->src_pipe_idx;
-		port->rx_req->length = 32*1024;
-		port->rx_req->udc_priv = sps_params;
-		configure_fifo(port->usb_bam_type,
-				port->src_connection_idx,
-				port->port_usb->out);
-		ret = msm_ep_config(gport->out, port->rx_req);
-		if (ret) {
-			pr_err("msm_ep_config() failed for OUT EP\n");
-			spin_unlock_irqrestore(&port->port_lock, flags);
-			goto out;
+			port->rx_req->length = 32*1024;
+			port->rx_req->udc_priv = sps_params;
+			configure_fifo(port->usb_bam_type,
+					port->src_connection_idx,
+					port->port_usb->out);
+			ret = msm_ep_config(gport->out, port->rx_req);
+			if (ret) {
+				pr_err("msm_ep_config() failed for OUT EP\n");
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				goto out;
+			}
+		} else {
+			/* gadget->is_chipidea */
+			get_bam2bam_connection_info(port->usb_bam_type,
+					port->src_connection_idx,
+					&port->src_pipe_idx,
+					NULL, NULL, NULL);
+			sps_params = (MSM_SPS_MODE | port->src_pipe_idx |
+				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+			port->rx_req->udc_priv = sps_params;
 		}
 	}
 
@@ -496,17 +507,29 @@
 				__func__);
 			goto unconfig_msm_ep_out;
 		}
-		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
-						port->dst_pipe_idx;
-		port->tx_req->length = 32*1024;
-		port->tx_req->udc_priv = sps_params;
-		configure_fifo(port->usb_bam_type,
-				port->dst_connection_idx, gport->in);
-		ret = msm_ep_config(gport->in, port->tx_req);
-		if (ret) {
-			pr_err("msm_ep_config() failed for IN EP\n");
-			spin_unlock_irqrestore(&port->port_lock, flags);
-			goto unconfig_msm_ep_out;
+		if (!gadget->is_chipidea) {
+			sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+				port->dst_pipe_idx;
+			port->tx_req->length = 32*1024;
+			port->tx_req->udc_priv = sps_params;
+			configure_fifo(port->usb_bam_type,
+					port->dst_connection_idx, gport->in);
+
+			ret = msm_ep_config(gport->in, port->tx_req);
+			if (ret) {
+				pr_err("msm_ep_config() failed for IN EP\n");
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				goto unconfig_msm_ep_out;
+			}
+		} else {
+			/* gadget->is_chipidea */
+			get_bam2bam_connection_info(port->usb_bam_type,
+					port->dst_connection_idx,
+					&port->dst_pipe_idx,
+					NULL, NULL, NULL);
+			sps_params = (MSM_SPS_MODE | port->dst_pipe_idx |
+				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+			port->tx_req->udc_priv = sps_params;
 		}
 	}
 
@@ -1163,8 +1186,8 @@
 		spin_unlock_irqrestore(&port->port_lock, flags);
 		msm_dwc3_reset_dbm_ep(port->port_usb->in);
 		spin_lock_irqsave(&port->port_lock, flags);
-		usb_bam_resume(port->usb_bam_type, &port->ipa_params);
 	}
+	usb_bam_resume(port->usb_bam_type, &port->ipa_params);
 
 exit:
 	spin_unlock_irqrestore(&port->port_lock, flags);
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 6ba122c..95df2b3 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1066,12 +1066,15 @@
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
 	struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+	int i;
 
 	usb_del_gadget_udc(&fotg210->gadget);
 	iounmap(fotg210->reg);
 	free_irq(platform_get_irq(pdev, 0), fotg210);
 
 	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+		kfree(fotg210->ep[i]);
 	kfree(fotg210);
 
 	return 0;
@@ -1102,7 +1105,7 @@
 	/* initialize udc */
 	fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
 	if (fotg210 == NULL)
-		goto err_alloc;
+		goto err;
 
 	for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
 		_ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1114,7 +1117,7 @@
 	fotg210->reg = ioremap(res->start, resource_size(res));
 	if (fotg210->reg == NULL) {
 		pr_err("ioremap error.\n");
-		goto err_map;
+		goto err_alloc;
 	}
 
 	spin_lock_init(&fotg210->lock);
@@ -1162,7 +1165,7 @@
 	fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
 				GFP_KERNEL);
 	if (fotg210->ep0_req == NULL)
-		goto err_req;
+		goto err_map;
 
 	fotg210_init(fotg210);
 
@@ -1190,12 +1193,14 @@
 	fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
 
 err_map:
-	if (fotg210->reg)
-		iounmap(fotg210->reg);
+	iounmap(fotg210->reg);
 
 err_alloc:
+	for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+		kfree(fotg210->ep[i]);
 	kfree(fotg210);
 
+err:
 	return ret;
 }
 
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index d133252..7a8c366 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1549,11 +1549,14 @@
 		writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
 	} else {
 		writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
-		stop_activity(dev, dev->driver);
+		stop_activity(dev, NULL);
 	}
 
 	spin_unlock_irqrestore(&dev->lock, flags);
 
+	if (!is_on && dev->driver)
+		dev->driver->disconnect(&dev->gadget);
+
 	return 0;
 }
 
@@ -2470,8 +2473,11 @@
 		nuke(&dev->ep[i]);
 
 	/* report disconnect; the driver is already quiesced */
-	if (driver)
+	if (driver) {
+		spin_unlock(&dev->lock);
 		driver->disconnect(&dev->gadget);
+		spin_lock(&dev->lock);
+	}
 
 	usb_reinit(dev);
 }
@@ -3345,6 +3351,8 @@
 		BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
+__releases(dev->lock)
+__acquires(dev->lock)
 {
 	struct net2280_ep	*ep;
 	u32			tmp, num, mask, scratch;
@@ -3385,12 +3393,14 @@
 			if (disconnect || reset) {
 				stop_activity(dev, dev->driver);
 				ep0_start(dev);
+				spin_unlock(&dev->lock);
 				if (reset)
 					usb_gadget_udc_reset
 						(&dev->gadget, dev->driver);
 				else
 					(dev->driver->disconnect)
 						(&dev->gadget);
+				spin_lock(&dev->lock);
 				return;
 			}
 		}
@@ -3409,6 +3419,7 @@
 	tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
 	if (stat & tmp) {
 		writel(tmp, &dev->regs->irqstat1);
+		spin_unlock(&dev->lock);
 		if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
 			if (dev->driver->suspend)
 				dev->driver->suspend(&dev->gadget);
@@ -3419,6 +3430,7 @@
 				dev->driver->resume(&dev->gadget);
 			/* at high speed, note erratum 0133 */
 		}
+		spin_lock(&dev->lock);
 		stat &= ~tmp;
 	}
 
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index f2c8862..230e324 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -835,11 +835,11 @@
 
 		r8a66597_bset(r8a66597, XCKE, SYSCFG0);
 
-		msleep(3);
+		mdelay(3);
 
 		r8a66597_bset(r8a66597, PLLC, SYSCFG0);
 
-		msleep(1);
+		mdelay(1);
 
 		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
 
@@ -1193,7 +1193,7 @@
 	r8a66597->ep0_req->length = 2;
 	/* AV: what happens if we get called again before that gets through? */
 	spin_unlock(&r8a66597->lock);
-	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
 	spin_lock(&r8a66597->lock);
 }
 
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index b1ae944..924c08a 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -628,12 +628,15 @@
 	switch (speed) {
 	case USB_STA_SPEED_SS:
 		usb3->gadget.speed = USB_SPEED_SUPER;
+		usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
 		break;
 	case USB_STA_SPEED_HS:
 		usb3->gadget.speed = USB_SPEED_HIGH;
+		usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
 		break;
 	case USB_STA_SPEED_FS:
 		usb3->gadget.speed = USB_SPEED_FULL;
+		usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
 		break;
 	default:
 		usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1858,7 +1861,7 @@
 			/* for control pipe */
 			usb3->gadget.ep0 = &usb3_ep->ep;
 			usb_ep_set_maxpacket_limit(&usb3_ep->ep,
-						USB3_EP0_HSFS_MAX_PACKET_SIZE);
+						USB3_EP0_SS_MAX_PACKET_SIZE);
 			usb3_ep->ep.caps.type_control = true;
 			usb3_ep->ep.caps.dir_in = true;
 			usb3_ep->ep.caps.dir_out = true;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 43d5293..4361897 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2559,7 +2559,7 @@
 	} else {
 		int frame = 0;
 		dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
-		msleep(100);
+		mdelay(100);
 		return frame;
 	}
 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 040feda..6b71b45 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -650,7 +650,7 @@
 	if (!ep->stream_info)
 		return NULL;
 
-	if (stream_id > ep->stream_info->num_streams)
+	if (stream_id >= ep->stream_info->num_streams)
 		return NULL;
 	return ep->stream_info->stream_rings[stream_id];
 }
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index ce9e457..c108758 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -735,10 +735,10 @@
 	xhci_mtk_host_enable(mtk);
 
 	xhci_dbg(xhci, "%s: restart port polling\n", __func__);
-	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-	usb_hcd_poll_rh_status(hcd);
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	usb_hcd_poll_rh_status(xhci->shared_hcd);
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	usb_hcd_poll_rh_status(hcd);
 	return 0;
 }
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f6782a3..b514055 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -179,6 +179,8 @@
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
 		xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index a59fafb..97d57a9 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -482,7 +482,7 @@
 	unsigned long mask;
 	unsigned int port;
 	bool idle, enable;
-	int err;
+	int err = 0;
 
 	memset(&rsp, 0, sizeof(rsp));
 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ccad0be..79d2189 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -934,6 +934,41 @@
 	spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+	__le32 __iomem		**port_array;
+	int			port_index;
+	u32			status;
+	u32			portsc;
+
+	status = readl(&xhci->op_regs->status);
+	if (status & STS_EINT)
+		return true;
+	/*
+	 * Checking STS_EINT is not enough as there is a lag between a change
+	 * bit being set and the Port Status Change Event that it generated
+	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+	 */
+
+	port_index = xhci->num_usb2_ports;
+	port_array = xhci->usb2_ports;
+	while (port_index--) {
+		portsc = readl(port_array[port_index]);
+		if (portsc & PORT_CHANGE_MASK ||
+		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+			return true;
+	}
+	port_index = xhci->num_usb3_ports;
+	port_array = xhci->usb3_ports;
+	while (port_index--) {
+		portsc = readl(port_array[port_index]);
+		if (portsc & PORT_CHANGE_MASK ||
+		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+			return true;
+	}
+	return false;
+}
+
 /*
  * Stop HC (not bus-specific)
  *
@@ -1031,7 +1066,7 @@
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-	u32			command, temp = 0, status;
+	u32			command, temp = 0;
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	struct usb_hcd		*secondary_hcd;
 	int			retval = 0;
@@ -1065,8 +1100,13 @@
 		command = readl(&xhci->op_regs->command);
 		command |= CMD_CRS;
 		writel(command, &xhci->op_regs->command);
+		/*
+		 * Some controllers take up to 55+ ms to complete the controller
+		 * restore, so set the timeout to 100 ms. The xHCI specification
+		 * doesn't mention any timeout value.
+		 */
 		if (xhci_handshake(&xhci->op_regs->status,
-			      STS_RESTORE, 0, 10 * 1000)) {
+			      STS_RESTORE, 0, 100 * 1000)) {
 			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
 			spin_unlock_irq(&xhci->lock);
 			return -ETIMEDOUT;
@@ -1153,8 +1193,7 @@
  done:
 	if (retval == 0) {
 		/* Resume root hubs only when have pending events. */
-		status = readl(&xhci->op_regs->status);
-		if (status & STS_EINT) {
+		if (xhci_pending_portevent(xhci)) {
 			usb_hcd_resume_root_hub(xhci->shared_hcd);
 			usb_hcd_resume_root_hub(hcd);
 		}
@@ -3663,6 +3702,9 @@
 
 	virt_dev->udev = NULL;
 	spin_lock_irqsave(&xhci->lock, flags);
+
+	virt_dev->udev = NULL;
+
 	/* Don't disable the slot if the host controller is dead. */
 	state = readl(&xhci->op_regs->status);
 	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a5153ca..39068a7 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -385,6 +385,10 @@
 #define PORT_PLC	(1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC	(1 << 23)
+#define PORT_CHANGE_MASK	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+				 PORT_RC | PORT_PLC | PORT_CEC)
+
+
 /* Cold Attach Status - xHC can set this bit to report device attached during
  * Sx state. Warm port reset should be perfomed to clear this bit and move port
  * to connected state.
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 9ff6652..e77465a 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -385,7 +385,7 @@
 	mask &= 0x0f;
 	val &= 0x0f;
 	d = (priv->reg[1] & (~mask)) ^ val;
-	if (set_1284_register(pp, 2, d, GFP_KERNEL))
+	if (set_1284_register(pp, 2, d, GFP_ATOMIC))
 		return 0;
 	priv->reg[1] = d;
 	return d & 0xf;
@@ -395,7 +395,7 @@
 {
 	unsigned char ret;
 
-	if (get_1284_register(pp, 1, &ret, GFP_KERNEL))
+	if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
 		return 0;
 	return ret & 0xf8;
 }
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 54e53ac..1e67234 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -406,8 +406,7 @@
 			  loff_t *ppos)
 {
 	struct usb_yurex *dev;
-	int retval = 0;
-	int bytes_read = 0;
+	int len = 0;
 	char in_buffer[20];
 	unsigned long flags;
 
@@ -415,26 +414,19 @@
 
 	mutex_lock(&dev->io_mutex);
 	if (!dev->interface) {		/* already disconnected */
-		retval = -ENODEV;
-		goto exit;
+		mutex_unlock(&dev->io_mutex);
+		return -ENODEV;
 	}
 
 	spin_lock_irqsave(&dev->lock, flags);
-	bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+	len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
 	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (*ppos < bytes_read) {
-		if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
-			retval = -EFAULT;
-		else {
-			retval = bytes_read - *ppos;
-			*ppos += bytes_read;
-		}
-	}
-
-exit:
 	mutex_unlock(&dev->io_mutex);
-	return retval;
+
+	if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+		return -EIO;
+
+	return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
 static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
@@ -442,13 +434,13 @@
 {
 	struct usb_yurex *dev;
 	int i, set = 0, retval = 0;
-	char buffer[16];
+	char buffer[16 + 1];
 	char *data = buffer;
 	unsigned long long c, c2 = 0;
 	signed long timeout = 0;
 	DEFINE_WAIT(wait);
 
-	count = min(sizeof(buffer), count);
+	count = min(sizeof(buffer) - 1, count);
 	dev = file->private_data;
 
 	/* verify that we actually have some data to write */
@@ -467,6 +459,7 @@
 		retval = -EFAULT;
 		goto error;
 	}
+	buffer[count] = 0;
 	memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
 
 	switch (buffer[0]) {
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 10d11ac..f9a3407 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -2101,7 +2101,8 @@
 
 	/* Disconnect? */
 	if (pd->current_pr == PR_NONE) {
-		if (pd->current_state == PE_UNKNOWN)
+		if (pd->current_state == PE_UNKNOWN &&
+				pd->current_dr == DR_NONE)
 			goto sm_done;
 
 		if (pd->vconn_enabled) {
@@ -4007,6 +4008,13 @@
 }
 EXPORT_SYMBOL(devm_usbpd_get_by_phandle);
 
+static void usbpd_release(struct device *dev)
+{
+	struct usbpd *pd = container_of(dev, struct usbpd, dev);
+
+	kfree(pd);
+}
+
 static int num_pd_instances;
 
 /**
@@ -4031,6 +4039,7 @@
 	device_initialize(&pd->dev);
 	pd->dev.class = &usbpd_class;
 	pd->dev.parent = parent;
+	pd->dev.release = usbpd_release;
 	dev_set_drvdata(&pd->dev, pd);
 
 	ret = dev_set_name(&pd->dev, "usbpd%d", num_pd_instances++);
@@ -4205,7 +4214,7 @@
 	device_del(&pd->dev);
 free_pd:
 	num_pd_instances--;
-	kfree(pd);
+	put_device(&pd->dev);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(usbpd_create);
@@ -4223,8 +4232,7 @@
 	power_supply_unreg_notifier(&pd->psy_nb);
 	power_supply_put(pd->usb_psy);
 	destroy_workqueue(pd->wq);
-	device_del(&pd->dev);
-	kfree(pd);
+	device_unregister(&pd->dev);
 }
 EXPORT_SYMBOL(usbpd_destroy);
 
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 94eb292..85d031c 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -879,6 +879,7 @@
 	if (pdata->init && pdata->init(pdev) != 0)
 		return -EINVAL;
 
+#ifdef CONFIG_PPC32
 	if (pdata->big_endian_mmio) {
 		_fsl_readl = _fsl_readl_be;
 		_fsl_writel = _fsl_writel_be;
@@ -886,6 +887,7 @@
 		_fsl_readl = _fsl_readl_le;
 		_fsl_writel = _fsl_writel_le;
 	}
+#endif
 
 	/* request irq */
 	p_otg->irq = platform_get_irq(pdev, 0);
@@ -976,7 +978,7 @@
 /*
  * state file in sysfs
  */
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 5b734be..4360e89 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -23,6 +23,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/usb/phy.h>
 #include <linux/clk.h>
+#include <linux/extcon.h>
 #include <linux/reset.h>
 
 enum ldo_levels {
@@ -133,6 +134,8 @@
 	struct reset_control	*phy_reset;
 	struct reset_control	*phy_phy_reset;
 	struct reset_control	*global_phy_reset;
+	struct extcon_dev	*extcon_dp;
+	struct notifier_block	dp_nb;
 	bool			power_enabled;
 	bool			clk_enabled;
 	bool			cable_connected;
@@ -377,9 +380,12 @@
 	switch (phy->phy.type) {
 	case USB_PHY_TYPE_USB3_AND_DP:
 		/* override hardware control for reset of qmp phy */
-		writel_relaxed(SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
-			SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET,
-			phy->base + phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+		if (!(phy->phy.flags & PHY_USB_DP_CONCURRENT_MODE)) {
+			writel_relaxed(SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+				SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET,
+				phy->base +
+				phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+		}
 
 		/* update port select */
 		if (val > 0) {
@@ -389,12 +395,16 @@
 				phy->phy_reg[USB3_DP_COM_TYPEC_CTRL]);
 		}
 
-		writel_relaxed(USB3_MODE | DP_MODE,
-			phy->base + phy->phy_reg[USB3_DP_COM_PHY_MODE_CTRL]);
+		if (!(phy->phy.flags & PHY_USB_DP_CONCURRENT_MODE)) {
+			writel_relaxed(USB3_MODE | DP_MODE,
+				phy->base +
+				phy->phy_reg[USB3_DP_COM_PHY_MODE_CTRL]);
 
-		/* bring both QMP USB and QMP DP PHYs PCS block out of reset */
-		writel_relaxed(0x00,
-			phy->base + phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+			/* bring both USB and DP PHYs PCS block out of reset */
+			writel_relaxed(0x00,
+				phy->base +
+				phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+		}
 		break;
 	case  USB_PHY_TYPE_USB3_OR_DP:
 		if (val > 0) {
@@ -485,7 +495,8 @@
 	}
 
 	/* perform software reset of PHY common logic */
-	if (phy->phy.type == USB_PHY_TYPE_USB3_AND_DP)
+	if (phy->phy.type == USB_PHY_TYPE_USB3_AND_DP &&
+				!(phy->phy.flags & PHY_USB_DP_CONCURRENT_MODE))
 		writel_relaxed(0x00,
 			phy->base + phy->phy_reg[USB3_DP_COM_SW_RESET]);
 
@@ -523,6 +534,25 @@
 					phy);
 	int ret = 0;
 
+	if (phy->phy.flags & PHY_USB_DP_CONCURRENT_MODE) {
+		dev_dbg(uphy->dev, "Resetting USB part of QMP phy\n");
+
+		/* Assert USB3 PHY CSR reset */
+		ret = reset_control_assert(phy->phy_reset);
+		if (ret) {
+			dev_err(uphy->dev, "phy_reset assert failed\n");
+			goto exit;
+		}
+
+		/* Deassert USB3 PHY CSR reset */
+		ret = reset_control_deassert(phy->phy_reset);
+		if (ret) {
+			dev_err(uphy->dev, "phy_reset deassert failed\n");
+			goto exit;
+		}
+		return 0;
+	}
+
 	dev_dbg(uphy->dev, "Global reset of QMP DP combo phy\n");
 	/* Assert global PHY reset */
 	ret = reset_control_assert(phy->global_phy_reset);
@@ -716,6 +746,49 @@
 	return 0;
 }
 
+static int msm_ssphy_qmp_dp_notifier(struct notifier_block *nb,
+		unsigned long dp_lane, void *ptr)
+{
+	struct msm_ssphy_qmp *phy = container_of(nb,
+			struct msm_ssphy_qmp, dp_nb);
+
+	if (dp_lane == 2 || dp_lane == 4)
+		phy->phy.flags |= PHY_USB_DP_CONCURRENT_MODE;
+	else
+		phy->phy.flags &= ~PHY_USB_DP_CONCURRENT_MODE;
+
+	return 0;
+
+}
+
+static int msm_ssphy_qmp_extcon_register(struct msm_ssphy_qmp *phy,
+				struct device *dev)
+{
+	struct device_node *node = dev->of_node;
+	struct extcon_dev *edev;
+	int ret = 0;
+
+	if (!of_property_read_bool(node, "extcon"))
+		return 0;
+
+	edev = extcon_get_edev_by_phandle(dev, 0);
+	if (IS_ERR(edev)) {
+		dev_err(dev, "failed to get phandle for msm_ssphy_qmp\n");
+		return PTR_ERR(edev);
+	}
+
+	phy->extcon_dp = edev;
+	phy->dp_nb.notifier_call = msm_ssphy_qmp_dp_notifier;
+	ret = extcon_register_blocking_notifier(edev, EXTCON_DISP_DP,
+								&phy->dp_nb);
+	if (ret < 0) {
+		dev_err(dev, "failed to register blocking notifier\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int msm_ssphy_qmp_get_clks(struct msm_ssphy_qmp *phy, struct device *dev)
 {
 	int ret = 0;
@@ -1065,6 +1138,10 @@
 	else
 		phy->phy.reset		= msm_ssphy_qmp_reset;
 
+	ret = msm_ssphy_qmp_extcon_register(phy, dev);
+	if (ret)
+		goto err;
+
 	ret = usb_add_phy_dev(&phy->phy);
 
 err:
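Illustrative note (not part of the patch): the notifier added above treats the notifier value as the active DP lane count, so 2- or 4-lane DP marks the PHY as running in concurrent USB+DP mode and any other value clears the flag, which in turn makes the init/reset paths skip the shared USB3_DP_COM register writes. The sketch below shows how a DP controller driver could feed that value in; it assumes the downstream extcon_blocking_sync() helper, which is not part of this patch and whose signature is an assumption here.

    #include <linux/extcon.h>

    /* Hypothetical DP-side call; dp_lanes is 0, 2 or 4 and is delivered to
     * msm_ssphy_qmp_dp_notifier() as its 'dp_lane' argument.
     * extcon_blocking_sync() is assumed from the downstream extcon core. */
    static void dp_notify_usb_phy(struct extcon_dev *edev, int dp_lanes)
    {
    	extcon_blocking_sync(edev, EXTCON_DISP_DP, dp_lanes);
    }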
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 44c917d..c0c8a88 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1638,12 +1638,14 @@
 
 	atomic_set(&motg->in_lpm, 1);
 
-	/* Enable ASYNC IRQ during LPM */
-	enable_irq(motg->async_irq);
+	if (host_bus_suspend || device_bus_suspend) {
+		/* Enable ASYNC IRQ during LPM */
+		enable_irq(motg->async_irq);
+		enable_irq(motg->irq);
+	}
 	if (motg->phy_irq)
 		enable_irq(motg->phy_irq);
 
-	enable_irq(motg->irq);
 	pm_relax(&motg->pdev->dev);
 
 	dev_dbg(phy->dev, "LPM caps = %lu flags = %lu\n",
@@ -1688,10 +1690,12 @@
 		return 0;
 	}
 
-	disable_irq(motg->irq);
+	pm_stay_awake(&motg->pdev->dev);
 	if (motg->phy_irq)
 		disable_irq(motg->phy_irq);
-	pm_stay_awake(&motg->pdev->dev);
+
+	if (motg->host_bus_suspend || motg->device_bus_suspend)
+		disable_irq(motg->irq);
 
 	/*
 	 * If we are resuming from the device bus suspend, restore
@@ -1814,7 +1818,8 @@
 	enable_irq(motg->irq);
 
 	/* Enable ASYNC_IRQ only during LPM */
-	disable_irq(motg->async_irq);
+	if (motg->host_bus_suspend || motg->device_bus_suspend)
+		disable_irq(motg->async_irq);
 
 	if (motg->phy_irq_pending) {
 		motg->phy_irq_pending = false;
@@ -3676,6 +3681,11 @@
 	struct extcon_dev *edev;
 	int ret = 0;
 
+	if (motg->extcon_registered) {
+		dev_info(&motg->pdev->dev, "extcon_nb already registered\n");
+		return 0;
+	}
+
 	if (!of_property_read_bool(node, "extcon"))
 		return 0;
 
@@ -3712,6 +3722,7 @@
 			goto err;
 		}
 	}
+	motg->extcon_registered = true;
 
 	return 0;
 err:
@@ -4312,7 +4323,7 @@
 	INIT_DELAYED_WORK(&motg->sdp_check, check_for_sdp_connection);
 	INIT_WORK(&motg->notify_charger_work, msm_otg_notify_charger_work);
 	INIT_WORK(&motg->extcon_register_work, msm_otg_extcon_register_work);
-	motg->otg_wq = alloc_ordered_workqueue("k_otg", 0);
+	motg->otg_wq = alloc_ordered_workqueue("k_otg", WQ_FREEZABLE);
 	if (!motg->otg_wq) {
 		pr_err("%s: Unable to create workqueue otg_wq\n",
 			__func__);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index e98590a..9a2c0c7 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -118,7 +118,7 @@
 	r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 			    value, index, buf, bufsize, DEFAULT_TIMEOUT);
-	if (r < bufsize) {
+	if (r < (int)bufsize) {
 		if (r >= 0) {
 			dev_err(&dev->dev,
 				"short control message received (%d < %u)\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 71a8ede..39709fe 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -146,6 +146,7 @@
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+	{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
 	{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index 1bd67b2..bc9ff5e 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -178,7 +178,7 @@
 }  __attribute__((packed));
 
 
-#define TIUMP_GET_PORT_FROM_CODE(c)	(((c) >> 4) - 3)
+#define TIUMP_GET_PORT_FROM_CODE(c)	(((c) >> 6) & 0x01)
 #define TIUMP_GET_FUNC_FROM_CODE(c)	((c) & 0x0f)
 #define TIUMP_INTERRUPT_CODE_LSR	0x03
 #define TIUMP_INTERRUPT_CODE_MSR	0x04
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index d2dab2a..d17f787 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -373,8 +373,10 @@
 			     3, /* get pins */
 			     USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
 			     0, 0, data, 1, 2000);
-	if (rc >= 0)
+	if (rc == 1)
 		*value = *data;
+	else if (rc >= 0)
+		rc = -EIO;
 
 	kfree(data);
 	return rc;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 813035f..7d25267 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -408,12 +408,20 @@
 			  transfer_buffer_length,
 			  KOBIL_TIMEOUT);
 
-	dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
-		__func__, result, transfer_buffer[0]);
+	dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+			result);
+	if (result < 1) {
+		if (result >= 0)
+			result = -EIO;
+		goto out_free;
+	}
+
+	dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
 
 	result = 0;
 	if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
 		result = TIOCM_DSR;
+out_free:
 	kfree(transfer_buffer);
 	return result;
 }
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 6baacf6..03d63ba 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -482,6 +482,9 @@
 	}
 
 	dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+	if (urb->actual_length < 1)
+		goto out;
+
 	dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
 		mos7840_port->MsrLsr, mos7840_port->port_num);
 	data = urb->transfer_buffer;
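Illustrative note (not part of the patch): the ch341, keyspan_pda, kobil_sct and mos7840 hunks above all apply the same rule: a non-negative return from usb_control_msg() (or urb->actual_length) only says how many bytes arrived, so a transfer shorter than expected must be turned into an explicit error before the buffer is parsed. A minimal, hypothetical helper capturing the idiom:

    #include <linux/errno.h>

    /* Map a short USB transfer to -EIO so callers never parse stale data. */
    static int usb_xfer_check_len(int rc, int expected)
    {
    	if (rc < 0)
    		return rc;	/* the USB core already reported an error */
    	if (rc < expected)
    		return -EIO;	/* short transfer: buffer contents unusable */
    	return 0;
    }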
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d982c45..2b81939 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -199,6 +199,8 @@
 #define DELL_PRODUCT_5800_V2_MINICARD_VZW	0x8196  /* Novatel E362 */
 #define DELL_PRODUCT_5804_MINICARD_ATT		0x819b  /* Novatel E371 */
 
+#define DELL_PRODUCT_5821E			0x81d7
+
 #define KYOCERA_VENDOR_ID			0x0c88
 #define KYOCERA_PRODUCT_KPC650			0x17da
 #define KYOCERA_PRODUCT_KPC680			0x180a
@@ -1033,6 +1035,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
+	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },	/* ADU-E100, ADU-310 */
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index e1994e2..fbc7b29 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -790,9 +790,9 @@
 		kfree(urb->transfer_buffer);
 		usb_free_urb(urb);
 		usb_autopm_put_interface_async(serial->interface);
-		spin_lock(&portdata->lock);
+		spin_lock_irq(&portdata->lock);
 		portdata->outstanding_urbs--;
-		spin_unlock(&portdata->lock);
+		spin_unlock_irq(&portdata->lock);
 	}
 
 	sierra_stop_rx_urbs(port);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 6bcb874b..836cb93 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1129,7 +1129,7 @@
 
 static int ti_get_port_from_code(unsigned char code)
 {
-	return (code >> 4) - 3;
+	return (code >> 6) & 0x01;
 }
 
 static int ti_get_func_from_code(unsigned char code)
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 2674da4..6d6acf2 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -87,7 +87,8 @@
 
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()			\
-	{ USB_DEVICE(0x0cad, 0x9011) }	/* Motorola Solutions TETRA PEI */
+	{ USB_DEVICE(0x0cad, 0x9011) },	/* Motorola Solutions TETRA PEI */ \
+	{ USB_DEVICE(0x0cad, 0x9012) }	/* MTP6550 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 8cd2926..344ec86 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -392,6 +392,15 @@
 		return 0;
 	}
 
+	if ((us->fflags & US_FL_NO_ATA_1X) &&
+			(srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
+		memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
+		       sizeof(usb_stor_sense_invalidCDB));
+		srb->result = SAM_STAT_CHECK_CONDITION;
+		done(srb);
+		return 0;
+	}
+
 	/* enqueue the command and wake up the control thread */
 	srb->scsi_done = done;
 	us->srb = srb;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 8dd200f..64af889 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -842,6 +842,27 @@
 		sdev->skip_ms_page_8 = 1;
 		sdev->wce_default_on = 1;
 	}
+
+	/*
+	 * Some disks return the total number of blocks in response
+	 * to READ CAPACITY rather than the highest block number.
+	 * If this device makes that mistake, tell the sd driver.
+	 */
+	if (devinfo->flags & US_FL_FIX_CAPACITY)
+		sdev->fix_capacity = 1;
+
+	/*
+	 * Some devices don't like MODE SENSE with page=0x3f,
+	 * which is the command used for checking if a device
+	 * is write-protected.  Now that we tell the sd driver
+	 * to do a 192-byte transfer with this command the
+	 * majority of devices work fine, but a few still can't
+	 * handle it.  The sd driver will simply assume those
+	 * devices are write-enabled.
+	 */
+	if (devinfo->flags & US_FL_NO_WP_DETECT)
+		sdev->skip_ms_page_3f = 1;
+
 	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
 	return 0;
 }
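Illustrative note (not part of the patch): US_FL_FIX_CAPACITY and US_FL_NO_WP_DETECT normally reach devinfo->flags through unusual_devs.h/unusual_uas.h entries such as the DJI CineSSD one below, or through the usb-storage quirks module parameter. Assuming the usual quirk-letter mapping (with 't' selecting NO_ATA_1X), the same CineSSD workaround could be tried at boot with:

    usb-storage.quirks=2ca3:0031:t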
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fc5ed35..0a86b3f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2307,6 +2307,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_GO_SLOW ),
 
+/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
+UNUSUAL_DEV(  0x2ca3, 0x0031, 0x0000, 0x9999,
+		"DJI",
+		"CineSSD",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_ATA_1X),
+
 /*
  * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
  * Mio Moov 330
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 8c9421b..6bf86ca 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -230,7 +230,7 @@
 
 	result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
 				    0, secd, sizeof(*secd));
-	if (result < sizeof(*secd)) {
+	if (result < (int)sizeof(*secd)) {
 		dev_err(dev, "Can't read security descriptor or "
 			"not enough data: %d\n", result);
 		goto out;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 9a53912..5d3ba74 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -873,6 +873,7 @@
 error_rc_add:
 	usb_put_intf(iface);
 	usb_put_dev(hwarc->usb_dev);
+	kfree(hwarc);
 error_alloc:
 	uwb_rc_put(uwb_rc);
 error_rc_alloc:
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 43559be..7338e43 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/vgaarb.h>
+#include <linux/nospec.h>
 
 #include "vfio_pci_private.h"
 
@@ -755,6 +756,9 @@
 			if (info.index >=
 			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
 				return -EINVAL;
+			info.index = array_index_nospec(info.index,
+							VFIO_PCI_NUM_REGIONS +
+							vdev->num_regions);
 
 			i = info.index - VFIO_PCI_NUM_REGIONS;
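Illustrative note (not part of the patch): array_index_nospec() clamps a bounds-checked but user-controlled index so it cannot drive a speculative out-of-bounds load (Spectre v1). A self-contained sketch of the pattern:

    #include <linux/kernel.h>
    #include <linux/nospec.h>

    static u32 table[16];

    static u32 read_entry(unsigned int idx)
    {
    	if (idx >= ARRAY_SIZE(table))
    		return 0;
    	/* After the branch, clamp idx so speculation cannot exceed the bound. */
    	idx = array_index_nospec(idx, ARRAY_SIZE(table));
    	return table[idx];
    }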
 
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index d781428..d143d08 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -696,18 +696,23 @@
 	group = vfio_iommu_group_get(dev);
 	if (!group) {
 		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto put_reset;
 	}
 
 	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
-	if (ret) {
-		vfio_iommu_group_put(group, dev);
-		return ret;
-	}
+	if (ret)
+		goto put_iommu;
 
 	mutex_init(&vdev->igate);
 
 	return 0;
+
+put_iommu:
+	vfio_iommu_group_put(group, dev);
+put_reset:
+	vfio_platform_put_reset(vdev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
 
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 487586e..353c93b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1052,7 +1052,8 @@
 	if (ubufs)
 		vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-	sockfd_put(sock);
+	if (sock)
+		sockfd_put(sock);
 err_vq:
 	mutex_unlock(&vq->mutex);
 err:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 8b6489a..c569b64 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -905,7 +905,7 @@
 	list_for_each_entry_safe(node, n, &d->pending_list, node) {
 		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
 		if (msg->iova <= vq_msg->iova &&
-		    msg->iova + msg->size - 1 > vq_msg->iova &&
+		    msg->iova + msg->size - 1 >= vq_msg->iova &&
 		    vq_msg->type == VHOST_IOTLB_MISS) {
 			vhost_poll_queue(&node->vq->poll);
 			list_del(&node->node);
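Illustrative note: the switch from '>' to '>=' matters for the smallest possible update. For an IOTLB entry with iova = 0x1000 and size = 1, the last covered address is 0x1000 + 1 - 1 = 0x1000; the old strict comparison never matched a miss pending at exactly 0x1000, so the waiting virtqueue was never re-polled.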
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 52bbbc4..b2c8819 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1703,12 +1703,12 @@
 	return 0;
 }
 
-static int do_unregister_framebuffer(struct fb_info *fb_info)
+static int unbind_console(struct fb_info *fb_info)
 {
 	struct fb_event event;
-	int i, ret = 0;
+	int ret;
+	int i = fb_info->node;
 
-	i = fb_info->node;
 	if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
 		return -EINVAL;
 
@@ -1723,17 +1723,29 @@
 	unlock_fb_info(fb_info);
 	console_unlock();
 
+	return ret;
+}
+
+static int __unlink_framebuffer(struct fb_info *fb_info);
+
+static int do_unregister_framebuffer(struct fb_info *fb_info)
+{
+	struct fb_event event;
+	int ret;
+
+	ret = unbind_console(fb_info);
+
 	if (ret)
 		return -EINVAL;
 
 	pm_vt_switch_unregister(fb_info->dev);
 
-	unlink_framebuffer(fb_info);
+	__unlink_framebuffer(fb_info);
 	if (fb_info->pixmap.addr &&
 	    (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
 		kfree(fb_info->pixmap.addr);
 	fb_destroy_modelist(&fb_info->modelist);
-	registered_fb[i] = NULL;
+	registered_fb[fb_info->node] = NULL;
 	num_registered_fb--;
 	fb_cleanup_device(fb_info);
 	event.info = fb_info;
@@ -1746,7 +1758,7 @@
 	return 0;
 }
 
-int unlink_framebuffer(struct fb_info *fb_info)
+static int __unlink_framebuffer(struct fb_info *fb_info)
 {
 	int i;
 
@@ -1758,6 +1770,20 @@
 		device_destroy(fb_class, MKDEV(FB_MAJOR, i));
 		fb_info->dev = NULL;
 	}
+
+	return 0;
+}
+
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+	int ret;
+
+	ret = __unlink_framebuffer(fb_info);
+	if (ret)
+		return ret;
+
+	unbind_console(fb_info);
+
 	return 0;
 }
 EXPORT_SYMBOL(unlink_framebuffer);
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 2510fa72..de119f1 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -644,7 +644,7 @@
  *
  *     Valid mode specifiers for @mode_option:
  *
- *     <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m] or
+ *     <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][p][m] or
  *     <name>[-<bpp>][@<refresh>]
  *
  *     with <xres>, <yres>, <bpp> and <refresh> decimal numbers and
@@ -653,10 +653,10 @@
  *      If 'M' is present after yres (and before refresh/bpp if present),
  *      the function will compute the timings using VESA(tm) Coordinated
  *      Video Timings (CVT).  If 'R' is present after 'M', will compute with
- *      reduced blanking (for flatpanels).  If 'i' is present, compute
- *      interlaced mode.  If 'm' is present, add margins equal to 1.8%
- *      of xres rounded down to 8 pixels, and 1.8% of yres. The char
- *      'i' and 'm' must be after 'M' and 'R'. Example:
+ *      reduced blanking (for flatpanels).  If 'i' or 'p' are present, compute
+ *      interlaced or progressive mode.  If 'm' is present, add margins equal
+ *      to 1.8% of xres rounded down to 8 pixels, and 1.8% of yres. The chars
+ *      'i', 'p' and 'm' must be after 'M' and 'R'. Example:
  *
  *      1024x768MR-8@60m - Reduced blank with margins at 60Hz.
  *
@@ -697,7 +697,8 @@
 		unsigned int namelen = strlen(name);
 		int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
 		unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0;
-		int yres_specified = 0, cvt = 0, rb = 0, interlace = 0;
+		int yres_specified = 0, cvt = 0, rb = 0;
+		int interlace_specified = 0, interlace = 0;
 		int margins = 0;
 		u32 best, diff, tdiff;
 
@@ -748,9 +749,17 @@
 				if (!cvt)
 					margins = 1;
 				break;
+			case 'p':
+				if (!cvt) {
+					interlace = 0;
+					interlace_specified = 1;
+				}
+				break;
 			case 'i':
-				if (!cvt)
+				if (!cvt) {
 					interlace = 1;
+					interlace_specified = 1;
+				}
 				break;
 			default:
 				goto done;
@@ -819,11 +828,21 @@
 			if ((name_matches(db[i], name, namelen) ||
 			     (res_specified && res_matches(db[i], xres, yres))) &&
 			    !fb_try_mode(var, info, &db[i], bpp)) {
-				if (refresh_specified && db[i].refresh == refresh)
-					return 1;
+				const int db_interlace = (db[i].vmode &
+					FB_VMODE_INTERLACED ? 1 : 0);
+				int score = abs(db[i].refresh - refresh);
 
-				if (abs(db[i].refresh - refresh) < diff) {
-					diff = abs(db[i].refresh - refresh);
+				if (interlace_specified)
+					score += abs(db_interlace - interlace);
+
+				if (!interlace_specified ||
+				    db_interlace == interlace)
+					if (refresh_specified &&
+					    db[i].refresh == refresh)
+						return 1;
+
+				if (score < diff) {
+					diff = score;
 					best = i;
 				}
 			}
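Illustrative note (not part of the patch): with the new 'p' specifier a progressive mode can be requested explicitly, and the scoring change prefers a database entry whose interlace setting matches the request even when refresh rates tie. For example, on a driver that feeds the video= option string into fb_find_mode() (the exact prefix is driver-specific):

    video=1024x768p-16@60    # prefer a progressive 1024x768 entry
    video=1024x768i-16@60    # prefer an interlaced entry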
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 1e56b50..8c93ad1d 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -302,6 +302,7 @@
 	dma_free_coherent(&pdev->dev, framesize, (void *)fb->fb.screen_base,
 						fb->fb.fix.smem_start);
 	iounmap(fb->reg_base);
+	kfree(fb);
 	return 0;
 }
 
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
index 5e9e49c..903a083 100644
--- a/drivers/video/fbdev/msm/mdp3.c
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -1812,7 +1812,8 @@
 		pr_err("invalid bus handle %d\n", bus_handle->handle);
 		return -EINVAL;
 	}
-	mdp3_calc_dma_res(panel_info, &mdp_clk_rate, &ab, &ib, panel_info->bpp);
+	mdp3_calc_dma_res(panel_info, &mdp_clk_rate, &ab,
+					&ib, MAX_BPP_SUPPORTED);
 
 	mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
 			MDP3_CLIENT_DMA_P);
@@ -1968,6 +1969,8 @@
 	int rc;
 	struct mdss_data_type *mdata;
 	struct mdss_debug_data *mdd;
+	struct mdss_debug_base *mdp_dbg_blk = NULL;
+	struct mdss_debug_base *vbif_dbg_blk = NULL;
 
 	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
 	if (!mdata)
@@ -1996,8 +1999,27 @@
 	debugfs_create_file("stat", 0644, mdd->root, mdp3_res,
 				&mdp3_debug_dump_stats_fops);
 
-	rc = mdss_debug_register_base(NULL, mdp3_res->mdp_base,
-					mdp3_res->mdp_reg_size, NULL);
+	/* MDP Debug base registration */
+	rc = mdss_debug_register_base("mdp", mdp3_res->mdp_base,
+					mdp3_res->mdp_reg_size, &mdp_dbg_blk);
+	if (rc)
+		return rc;
+
+	mdss_debug_register_dump_range(pdev, mdp_dbg_blk, "qcom,regs-dump-mdp",
+		"qcom,regs-dump-names-mdp", "qcom,regs-dump-xin-id-mdp");
+
+
+	/* VBIF Debug base registration */
+	if (mdp3_res->vbif_base) {
+		rc = mdss_debug_register_base("vbif", mdp3_res->vbif_base,
+					mdp3_res->vbif_reg_size, &vbif_dbg_blk);
+		if (rc)
+			return rc;
+
+		mdss_debug_register_dump_range(pdev, vbif_dbg_blk,
+			 "qcom,regs-dump-vbif", "qcom,regs-dump-names-vbif",
+						 "qcom,regs-dump-xin-id-vbif");
+	}
 
 	return rc;
 }
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index c976c0e..70864c4 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -32,8 +32,6 @@
 #include "mdss_spi_panel.h"
 #include "mdss_sync.h"
 
-#define VSYNC_EXPIRE_TICK	4
-
 static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd);
 static int mdp3_overlay_unset(struct msm_fb_data_type *mfd, int ndx);
 static int mdp3_histogram_stop(struct mdp3_session_data *session,
@@ -153,16 +151,9 @@
 	return blocking_notifier_call_chain(&ses->notifier_head, event, ses);
 }
 
-static void mdp3_dispatch_dma_done(struct kthread_work *work)
+static void __mdp3_dispatch_dma_done(struct mdp3_session_data *session)
 {
-	struct mdp3_session_data *session;
-	int cnt = 0;
-
-	pr_debug("%s\n", __func__);
-	session = container_of(work, struct mdp3_session_data,
-				dma_done_work);
-	if (!session)
-		return;
+	int cnt;
 
 	cnt = atomic_read(&session->dma_done_cnt);
 	MDSS_XLOG(cnt);
@@ -173,6 +164,29 @@
 	}
 }
 
+void mdp3_flush_dma_done(struct mdp3_session_data *session)
+{
+	if (!session)
+		return;
+
+	pr_debug("%s\n", __func__);
+
+	__mdp3_dispatch_dma_done(session);
+}
+
+static void mdp3_dispatch_dma_done(struct kthread_work *work)
+{
+	struct mdp3_session_data *session;
+
+	pr_debug("%s\n", __func__);
+	session = container_of(work, struct mdp3_session_data,
+				dma_done_work);
+	if (!session)
+		return;
+
+	__mdp3_dispatch_dma_done(session);
+}
+
 static void mdp3_dispatch_clk_off(struct work_struct *work)
 {
 	struct mdp3_session_data *session;
@@ -188,7 +202,8 @@
 		return;
 
 	mutex_lock(&session->lock);
-	MDSS_XLOG(0x111);
+	MDSS_XLOG(0x111, atomic_read(&session->vsync_countdown),
+			session->dma->vsync_period);
 	if (session->vsync_enabled ||
 		atomic_read(&session->vsync_countdown) > 0) {
 		mutex_unlock(&session->lock);
@@ -206,14 +221,14 @@
 	if (session->intf->active) {
 retry_dma_done:
 		rc = wait_for_completion_timeout(&session->dma_completion,
-							WAIT_DMA_TIMEOUT);
-		MDSS_XLOG(0x222);
+					dma_timeout_value(session->dma));
 		if (rc <= 0) {
 			struct mdss_panel_data *panel;
 
 			panel = session->panel;
 			pr_debug("cmd kickoff timed out (%d)\n", rc);
 			dmap_busy = session->dma->busy();
+			MDSS_XLOG(0x222, dmap_busy);
 			if (dmap_busy) {
 				if (--retry_count) {
 					pr_err("dmap is busy, retry %d\n",
@@ -222,7 +237,7 @@
 					goto retry_dma_done;
 				}
 				pr_err("dmap is still busy, bug_on\n");
-				WARN_ON(1);
+				BUG_ON(1);
 			} else {
 				pr_debug("dmap is not busy, continue\n");
 			}
@@ -303,10 +318,13 @@
 	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
 
 	/* We are counting down to turn off clocks */
-	if (atomic_read(&session->vsync_countdown) > 0)
+	if (atomic_read(&session->vsync_countdown) > 0) {
 		atomic_dec(&session->vsync_countdown);
-	if (atomic_read(&session->vsync_countdown) == 0)
-		schedule_work(&session->clk_off_work);
+		MDSS_XLOG(atomic_read(&session->vsync_countdown),
+				session->dma->vsync_period);
+		if (atomic_read(&session->vsync_countdown) == 0)
+			schedule_work(&session->clk_off_work);
+	}
 }
 
 void mdp3_ctrl_reset_countdown(struct mdp3_session_data *session,
@@ -314,6 +332,8 @@
 {
 	if (mdp3_ctrl_get_intf_type(mfd) == MDP3_DMA_OUTPUT_SEL_DSI_CMD)
 		atomic_set(&session->vsync_countdown, VSYNC_EXPIRE_TICK);
+
+	MDSS_XLOG(atomic_read(&session->vsync_countdown));
 }
 
 static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable)
@@ -378,7 +398,7 @@
 	 */
 	if (mod_vsync_timer && (intf_type != MDP3_DMA_OUTPUT_SEL_SPI_CMD)) {
 		mod_timer(&mdp3_session->vsync_timer,
-			jiffies + msecs_to_jiffies(mdp3_session->vsync_period));
+		jiffies + msecs_to_jiffies(mdp3_session->dma->vsync_period));
 	} else if (enable && !mdp3_session->clk_on) {
 		mdp3_ctrl_reset_countdown(mdp3_session, mfd);
 		mdp3_ctrl_clk_enable(mfd, 1);
@@ -398,7 +418,7 @@
 		pr_debug("mdp3_vsync_timer_func trigger\n");
 		vsync_notify_handler(session);
 		mod_timer(&session->vsync_timer,
-			jiffies + msecs_to_jiffies(session->vsync_period));
+			jiffies + msecs_to_jiffies(session->dma->vsync_period));
 	}
 }
 
@@ -905,6 +925,7 @@
 	int rc = 0;
 	struct mdp3_session_data *mdp3_session;
 	struct mdss_panel_data *panel;
+	u32 framerate = 0;
 
 	pr_debug("mdp3_ctrl_on\n");
 	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
@@ -1027,7 +1048,7 @@
 		mdp3_session->status = 1;
 
 	mdp3_ctrl_pp_resume(mfd);
-	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state);
+
 on_error:
 	if (rc || (mdp3_res->idle_pc_enabled &&
 			(mfd->panel_info->type == MIPI_CMD_PANEL))) {
@@ -1038,6 +1059,12 @@
 		pm_runtime_put(&mdp3_res->pdev->dev);
 	}
 end:
+	framerate = mdss_panel_get_framerate(mfd->panel_info,
+			FPS_RESOLUTION_HZ);
+	if (framerate != 0)
+		mdp3_session->dma->vsync_period = DIV_ROUND_UP(1000, framerate);
+
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, mfd->panel_power_state, framerate);
 	mutex_unlock(&mdp3_session->lock);
 	return rc;
 }
@@ -1054,6 +1081,7 @@
 	bool intf_stopped = true;
 	struct mdp3_session_data *mdp3_session;
 	struct mdss_panel_data *panel;
+	u32 framerate = 0;
 
 	pr_debug("mdp3_ctrl_off\n");
 	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
@@ -1242,7 +1270,13 @@
 		mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
 		mdp3_bufq_deinit(&mdp3_session->bufq_in, client);
 	}
-	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__);
+
+	framerate = mdss_panel_get_framerate(mfd->panel_info,
+			FPS_RESOLUTION_HZ);
+	if (framerate != 0)
+		mdp3_session->dma->vsync_period = DIV_ROUND_UP(1000, framerate);
+
+	MDSS_XLOG(XLOG_FUNC_EXIT, __LINE__, framerate);
 	mutex_unlock(&mdp3_session->lock);
 	/* Release the last reference to the runtime device */
 	pm_runtime_put(&mdp3_res->pdev->dev);
@@ -1511,6 +1545,7 @@
 	struct mdss_panel_data *panel;
 	int frame_rate = DEFAULT_FRAME_RATE;
 	int stride;
+	int prev_bl;
 
 	if (!mfd || !mfd->mdp.private1)
 		return -EINVAL;
@@ -1528,22 +1563,6 @@
 		return -EPERM;
 	}
 
-	if (panel_info->partial_update_enabled &&
-		is_roi_valid(mdp3_session->dma->source_config,
-			     cmt_data->l_roi) &&
-		update_roi(mdp3_session->dma->roi, cmt_data->l_roi)) {
-		mdp3_session->dma->roi.x = cmt_data->l_roi.x;
-		mdp3_session->dma->roi.y = cmt_data->l_roi.y;
-		mdp3_session->dma->roi.w = cmt_data->l_roi.w;
-		mdp3_session->dma->roi.h = cmt_data->l_roi.h;
-		mdp3_session->dma->update_src_cfg = true;
-		pr_debug("%s: ROI: x=%d y=%d w=%d h=%d\n", __func__,
-			mdp3_session->dma->roi.x,
-			mdp3_session->dma->roi.y,
-			mdp3_session->dma->roi.w,
-			mdp3_session->dma->roi.h);
-	}
-
 	panel = mdp3_session->panel;
 	mutex_lock(&mdp3_res->fs_idle_pc_lock);
 	if (mdp3_session->in_splash_screen ||
@@ -1556,6 +1575,9 @@
 			mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 			return -EINVAL;
 		}
+		if ((mdp3_session->dma->roi.x || mdp3_session->dma->roi.y) &&
+			panel_info->partial_update_enabled)
+			mdp3_session->dma->update_src_cfg = true;
 	}
 	mutex_unlock(&mdp3_res->fs_idle_pc_lock);
 
@@ -1567,7 +1589,8 @@
 		mutex_unlock(&mdp3_session->lock);
 		return -EPERM;
 	}
-	MDSS_XLOG(0x111);
+	MDSS_XLOG(0x111, mdp3_session->dma->vsync_period);
+
 	mdp3_ctrl_notify(mdp3_session, MDP_NOTIFY_FRAME_BEGIN);
 	data = mdp3_bufq_pop(&mdp3_session->bufq_in);
 	if (data) {
@@ -1637,9 +1660,15 @@
 	}
 
 	mdp3_session->vsync_before_commit = 0;
+	prev_bl = mfd->bl_level;
 	if (!splash_done || mdp3_session->esd_recovery == true) {
-		if (panel && panel->set_backlight)
-			panel->set_backlight(panel, panel->panel_info.bl_max);
+		if (panel && panel->set_backlight) {
+			if (mdp3_session->esd_recovery == true && prev_bl > 0)
+				panel->set_backlight(panel, prev_bl);
+			else
+				panel->set_backlight(panel,
+					panel->panel_info.bl_max);
+		}
 		splash_done = true;
 		mdp3_session->esd_recovery = false;
 	}
@@ -2894,7 +2923,7 @@
 
 	if (session->dma_active) {
 		rc = wait_for_completion_timeout(&session->dma_completion,
-			KOFF_TIMEOUT);
+			 dma_timeout_value(session->dma));
 		if (rc > 0) {
 			session->dma_active = 0;
 			rc = 0;
@@ -2963,14 +2992,17 @@
 		return -ENOMEM;
 	}
 
-	/* Add retire vsync handler */
-	retire_client.handler = mdp3_vsync_retire_handle_vsync;
-	retire_client.arg = mdp3_session;
+	if (mfd->panel_info->type == MIPI_CMD_PANEL) {
+		/* Add retire vsync handler */
+		retire_client.handler = mdp3_vsync_retire_handle_vsync;
+		retire_client.arg = mdp3_session;
 
-	if (mdp3_session->dma)
-		mdp3_session->dma->retire_client = retire_client;
+		if (mdp3_session->dma)
+			mdp3_session->dma->retire_client = retire_client;
 
-	INIT_WORK(&mdp3_session->retire_work, mdp3_vsync_retire_work_handler);
+		INIT_WORK(&mdp3_session->retire_work,
+			mdp3_vsync_retire_work_handler);
+	}
 
 	return 0;
 }
@@ -3044,6 +3076,7 @@
 		pr_err("fail to init dma\n");
 		goto init_done;
 	}
+	mdp3_session->dma->session = mdp3_session;
 
 	intf_type = mdp3_ctrl_get_intf_type(mfd);
 	mdp3_session->intf = mdp3_get_display_intf(intf_type);
@@ -3071,7 +3104,11 @@
 	init_timer(&mdp3_session->vsync_timer);
 	mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
 	mdp3_session->vsync_timer.data = (u32)mdp3_session;
-	mdp3_session->vsync_period = 1000 / frame_rate;
+
+	if (frame_rate != 0)
+		mdp3_session->dma->vsync_period =
+				DIV_ROUND_UP(1000, frame_rate);
+
 	mfd->mdp.private1 = mdp3_session;
 	init_completion(&mdp3_session->dma_completion);
 	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
@@ -3145,13 +3182,10 @@
 	mdp3_session->vsync_before_commit = true;
 	mdp3_session->dyn_pu_state = mfd->panel_info->partial_update_enabled;
 
-	if (mfd->panel_info->mipi.dms_mode ||
-			mfd->panel_info->type == MIPI_CMD_PANEL) {
-		rc = mdp3_vsync_retire_setup(mfd);
-		if (IS_ERR_VALUE(rc)) {
-			pr_err("unable to create vsync timeline\n");
-			goto init_done;
-		}
+	rc = mdp3_vsync_retire_setup(mfd);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("unable to create vsync timeline\n");
+		goto init_done;
 	}
 init_done:
 	if (IS_ERR_VALUE(rc))
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
index 5193af1..de90127 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.h
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -45,7 +45,6 @@
 	struct msm_fb_data_type *mfd;
 	ktime_t vsync_time;
 	struct timer_list vsync_timer;
-	int vsync_period;
 	struct kernfs_node *vsync_event_sd;
 	struct kernfs_node *bl_event_sd;
 	struct mdp_overlay overlay;
@@ -92,5 +91,6 @@
 int mdp3_ctrl_get_pack_pattern(u32 imgType);
 int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
 int mdp3_get_ion_client(struct msm_fb_data_type *mfd);
+void mdp3_flush_dma_done(struct mdp3_session_data *mdp3_session);
 
 #endif /* MDP3_CTRL_H */
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index b223c87..f37c378 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -16,6 +16,7 @@
 #include "mdp3_dma.h"
 #include "mdp3_hwio.h"
 #include "mdss_debug.h"
+#include "mdp3_ctrl.h"
 
 #define DMA_STOP_POLL_SLEEP_US 1000
 #define DMA_STOP_POLL_TIMEOUT_US 200000
@@ -36,7 +37,13 @@
 	struct mdp3_notification retire_client;
 	unsigned int wait_for_next_vs;
 
+	if (!dma) {
+		pr_err("dma is null\n");
+		return;
+	}
+
 	pr_debug("mdp3_vsync_intr_handler\n");
+	MDSS_XLOG(0x111, dma->vsync_period);
 	spin_lock(&dma->dma_lock);
 	vsync_client = dma->vsync_client;
 	retire_client = dma->retire_client;
@@ -61,6 +68,11 @@
 	struct mdp3_dma *dma = (struct mdp3_dma *)arg;
 	struct mdp3_notification dma_client;
 
+	if (!dma) {
+		pr_err("dma is null\n");
+		return;
+	}
+
 	pr_debug("mdp3_dma_done_intr_handler\n");
 	spin_lock(&dma->dma_lock);
 	dma_client = dma->dma_notifier_client;
@@ -76,6 +88,11 @@
 	struct mdp3_dma *dma = (struct mdp3_dma *)arg;
 	u32 isr, mask;
 
+	if (!dma) {
+		pr_err("dma is null\n");
+		return;
+	}
+
 	isr = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_STATUS);
 	mask = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_ENABLE);
 	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_CLEAR, isr);
@@ -674,7 +691,7 @@
 			ATRACE_BEGIN("mdp3_wait_for_dma_comp");
 retry_dma_done:
 			rc = wait_for_completion_timeout(&dma->dma_comp,
-				KOFF_TIMEOUT);
+			 dma_timeout_value(dma));
 			if (rc <= 0 && --retry_count) {
 				int  vsync_status;
 
@@ -735,7 +752,7 @@
 		ATRACE_BEGIN("mdp3_wait_for_vsync_comp");
 retry_vsync:
 		rc = wait_for_completion_timeout(&dma->vsync_comp,
-			KOFF_TIMEOUT);
+			 dma_timeout_value(dma));
 		if (rc <= 0 && --retry_count) {
 			int vsync = MDP3_REG_READ(MDP3_REG_INTR_STATUS) &
 					(1 << MDP3_INTR_LCDC_START_OF_FRAME);
@@ -1064,6 +1081,16 @@
 
 	reinit_completion(&dma->dma_comp);
 	dma->vsync_client.handler = NULL;
+
+	/*
+	 * Interrupts are disabled.
+	 * Check for blocked dma done interrupt.
+	 * Flush items waiting for dma done interrupt.
+	 */
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD &&
+		atomic_read(&dma->session->dma_done_cnt))
+		mdp3_flush_dma_done(dma->session);
+
 	return ret;
 }
 
diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h
index ec327b6..03d3cf0 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.h
+++ b/drivers/video/fbdev/msm/mdp3_dma.h
@@ -25,6 +25,7 @@
 #define MDP_HISTOGRAM_CSC_VECTOR_MAX 0x200
 #define MDP_HISTOGRAM_BIN_NUM	32
 #define MDP_LUT_SIZE 256
+#define VSYNC_EXPIRE_TICK     4
 
 enum {
 	MDP3_DMA_P,
@@ -255,6 +256,7 @@
 	u32 capability;
 	int in_use;
 	int available;
+	int vsync_period;
 
 	spinlock_t dma_lock;
 	spinlock_t histo_lock;
@@ -291,6 +293,8 @@
 	struct fb_cmap *gc_cmap;
 	struct fb_cmap *hist_cmap;
 
+	struct mdp3_session_data *session;
+
 	bool (*busy)(void);
 
 	int (*dma_config)(struct mdp3_dma *dma,
@@ -384,6 +388,14 @@
 	int (*stop)(struct mdp3_intf *intf);
 };
 
+static inline unsigned long dma_timeout_value(struct mdp3_dma *dma)
+{
+	if (dma->vsync_period)
+		return msecs_to_jiffies(VSYNC_EXPIRE_TICK * dma->vsync_period);
+	else
+		return msecs_to_jiffies(84);
+}
+
 int mdp3_dma_init(struct mdp3_dma *dma);
 
 int mdp3_intf_init(struct mdp3_intf *intf);
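Illustrative note: a quick check of dma_timeout_value() above. For a 60 fps panel, mdp3_ctrl stores vsync_period = DIV_ROUND_UP(1000, 60) = 17 ms, so waits become msecs_to_jiffies(4 * 17) = 68 ms, i.e. four vsync ticks at the panel's own rate; when the frame rate is not yet known, vsync_period is 0 and the fixed 84 ms fallback is used instead.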
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index 86f3bce..bdf3b7a 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -456,6 +456,39 @@
 	return 0;
 }
 
+/**
+ * mdss_debug_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if valid; false otherwise
+ */
+static bool mdss_debug_base_is_valid_range(u32 off, u32 cnt)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+	struct range_dump_node *node;
+	struct mdss_debug_base *base;
+
+	pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+	list_for_each_entry(base, &mdd->base_list, head) {
+		list_for_each_entry(node, &base->dump_list, head) {
+			pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+					node->offset.start, node->offset.end);
+
+			if (node->offset.start <= off
+					&& off <= node->offset.end
+					&& off + cnt <= node->offset.end) {
+				pr_debug("valid range requested\n");
+				return true;
+			}
+		}
+	}
+
+	pr_err("invalid range requested\n");
+	return false;
+}
+
 static ssize_t mdss_debug_base_offset_write(struct file *file,
 		    const char __user *user_buf, size_t count, loff_t *ppos)
 {
@@ -487,6 +520,9 @@
 	if (cnt > (dbg->max_offset - off))
 		cnt = dbg->max_offset - off;
 
+	if (!mdss_debug_base_is_valid_range(off, cnt))
+		return -EINVAL;
+
 	mutex_lock(&mdss_debug_lock);
 	dbg->off = off;
 	dbg->cnt = cnt;
diff --git a/drivers/video/fbdev/msm/mdss_debug_xlog.c b/drivers/video/fbdev/msm/mdss_debug_xlog.c
index ed00fc5..fbc46674 100644
--- a/drivers/video/fbdev/msm/mdss_debug_xlog.c
+++ b/drivers/video/fbdev/msm/mdss_debug_xlog.c
@@ -486,8 +486,8 @@
 		}
 	}
 
-	if (!from_isr)
-		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+	if (!from_isr && mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(MDP_BLOCK_POWER_ON);
 
 	for (i = 0; i < len; i++) {
 		u32 x0, x4, x8, xc;
@@ -511,8 +511,8 @@
 		addr += 16;
 	}
 
-	if (!from_isr)
-		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+	if (!from_isr && mdata->debug_inf.debug_enable_clock)
+		mdata->debug_inf.debug_enable_clock(MDP_BLOCK_POWER_OFF);
 }
 
 static void mdss_dump_reg_by_ranges(struct mdss_debug_base *dbg,
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index b4e4bdd..5d8398e 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1951,6 +1951,9 @@
 		return;
 	}
 
+	if (ctrl_pdata->timing_db_mode)
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e8, 0x1);
+
 	vsync_period =
 		mdss_panel_get_vtotal(&pdata->panel_info);
 	hsync_period =
@@ -1960,23 +1963,13 @@
 	new_dsi_v_total =
 		((vsync_period - 1) << 16) | (hsync_period - 1);
 
-	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
-			(current_dsi_v_total | 0x8000000));
-	if (new_dsi_v_total & 0x8000000) {
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
-				new_dsi_v_total);
-	} else {
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
-				(new_dsi_v_total | 0x8000000));
-		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
-				(new_dsi_v_total & 0x7ffffff));
-	}
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C, new_dsi_v_total);
 
 	if (ctrl_pdata->timing_db_mode)
 		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e4, 0x1);
 
-	pr_debug("%s new_fps:%d vsync:%d hsync:%d frame_rate:%d\n",
-			__func__, new_fps, vsync_period, hsync_period,
+	pr_debug("%s new_fps:%d new_vtotal:0x%X cur_vtotal:0x%X frame_rate:%d\n",
+			__func__, new_fps, new_dsi_v_total, current_dsi_v_total,
 			ctrl_pdata->panel_data.panel_info.mipi.frame_rate);
 
 	ctrl_pdata->panel_data.panel_info.current_fps = new_fps;
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 72adb17..850b872 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -1908,7 +1908,7 @@
 	ret = mfd->mdp.off_fnc(mfd);
 	if (ret)
 		mfd->panel_power_state = cur_power_state;
-	else if (mdss_panel_is_power_off(req_power_state))
+	else if (!mdss_panel_is_power_on_interactive(req_power_state))
 		mdss_fb_release_fences(mfd);
 	mfd->op_enable = true;
 	complete(&mfd->power_off_comp);
@@ -2032,6 +2032,14 @@
 	 * supported for command mode panels. For all other panel, treat lp
 	 * mode as full unblank and ulp mode as full blank.
 	 */
+	if ((mfd->panel_info->type == SPI_PANEL) &&
+		((blank_mode == BLANK_FLAG_LP) ||
+		(blank_mode == BLANK_FLAG_ULP))) {
+		pr_debug("lp/ulp modes are not supported for SPI panels\n");
+		if (mdss_fb_is_power_on_interactive(mfd))
+			return 0;
+	}
+
 	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
 		if (blank_mode == BLANK_FLAG_LP) {
 			pr_debug("lp mode only valid for cmd mode panels\n");
@@ -2100,7 +2108,10 @@
 	int ret;
 	struct mdss_panel_data *pdata;
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	ktime_t start, end;
+	s64 actual_time;
 
+	start = ktime_get();
 	ret = mdss_fb_pan_idle(mfd);
 	if (ret) {
 		pr_warn("mdss_fb_pan_idle for fb%d failed. ret=%d\n",
@@ -2133,7 +2144,12 @@
 	}
 
 	ret = mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
-	MDSS_XLOG(blank_mode);
+	end = ktime_get();
+	actual_time = ktime_ms_delta(end, start);
+
+	MDSS_XLOG(blank_mode, actual_time);
+	pr_debug("blank_mode: %d and transition time: %lldms\n",
+					blank_mode, actual_time);
 
 end:
 	mutex_unlock(&mfd->mdss_sysfs_lock);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index 7837559..ed98df6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -1204,16 +1204,7 @@
 	new_vsync_period_f0 = (vsync_period * hsync_period);
 
 	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
-			current_vsync_period_f0 | 0x800000);
-	if (new_vsync_period_f0 & 0x800000) {
-		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
 			new_vsync_period_f0);
-	} else {
-		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
-			new_vsync_period_f0 | 0x800000);
-		mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
-			new_vsync_period_f0 & 0x7fffff);
-	}
 
 	pr_debug("if:%d vtotal:%d htotal:%d f0:0x%x nw_f0:0x%x\n",
 		ctx->intf_num, vsync_period, hsync_period,
@@ -1432,6 +1423,11 @@
 			}
 
 			/*
+			 * Make sure the controller settings are committed
+			 */
+			wmb();
+
+			/*
 			 * MDP INTF registers support DB on targets
 			 * starting from MDP v1.5.
 			 */
@@ -1743,7 +1739,9 @@
 	h_total = mdss_panel_get_htotal(pinfo, true);
 
 	fetch_start = (v_total - pinfo->prg_fet) * h_total + 1;
-	fetch_enable = BIT(31);
+
+	fetch_enable = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+	fetch_enable |= BIT(31);
 
 	if (pinfo->dynamic_fps && (pinfo->dfps_update ==
 			DFPS_IMMEDIATE_CLK_UPDATE_MODE))
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 6429f33..77c97c6 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -956,7 +956,7 @@
 {
 	int r;
 
-	if ((unsigned)omapfb_nb->plane_idx > OMAPFB_PLANE_NUM)
+	if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
 		return -EINVAL;
 
 	if (!notifier_inited) {
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273..a3edb20 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@
 	if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
 		return -EFAULT;
 
+	if (mr->w > 4096 || mr->h > 4096)
+		return -EINVAL;
+
 	if (mr->w * mr->h * 3 > mr->buffer_size)
 		return -EINVAL;
 
@@ -509,7 +512,7 @@
 			mr->x, mr->y, mr->w, mr->h);
 
 	if (r > 0) {
-		if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+		if (copy_to_user(mr->buffer, buf, r))
 			r = -EFAULT;
 	}
 
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index ef73f14..8503310 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2128,8 +2128,8 @@
 		return -EINVAL;
 
 	ret = -ENOMEM;
-	info->modes = kmalloc_array(timings->num_timings,
-				    sizeof(info->modes[0]), GFP_KERNEL);
+	info->modes = kcalloc(timings->num_timings, sizeof(info->modes[0]),
+			      GFP_KERNEL);
 	if (!info->modes)
 		goto out;
 	info->num_modes = timings->num_timings;
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index badee04..71b5dca 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -19,6 +19,7 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -1468,7 +1469,7 @@
 
 #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
 
-static int viafb_sup_odev_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused viafb_sup_odev_proc_show(struct seq_file *m, void *v)
 {
 	via_odev_to_seq(m, supported_odev_map[
 		viaparinfo->shared->chip_info.gfx_chip_name]);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index a7c08cc..30076956 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -493,7 +493,9 @@
 	tell_host(vb, vb->inflate_vq);
 
 	/* balloon's page migration 2nd step -- deflate "page" */
+	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
 	balloon_page_delete(page);
+	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
 	set_page_pfns(vb, vb->pfns, page);
 	tell_host(vb, vb->deflate_vq);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6d9e517..fbc4761 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -121,6 +121,7 @@
 	struct virtqueue *vq;
 	u16 num;
 	int err;
+	u64 q_pfn;
 
 	/* Select the queue we're interested in */
 	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
@@ -139,9 +140,17 @@
 	if (!vq)
 		return ERR_PTR(-ENOMEM);
 
+	q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+	if (q_pfn >> 32) {
+		dev_err(&vp_dev->pci_dev->dev,
+			"platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n",
+			0x1ULL << (32 + PAGE_SHIFT - 30));
+		err = -E2BIG;
+		goto out_del_vq;
+	}
+
 	/* activate the queue */
-	iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
-		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+	iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
 	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
 
@@ -158,6 +167,7 @@
 
 out_deactivate:
 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+out_del_vq:
 	vring_del_virtqueue(vq);
 	return ERR_PTR(err);
 }
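Illustrative note: the check above exists because the legacy VIRTIO_PCI_QUEUE_PFN register is only 32 bits wide and takes the ring address shifted right by VIRTIO_PCI_QUEUE_ADDR_SHIFT (12). A PFN with any bits above 31 would be silently truncated, so the descriptor ring must sit below 1ULL << (32 + 12) bytes = 16 TiB; the error message prints that limit in GB as 0x1ULL << (32 + PAGE_SHIFT - 30), which is 16384 GB on a 4 KiB-page system.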
diff --git a/drivers/vservices/Kconfig b/drivers/vservices/Kconfig
new file mode 100644
index 0000000..16b3bda
--- /dev/null
+++ b/drivers/vservices/Kconfig
@@ -0,0 +1,81 @@
+#
+# OKL4 Virtual Services framework
+#
+
+menuconfig VSERVICES_SUPPORT
+	tristate "OKL4 Virtual Services support"
+	default OKL4_GUEST || OKL4_VIRTUALISATION
+	select HOTPLUG
+	help
+	  This option adds core support for OKL4 Virtual Services. The Virtual
+	  Services framework is an inter-OS device/service sharing
+	  protocol which is supported on OKL4 Microvisor virtualization
+	  platforms. You will also need drivers from the following menu in
+	  order to make use of it.
+
+if VSERVICES_SUPPORT
+
+config VSERVICES_CHAR_DEV
+	bool "Virtual Services user-space service API"
+	default y
+	help
+	  Select this if you want to use user-space service drivers. You will
+	  also need udev rules that create device nodes, and protocol code
+	  generated by the OK Mill tool.
+
+config VSERVICES_DEBUG
+	bool "Virtual Services debugging support"
+	help
+	  Select this if you want to enable Virtual Services core framework
+	  debugging. The debug messages for various components of the Virtual
+	  Services core framework can be toggled at runtime on a per-session
+	  basis via sysfs. When Virtual Services debugging is enabled here,
+	  but disabled at runtime, it has a minimal performance impact.
+
+config VSERVICES_LOCK_DEBUG
+	bool "Debug Virtual Services state locks"
+	default DEBUG_KERNEL
+	help
+	  This option enables some runtime checks that Virtual Services
+	  state lock functions are used correctly in service drivers.
+
+config VSERVICES_SERVER
+	tristate "Virtual Services server support"
+	depends on SYSFS
+	default y
+	help
+	  This option adds support for Virtual Services servers, which allows
+	  services to be exported from this Linux system to other environments.
+	  Servers are created at runtime by writing to files in
+	  /sys/bus/vservices-server.
+
+config VSERVICES_CLIENT
+	tristate "Virtual Services client support"
+	default y
+	help
+	  This option adds support for Virtual Services clients, which allows
+	  connecting to services exported from other environments.
+
+config VSERVICES_SKELETON_DRIVER
+	tristate "Virtual Services skeleton driver"
+	depends on VSERVICES_SERVER || VSERVICES_CLIENT
+	default n
+	help
+	  This option adds support for a skeleton virtual service driver. This
+	  driver can be used for templating or testing of virtual service
+	  drivers. If unsure say N.
+
+config VSERVICES_NAMED_DEVICE
+	bool "Virtual Services use named device node in /dev"
+	default n
+	help
+	  Select this if you want the device node in /dev to use a descriptive
+	  name rather than a numeric one.
+
+source "drivers/vservices/transport/Kconfig"
+
+source "drivers/vservices/protocol/Kconfig"
+
+source "drivers/vservices/Kconfig.stacks"
+
+endif # VSERVICES_SUPPORT
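Illustrative note (not part of the patch): a minimal .config fragment enabling the framework with both roles built in might look like the following; the transport, protocol and stack sub-options sourced at the end of this Kconfig are left at their defaults here.

    CONFIG_VSERVICES_SUPPORT=y
    CONFIG_VSERVICES_CHAR_DEV=y
    CONFIG_VSERVICES_SERVER=y
    CONFIG_VSERVICES_CLIENT=y
    # CONFIG_VSERVICES_DEBUG is not set
    # CONFIG_VSERVICES_SKELETON_DRIVER is not set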
diff --git a/drivers/vservices/Kconfig.stacks b/drivers/vservices/Kconfig.stacks
new file mode 100644
index 0000000..97eba53
--- /dev/null
+++ b/drivers/vservices/Kconfig.stacks
@@ -0,0 +1,7 @@
+#
+# vServices drivers configuration
+#
+
+menu "Client and Server drivers"
+
+endmenu
diff --git a/drivers/vservices/Makefile b/drivers/vservices/Makefile
new file mode 100644
index 0000000..685ba0a
--- /dev/null
+++ b/drivers/vservices/Makefile
@@ -0,0 +1,16 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_SUPPORT)	+= vservices.o
+vservices-objs-$(CONFIG_VSERVICES_CHAR_DEV) += devio.o
+vservices-objs = session.o $(vservices-objs-y)
+
+obj-$(CONFIG_VSERVICES_CLIENT) += core_client.o
+obj-$(CONFIG_VSERVICES_SERVER) += core_server.o
+
+obj-$(CONFIG_VSERVICES_SKELETON_DRIVER) += vservices_skeleton_driver.o
+vservices_skeleton_driver-objs = skeleton_driver.o
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += transport/
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += protocol/
diff --git a/drivers/vservices/compat.h b/drivers/vservices/compat.h
new file mode 100644
index 0000000..5f6926d
--- /dev/null
+++ b/drivers/vservices/compat.h
@@ -0,0 +1,59 @@
+/*
+ * drivers/vservices/compat.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Wrapper functions/definitions for compatibility between different kernel
+ * versions.
+ */
+
+#ifndef _VSERVICES_COMPAT_H
+#define _VSERVICES_COMPAT_H
+
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+/* The INIT_WORK_ONSTACK macro has a slightly different name in older kernels */
+#ifndef INIT_WORK_ONSTACK
+#define INIT_WORK_ONSTACK(_work, _func) INIT_WORK_ON_STACK(_work, _func)
+#endif
+
+/*
+ * We require a workqueue with no concurrency. This is provided by
+ * create_singlethread_workqueue() in kernels prior to 2.6.36.
+ * In later versions, create_singlethread_workqueue() enables WQ_MEM_RECLAIM and
+ * thus WQ_RESCUER, which allows work items to be grabbed by a rescuer thread
+ * and run concurrently if the queue is running too slowly. We must use
+ * alloc_ordered_workqueue() instead, to disable the rescuer.
+ */
+static inline struct workqueue_struct *
+vs_create_workqueue(const char *name)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	return create_singlethread_workqueue(name);
+#else
+	return alloc_ordered_workqueue(name, 0);
+#endif
+}
+
+/*
+ * The max3 macro has only been present from 2.6.37
+ * (commit: f27c85c56b32c42bcc54a43189c1e00fdceb23ec)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+#define max3(x, y, z) ({			\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	typeof(z) _max3 = (z);			\
+	(void) (&_max1 == &_max2);		\
+	(void) (&_max1 == &_max3);		\
+	_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+		(_max2 > _max3 ? _max2 : _max3); })
+#endif
+
+#endif /* _VSERVICES_COMPAT_H */
diff --git a/drivers/vservices/core_client.c b/drivers/vservices/core_client.c
new file mode 100644
index 0000000..4cc78ac
--- /dev/null
+++ b/drivers/vservices/core_client.c
@@ -0,0 +1,733 @@
+/*
+ * drivers/vservices/core_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Client side core service application driver. This is responsible for:
+ *
+ *  - automatically connecting to the server when it becomes ready;
+ *  - sending a reset command to the server if something has gone wrong; and
+ *  - enumerating all the available services.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+struct core_client {
+	struct vs_client_core_state	state;
+	struct vs_service_device	*service;
+
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+};
+
+struct pending_reset {
+	struct vs_service_device	*service;
+	struct list_head		list;
+};
+
+#define to_core_client(x)	container_of(x, struct core_client, state)
+#define dev_to_core_client(x)	to_core_client(dev_get_drvdata(x))
+
+static int vs_client_core_fatal_error(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	/* Force a transport level reset */
+	dev_err(&client->service->dev, "Fatal error - resetting session\n");
+	return -EPROTO;
+}
+
+static struct core_client *
+vs_client_session_core_client(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_client(&core_service->dev);
+}
+
+static ssize_t client_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/* Writing a valid service id to this file resets that service */
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	err = vs_service_reset(target, core_service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(reset_service, S_IWUSR, NULL,
+		client_core_reset_service_store);
+
+static struct attribute *client_core_dev_attrs[] = {
+	&dev_attr_reset_service.attr,
+	NULL,
+};
+
+static const struct attribute_group client_core_attr_group = {
+	.attrs = client_core_dev_attrs,
+};
+
+/*
+ * Protocol callbacks
+ */
+static int
+vs_client_core_handle_service_removed(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	struct vs_service_device *service;
+	int ret;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	ret = vs_service_handle_delete(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int vs_client_core_create_service(struct core_client *client,
+		struct vs_session_device *session, vs_service_id_t service_id,
+		struct vs_string *protocol_name_string,
+		struct vs_string *service_name_string)
+{
+	char *protocol_name, *service_name;
+	struct vs_service_device *service;
+	int ret = 0;
+
+	protocol_name = vs_string_dup(protocol_name_string, GFP_KERNEL);
+	if (!protocol_name) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	service_name = vs_string_dup(service_name_string, GFP_KERNEL);
+	if (!service_name) {
+		ret = -ENOMEM;
+		goto out_free_protocol_name;
+	}
+
+	service = vs_service_register(session, client->service, service_id,
+			protocol_name, service_name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto out_free_service_name;
+	}
+
+	vs_service_start(service);
+
+out_free_service_name:
+	kfree(service_name);
+out_free_protocol_name:
+	kfree(protocol_name);
+out:
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_created(struct vs_client_core_state *state,
+		u32 service_id, struct vs_string service_name,
+		struct vs_string protocol_name, struct vs_mbuf *mbuf)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE,
+			vs_service_get_session(client->service),
+			&client->service->dev, "Service info for %d received\n",
+			service_id);
+
+	err = vs_client_core_create_service(client, session, service_id,
+			&protocol_name, &service_name);
+	if (err)
+		dev_err(&session->dev,
+				"Failed to create service with id %d: %d\n",
+				service_id, err);
+
+	vs_client_core_core_free_service_created(state, &service_name,
+			&protocol_name, mbuf);
+
+	return err;
+}
+
+static int
+vs_client_core_send_service_reset(struct core_client *client,
+		struct vs_service_device *service)
+{
+	return vs_client_core_core_send_service_reset(&client->state,
+			service->id, GFP_KERNEL);
+}
+
+static int
+vs_client_core_queue_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_client *client =
+		vs_client_session_core_client(session);
+	struct pending_reset *msg;
+
+	if (!client)
+		return -ENODEV;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending reset for service %d\n", service->id);
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	mutex_lock(&client->message_queue_lock);
+
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &client->message_queue);
+
+	mutex_unlock(&client->message_queue_lock);
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static int vs_core_client_tx_ready(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	queue_work(client->service->work_queue, &client->message_queue_work);
+
+	return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_client *client = container_of(work, struct core_client,
+			message_queue_work);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+	int err;
+
+	vs_service_state_lock(client->service);
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(client->state.state.core)) {
+		vs_service_state_unlock(client->service);
+		return;
+	}
+
+	vs_dev_debug(VS_DEBUG_CLIENT, session, &session->dev, "tx_ready\n");
+
+	mutex_lock(&client->message_queue_lock);
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+
+		err = vs_client_core_send_service_reset(client, msg->service);
+
+		/* If we're out of quota there's no point continuing */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&client->service->dev,
+					"Failed to send pending reset for %d (%d) - resetting session\n",
+					msg->service->id, err);
+			vs_service_reset_nosync(client->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the queue.
+		 * The corresponding vs_get_service() was done when the pending
+		 * message was enqueued.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&client->message_queue_lock);
+	vs_service_state_unlock(client->service);
+}
+
+static int
+vs_client_core_handle_server_ready(struct vs_client_core_state *state,
+		u32 service_id, u32 in_quota, u32 out_quota, u32 in_bit_offset,
+		u32 in_num_bits, u32 out_bit_offset, u32 out_num_bits)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+	int ret;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	if (!in_quota || !out_quota)
+		return -EINVAL;
+
+	session = vs_service_get_session(client->service);
+	service = vs_session_get_service(session, service_id);
+	if (!service)
+		return -EINVAL;
+
+	service->send_quota = in_quota;
+	service->recv_quota = out_quota;
+	service->notify_send_offset = in_bit_offset;
+	service->notify_send_bits = in_num_bits;
+	service->notify_recv_offset = out_bit_offset;
+	service->notify_recv_bits = out_num_bits;
+
+	ret = vs_service_enable(service);
+	vs_put_service(service);
+	return ret;
+}
+
+static int
+vs_client_core_handle_service_reset(struct vs_client_core_state *state,
+		u32 service_id)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session;
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	session = vs_service_get_session(client->service);
+
+	return vs_service_handle_reset(session, service_id, true);
+}
+
+static void vs_core_client_start(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+			vs_service_get_session(client->service);
+
+	/* FIXME - start callback should return int */
+	vs_dev_debug(VS_DEBUG_CLIENT_CORE, session, &client->service->dev,
+			"Core client start\n");
+}
+
+static void vs_core_client_reset(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_session_device *session =
+		vs_service_get_session(client->service);
+	struct pending_reset *msg;
+
+	/* Flush the pending resets - we're about to delete everything */
+	while (!list_empty(&client->message_queue)) {
+		msg = list_first_entry(&client->message_queue,
+				struct pending_reset, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+
+	vs_session_delete_noncore(session);
+
+	/* Return to the initial quotas, until the next startup message */
+	client->service->send_quota = 0;
+	client->service->recv_quota = 1;
+}
+
+static int vs_core_client_startup(struct vs_client_core_state *state,
+		u32 core_in_quota, u32 core_out_quota)
+{
+	struct core_client *client = to_core_client(state);
+	struct vs_service_device *service = state->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	if (!core_in_quota || !core_out_quota)
+		return -EINVAL;
+
+	/*
+	 * Update the service struct with our real quotas and tell the
+	 * transport about the change
+	 */
+
+	service->send_quota = core_in_quota;
+	service->recv_quota = core_out_quota;
+	ret = session->transport->vt->service_start(session->transport, service);
+	if (ret < 0)
+		return ret;
+
+	WARN_ON(!list_empty(&client->message_queue));
+
+	return vs_client_core_core_req_connect(state, GFP_KERNEL);
+}
+
+static struct vs_client_core_state *
+vs_core_client_alloc(struct vs_service_device *service)
+{
+	struct core_client *client;
+	int err;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		goto fail;
+
+	client->service = service;
+	INIT_LIST_HEAD(&client->message_queue);
+	INIT_WORK(&client->message_queue_work, message_queue_work);
+	mutex_init(&client->message_queue_lock);
+
+	err = sysfs_create_group(&service->dev.kobj, &client_core_attr_group);
+	if (err)
+		goto fail_free_client;
+
+	/*
+	 * Default transport resources for the core service client. The
+	 * server will inform us of the real quotas in the startup message.
+	 * Note that it is important that the quotas never decrease, so these
+	 * numbers are as small as possible.
+	 */
+	service->send_quota = 0;
+	service->recv_quota = 1;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+
+	return &client->state;
+
+fail_free_client:
+	kfree(client);
+fail:
+	return NULL;
+}
+
+static void vs_core_client_release(struct vs_client_core_state *state)
+{
+	struct core_client *client = to_core_client(state);
+
+	sysfs_remove_group(&client->service->dev.kobj, &client_core_attr_group);
+	kfree(client);
+}
+
+static struct vs_client_core vs_core_client_driver = {
+	.alloc		= vs_core_client_alloc,
+	.release	= vs_core_client_release,
+	.start		= vs_core_client_start,
+	.reset		= vs_core_client_reset,
+	.tx_ready	= vs_core_client_tx_ready,
+
+	.core = {
+		.nack_connect		= vs_client_core_fatal_error,
+
+		/* FIXME: Jira ticket SDK-3074 - ryanm. */
+		.ack_disconnect		= vs_client_core_fatal_error,
+		.nack_disconnect	= vs_client_core_fatal_error,
+
+		.msg_service_created	= vs_client_core_handle_service_created,
+		.msg_service_removed	= vs_client_core_handle_service_removed,
+
+		.msg_startup		= vs_core_client_startup,
+		/* FIXME: Jira ticket SDK-3074 - philipd. */
+		.msg_shutdown		= vs_client_core_fatal_error,
+		.msg_server_ready	= vs_client_core_handle_server_ready,
+		.msg_service_reset	= vs_client_core_handle_service_reset,
+	},
+};
+
+/*
+ * Client bus driver
+ */
+static int vs_client_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(service->is_server || vsdrv->is_server);
+
+	/* Match if the protocol strings are the same */
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->send_quota);
+}
+
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->recv_quota);
+}
+
+static struct device_attribute vs_client_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR(protocol, S_IRUGO, dev_protocol_show, NULL),
+	__ATTR_RO(service_name),
+	__ATTR_RO(quota_in),
+	__ATTR_RO(quota_out),
+	__ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *driver = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", driver->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_client_drv_attrs[] = {
+	__ATTR_RO(protocol),
+	__ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_client_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_client_drv);
+#endif
+
+struct bus_type vs_client_bus_type = {
+	.name		= "vservices-client",
+	.dev_attrs	= vs_client_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+	.drv_attrs	= vs_client_drv_attrs,
+#else
+	.drv_groups	= vs_client_drv_groups,
+#endif
+	.match		= vs_client_bus_match,
+	.probe		= vs_service_bus_probe,
+	.remove		= vs_service_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_client_bus_type);
+
+/*
+ * Client session driver
+ */
+static int vs_client_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+	char *protocol, *name;
+	int ret = 0;
+
+	if (session->is_server) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* create a service for the core protocol client */
+	protocol = kstrdup(VSERVICE_CORE_PROTOCOL_NAME, GFP_KERNEL);
+	if (!protocol) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	name = kstrdup("core", GFP_KERNEL);
+	if (!name) {
+		ret = -ENOMEM;
+		goto fail_free_protocol;
+	}
+
+	service = vs_service_register(session, NULL, 0, protocol, name, NULL);
+	if (IS_ERR(service)) {
+		ret = PTR_ERR(service);
+		goto fail_free_name;
+	}
+
+fail_free_name:
+	kfree(name);
+fail_free_protocol:
+	kfree(protocol);
+fail:
+	return ret;
+}
+
+static int
+vs_client_session_send_service_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	if (WARN_ON(service->id == 0))
+		return -EINVAL;
+
+	return vs_client_core_queue_service_reset(session, service);
+}
+
+static struct vs_session_driver vs_client_session_driver = {
+	.driver	= {
+		.name			= "vservices-client-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_client_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= false,
+	.service_bus		= &vs_client_bus_type,
+	.service_local_reset	= vs_client_session_send_service_reset,
+};
+
+static int __init vs_core_client_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_client_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	vs_devio_client_driver.driver.bus = &vs_client_bus_type;
+	vs_devio_client_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_client_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_client_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_client_register(&vs_core_client_driver,
+			"vs_core_client");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_client_root = kobject_create_and_add("client-sessions",
+			vservices_root);
+	if (!vservices_client_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_client_unregister(&vs_core_client_driver);
+fail_core_register:
+	driver_unregister(&vs_client_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_client_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+static void __exit vs_core_client_exit(void)
+{
+	kobject_put(vservices_client_root);
+	vservice_core_client_unregister(&vs_core_client_driver);
+	driver_unregister(&vs_client_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_client_driver.driver);
+	vs_devio_client_driver.driver.bus = NULL;
+	vs_devio_client_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_client_bus_type);
+}
+
+subsys_initcall(vs_core_client_init);
+module_exit(vs_core_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/core_server.c b/drivers/vservices/core_server.c
new file mode 100644
index 0000000..76ca83c
--- /dev/null
+++ b/drivers/vservices/core_server.c
@@ -0,0 +1,1651 @@
+/*
+ * drivers/vservices/core_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Server side core service application driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+
+#include "transport.h"
+#include "session.h"
+#include "compat.h"
+
+#define VSERVICE_CORE_SERVICE_NAME	"core"
+
+struct core_server {
+	struct vs_server_core_state	state;
+	struct vs_service_device	*service;
+
+	/*
+	 * A list of messages to send, a mutex protecting it, and a
+	 * work item to process the list.
+	 */
+	struct list_head		message_queue;
+	struct mutex			message_queue_lock;
+	struct work_struct		message_queue_work;
+
+	struct mutex			alloc_lock;
+
+	/* The following are all protected by alloc_lock. */
+	unsigned long			*in_notify_map;
+	int				in_notify_map_bits;
+
+	unsigned long			*out_notify_map;
+	int				out_notify_map_bits;
+
+	unsigned			in_quota_remaining;
+	unsigned			out_quota_remaining;
+};
+
+/*
+ * Used for message deferral when the core service is over quota.
+ */
+struct pending_message {
+	vservice_core_message_id_t		type;
+	struct vs_service_device		*service;
+	struct list_head			list;
+};
+
+#define to_core_server(x)	container_of(x, struct core_server, state)
+#define dev_to_core_server(x)	to_core_server(dev_get_drvdata(x))
+
+static struct vs_session_device *
+vs_core_server_session(struct core_server *server)
+{
+	return vs_service_get_session(server->service);
+}
+
+static struct core_server *
+vs_server_session_core_server(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return NULL;
+
+	return dev_to_core_server(&core_service->dev);
+}
+
+static int vs_server_core_send_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	return vs_server_core_core_send_service_removed(&server->state,
+			service->id, GFP_KERNEL);
+}
+
+static bool
+cancel_pending_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVICE_CREATED &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int vs_server_core_queue_service_removed(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If we haven't sent the notification that the service was created,
+	 * nuke it and do nothing else.
+	 *
+	 * This is not just an optimisation; see below.
+	 */
+	if (cancel_pending_created(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	/*
+	 * Do nothing if the core state is not connected. We must avoid
+	 * queueing service_removed messages on a reset service.
+	 *
+	 * Note that we cannot take the core server state lock here, because
+	 * we may (or may not) have been called from a core service message
+	 * handler. Thus, we must beware of races with changes to this
+	 * condition:
+	 *
+	 * - It becomes true when the req_connect handler sends an
+	 *   ack_connect, *after* it queues service_created for each existing
+	 *   service (while holding the service ready lock). The handler sends
+	 *   ack_connect with the message queue lock held.
+	 *
+	 *   - If we see the service as connected, then the req_connect
+	 *     handler has already queued and sent a service_created for this
+	 *     service, so it's ok for us to send a service_removed.
+	 *
+	 *   - If we see it as disconnected, the req_connect handler hasn't
+	 *     taken the message queue lock to send ack_connect yet, and thus
+	 *     has not released the service state lock; so if it queued a
+	 *     service_created we caught it in the flush above before it was
+	 *     sent.
+	 *
+	 * - It becomes false before the reset / disconnect handlers are
+	 *   called and those will both flush the message queue afterwards.
+	 *
+	 *   - If we see the service as connected, then the reset / disconnect
+	 *     handler is going to flush the message.
+	 *
+	 *   - If we see it disconnected, the state change has occurred and
+	 *     implicitly had the same effect as this message, so doing
+	 *     nothing is correct.
+	 *
+	 * Note that ordering in all of the above cases is guaranteed by the
+	 * message queue lock.
+	 */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static int vs_server_core_send_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(server->service);
+
+	struct vs_mbuf *mbuf;
+	struct vs_string service_name, protocol_name;
+	size_t service_name_len, protocol_name_len;
+
+	int err;
+
+	mbuf = vs_server_core_core_alloc_service_created(&server->state,
+			&service_name, &protocol_name, GFP_KERNEL);
+
+	if (IS_ERR(mbuf))
+		return PTR_ERR(mbuf);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending service created message for %d (%s:%s)\n",
+			service->id, service->name, service->protocol);
+
+	service_name_len = strlen(service->name);
+	protocol_name_len = strlen(service->protocol);
+
+	if (service_name_len > vs_string_max_size(&service_name) ||
+			protocol_name_len > vs_string_max_size(&protocol_name)) {
+		dev_err(&session->dev,
+				"Invalid name/protocol for service %d (%s:%s)\n",
+				service->id, service->name,
+				service->protocol);
+		err = -EINVAL;
+		goto fail;
+	}
+
+	vs_string_copyin(&service_name, service->name);
+	vs_string_copyin(&protocol_name, service->protocol);
+
+	err = vs_server_core_core_send_service_created(&server->state,
+			service->id, service_name, protocol_name, mbuf);
+	if (err) {
+		dev_err(&session->dev,
+				"Fatal error sending service creation message for %d (%s:%s): %d\n",
+				service->id, service->name,
+				service->protocol, err);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	vs_server_core_core_free_service_created(&server->state,
+			&service_name, &protocol_name, mbuf);
+
+	return err;
+}
+
+static int vs_server_core_queue_service_created(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	lockdep_assert_held(&service->ready_lock);
+	lockdep_assert_held(&server->service->state_mutex);
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*  Do nothing if the core state is disconnected.  */
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		mutex_unlock(&server->message_queue_lock);
+		return 0;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static struct vs_service_device *
+__vs_server_core_register_service(struct vs_session_device *session,
+		vs_service_id_t service_id, struct vs_service_device *owner,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	if (!session->is_server)
+		return ERR_PTR(-ENODEV);
+
+	if (!name || strnlen(name, VSERVICE_CORE_SERVICE_NAME_SIZE + 1) >
+			VSERVICE_CORE_SERVICE_NAME_SIZE || name[0] == '\n')
+		return ERR_PTR(-EINVAL);
+
+	/* The server core must only be registered as service_id zero */
+	if (service_id == 0 && (owner != NULL ||
+			strcmp(name, VSERVICE_CORE_SERVICE_NAME) != 0 ||
+			strcmp(protocol, VSERVICE_CORE_PROTOCOL_NAME) != 0))
+		return ERR_PTR(-EINVAL);
+
+	return vs_service_register(session, owner, service_id, protocol, name,
+			plat_data);
+}
+
+static struct vs_service_device *
+vs_server_core_create_service(struct core_server *server,
+		struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *name, const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, service_id,
+			owner, name, protocol, plat_data);
+	if (IS_ERR(service))
+		return service;
+
+	if (protocol) {
+		vs_service_state_lock(server->service);
+		vs_service_start(service);
+		if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+			vs_service_enable(service);
+		vs_service_state_unlock(server->service);
+	}
+
+	return service;
+}
+
+static int
+vs_server_core_send_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+			"Sending %s for service %d\n",
+			is_reset ? "reset" : "ready", service->id);
+
+	if (is_reset)
+		err = vs_server_core_core_send_service_reset(&server->state,
+				service->id, GFP_KERNEL);
+	else
+		err = vs_server_core_core_send_server_ready(&server->state,
+				service->id, service->recv_quota,
+				service->send_quota,
+				service->notify_recv_offset,
+				service->notify_recv_bits,
+				service->notify_send_offset,
+				service->notify_send_bits,
+				GFP_KERNEL);
+
+	return err;
+}
+
+static bool
+cancel_pending_ready(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct pending_message *msg;
+
+	list_for_each_entry(msg, &server->message_queue, list) {
+		if (msg->type == VSERVICE_CORE_CORE_MSG_SERVER_READY &&
+				msg->service == service) {
+			vs_put_service(msg->service);
+			list_del(&msg->list);
+			kfree(msg);
+
+			/* there can only be one */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int
+vs_server_core_queue_service_reset_ready(struct core_server *server,
+		vservice_core_message_id_t type,
+		struct vs_service_device *service)
+{
+	bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+	struct pending_message *msg;
+
+	mutex_lock(&server->message_queue_lock);
+
+	/*
+	 * If this is a reset, and there is an outgoing ready in the
+	 * queue, we must cancel it so it can't be sent with invalid
+	 * transport resources, and then return immediately so we
+	 * don't send a redundant reset.
+	 */
+	if (is_reset && cancel_pending_ready(server, service)) {
+		mutex_unlock(&server->message_queue_lock);
+		return VS_SERVICE_ALREADY_RESET;
+	}
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&server->message_queue_lock);
+		return -ENOMEM;
+	}
+
+	msg->type = type;
+	/* put by message_queue_work */
+	msg->service = vs_get_service(service);
+	list_add_tail(&msg->list, &server->message_queue);
+
+	mutex_unlock(&server->message_queue_lock);
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static int vs_core_server_tx_ready(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(server->service);
+
+	vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev, "tx_ready\n");
+
+	queue_work(server->service->work_queue, &server->message_queue_work);
+
+	return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+	struct core_server *server = container_of(work, struct core_server,
+			message_queue_work);
+	struct pending_message *msg;
+	int err;
+
+	vs_service_state_lock(server->service);
+
+	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+		vs_service_state_unlock(server->service);
+		return;
+	}
+
+	/*
+	 * If any pending message fails we exit the loop immediately so that
+	 * we preserve the message order.
+	 */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+
+		switch (msg->type) {
+		case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+			err = vs_server_core_send_service_created(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+			err = vs_server_core_send_service_removed(server,
+					msg->service);
+			break;
+
+		case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+			err = vs_server_core_send_service_reset_ready(
+					server, msg->type, msg->service);
+			break;
+
+		default:
+			dev_warn(&server->service->dev,
+					"Don't know how to handle pending message type %d\n",
+					msg->type);
+			err = 0;
+			break;
+		}
+
+		/*
+		 * If we're out of quota we exit and wait for tx_ready to
+		 * queue us again.
+		 */
+		if (err == -ENOBUFS)
+			break;
+
+		/* Any other error is fatal */
+		if (err < 0) {
+			dev_err(&server->service->dev,
+					"Failed to send pending message type %d: %d - resetting session\n",
+					msg->type, err);
+			vs_service_reset_nosync(server->service);
+			break;
+		}
+
+		/*
+		 * The message sent successfully - remove it from the
+		 * queue. The corresponding vs_get_service() was done
+		 * when the pending message was created.
+		 */
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+
+	vs_service_state_unlock(server->service);
+}
+
+/*
+ * Core server sysfs interface
+ */
+static ssize_t server_core_create_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = to_vs_session_device(dev->parent);
+	struct core_server *server = dev_to_core_server(&service->dev);
+	struct vs_service_device *new_service;
+	char *p;
+	ssize_t ret = count;
+
+	/* FIXME - Buffer sizes are not defined in generated headers */
+	/* discard leading whitespace */
+	while (count && isspace(*buf)) {
+		buf++;
+		count--;
+	}
+	if (!count) {
+		dev_info(dev, "empty service name\n");
+		return -EINVAL;
+	}
+	/* discard trailing whitespace */
+	while (count && isspace(buf[count - 1]))
+		count--;
+
+	if (count > VSERVICE_CORE_SERVICE_NAME_SIZE) {
+		dev_info(dev, "service name too long (max %d)\n",
+				VSERVICE_CORE_SERVICE_NAME_SIZE);
+		return -EINVAL;
+	}
+
+	p = kstrndup(buf, count, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	/*
+	 * Writing a service name to this file creates a new service. The
+	 * service is created without a protocol. It will appear in sysfs
+	 * but will not be bound to a driver until a valid protocol name
+	 * has been written to the created device's protocol sysfs attribute.
+	 */
+	new_service = vs_server_core_create_service(server, session, service,
+			VS_SERVICE_AUTO_ALLOCATE_ID, p, NULL, NULL);
+	if (IS_ERR(new_service))
+		ret = PTR_ERR(new_service);
+
+	kfree(p);
+
+	return ret;
+}
+
+static ssize_t server_core_reset_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *core_service = to_vs_service_device(dev);
+	struct vs_session_device *session =
+		vs_service_get_session(core_service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	/* Writing a valid service id to this file resets that service */
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_reset(target, core_service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static ssize_t server_core_remove_service_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_service_device *target;
+	vs_service_id_t service_id;
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	service_id = val;
+	if (service_id == 0) {
+		/*
+		 * We don't allow removing the core service this way. The
+		 * core service will be removed when the session is removed.
+		 */
+		return -EINVAL;
+	}
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -EINVAL;
+
+	err = vs_service_delete(target, service);
+
+	vs_put_service(target);
+	return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(create_service, S_IWUSR,
+		NULL, server_core_create_service_store);
+static DEVICE_ATTR(reset_service, S_IWUSR,
+		NULL, server_core_reset_service_store);
+static DEVICE_ATTR(remove_service, S_IWUSR,
+		NULL, server_core_remove_service_store);
+
+static struct attribute *server_core_dev_attrs[] = {
+	&dev_attr_create_service.attr,
+	&dev_attr_reset_service.attr,
+	&dev_attr_remove_service.attr,
+	NULL,
+};
+
+static const struct attribute_group server_core_attr_group = {
+	.attrs = server_core_dev_attrs,
+};
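+
+/*
+ * Illustrative usage sketch only: together, the attributes above let
+ * userspace manage services on a server session. The sysfs paths below are
+ * assumptions about how the buses name their devices, not something this
+ * file defines:
+ *
+ *   # create a new, protocol-less service named "foo" on the core service
+ *   echo foo > .../<core service device>/create_service
+ *
+ *   # bind it to a driver by writing a protocol name to the new device's
+ *   # protocol attribute (see dev_protocol_store below)
+ *   echo <protocol name> > .../<new service device>/protocol
+ *
+ *   # later, reset or remove the service by id
+ *   echo <service id> > .../<core service device>/reset_service
+ *   echo <service id> > .../<core service device>/remove_service
+ */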
+
+static int init_transport_resource_allocation(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct vs_transport *transport = session->transport;
+	size_t size;
+	int err;
+
+	mutex_init(&server->alloc_lock);
+	mutex_lock(&server->alloc_lock);
+
+	transport->vt->get_quota_limits(transport, &server->out_quota_remaining,
+			&server->in_quota_remaining);
+
+	transport->vt->get_notify_bits(transport, &server->out_notify_map_bits,
+			&server->in_notify_map_bits);
+
+	size = BITS_TO_LONGS(server->in_notify_map_bits) *
+			sizeof(unsigned long);
+	server->in_notify_map = kzalloc(size, GFP_KERNEL);
+	if (server->in_notify_map_bits && !server->in_notify_map) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	size = BITS_TO_LONGS(server->out_notify_map_bits) *
+			sizeof(unsigned long);
+	server->out_notify_map = kzalloc(size, GFP_KERNEL);
+	if (server->out_notify_map_bits && !server->out_notify_map) {
+		err = -ENOMEM;
+		goto fail_free_in_bits;
+	}
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+fail_free_in_bits:
+	kfree(server->in_notify_map);
+fail:
+	mutex_unlock(&server->alloc_lock);
+	return err;
+}
+
+static int alloc_quota(unsigned minimum, unsigned best, unsigned set,
+		unsigned *remaining)
+{
+	unsigned quota;
+
+	if (set) {
+		quota = set;
+
+		if (quota > *remaining)
+			return -ENOSPC;
+	} else if (best) {
+		quota = min(best, *remaining);
+	} else {
+		quota = minimum;
+	}
+
+	if (quota < minimum)
+		return -ENOSPC;
+
+	*remaining -= quota;
+
+	return min_t(unsigned, quota, INT_MAX);
+}
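+
+/*
+ * Worked example of alloc_quota() above, with illustrative numbers only:
+ * starting from *remaining == 100, minimum == 2 and best == 10, a call with
+ * set == 0 takes min(best, *remaining) == 10 and leaves 90; set == 150
+ * fails with -ENOSPC because it exceeds the remaining pool; and with both
+ * set == 0 and best == 0 the allocation falls back to the minimum of 2.
+ */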
+
+static int alloc_notify_bits(unsigned notify_count, unsigned long *map,
+		unsigned nr_bits)
+{
+	unsigned offset;
+
+	if (notify_count) {
+		offset = bitmap_find_next_zero_area(map, nr_bits, 0,
+				notify_count, 0);
+
+		if (offset >= nr_bits || offset > (unsigned)INT_MAX)
+			return -ENOSPC;
+
+		bitmap_set(map, offset, notify_count);
+	} else {
+		offset = 0;
+	}
+
+	return offset;
+}
+
+/*
+ * alloc_transport_resources - Allocates the quotas and notification bits for
+ * a service.
+ * @server: the core service state.
+ * @service: the service device to allocate resources for.
+ *
+ * This function allocates message quotas and notification bits. It is called
+ * for the core service in alloc(), and for every other service by the server
+ * bus probe() function.
+ */
+static int alloc_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	unsigned in_bit_offset, out_bit_offset;
+	unsigned in_quota, out_quota;
+	int ret;
+	struct vs_service_driver *driver;
+
+	if (WARN_ON(!service->dev.driver))
+		return -ENODEV;
+
+	mutex_lock(&server->alloc_lock);
+
+	driver = to_vs_service_driver(service->dev.driver);
+
+	/* Quota allocations */
+	ret = alloc_quota(driver->in_quota_min, driver->in_quota_best,
+			service->in_quota_set, &server->in_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in quota\n");
+		goto fail_in_quota;
+	}
+	in_quota = ret;
+
+	ret = alloc_quota(driver->out_quota_min, driver->out_quota_best,
+			service->out_quota_set, &server->out_quota_remaining);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out quota\n");
+		goto fail_out_quota;
+	}
+	out_quota = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"%d: quota in: %u out: %u; remaining in: %u out: %u\n",
+			service->id, in_quota, out_quota,
+			server->in_quota_remaining,
+			server->out_quota_remaining);
+
+	/* Notification bit allocations */
+	ret = alloc_notify_bits(service->notify_recv_bits,
+			server->in_notify_map, server->in_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate in notify bits\n");
+		goto fail_in_notify;
+	}
+	in_bit_offset = ret;
+
+	ret = alloc_notify_bits(service->notify_send_bits,
+			server->out_notify_map, server->out_notify_map_bits);
+	if (ret < 0) {
+		dev_err(&service->dev, "cannot allocate out notify bits\n");
+		goto fail_out_notify;
+	}
+	out_bit_offset = ret;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+			"notify bits in: %u/%u out: %u/%u\n",
+			in_bit_offset, service->notify_recv_bits,
+			out_bit_offset, service->notify_send_bits);
+
+	/* Fill in the device's allocations */
+	service->recv_quota = in_quota;
+	service->send_quota = out_quota;
+	service->notify_recv_offset = in_bit_offset;
+	service->notify_send_offset = out_bit_offset;
+
+	mutex_unlock(&server->alloc_lock);
+
+	return 0;
+
+fail_out_notify:
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				in_bit_offset, service->notify_recv_bits);
+fail_in_notify:
+	server->out_quota_remaining += out_quota;
+fail_out_quota:
+	server->in_quota_remaining += in_quota;
+fail_in_quota:
+
+	mutex_unlock(&server->alloc_lock);
+
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return ret;
+}
+
+/*
+ * free_transport_resources - Frees the quotas and notification bits for
+ * a non-core service.
+ * @server: the core service state.
+ * @service: the service device to free resources for.
+ *
+ * This function is called by the server to free message quotas and
+ * notification bits that were allocated by alloc_transport_resources. It must
+ * only be called when the target service is in reset, and must be called with
+ * the core service's state lock held.
+ */
+static int free_transport_resources(struct core_server *server,
+		struct vs_service_device *service)
+{
+	mutex_lock(&server->alloc_lock);
+
+	if (service->notify_recv_bits)
+		bitmap_clear(server->in_notify_map,
+				service->notify_recv_offset,
+				service->notify_recv_bits);
+
+	if (service->notify_send_bits)
+		bitmap_clear(server->out_notify_map,
+				service->notify_send_offset,
+				service->notify_send_bits);
+
+	server->in_quota_remaining += service->recv_quota;
+	server->out_quota_remaining += service->send_quota;
+
+	mutex_unlock(&server->alloc_lock);
+
+	service->recv_quota = 0;
+	service->send_quota = 0;
+	service->notify_recv_bits = 0;
+	service->notify_recv_offset = 0;
+	service->notify_send_bits = 0;
+	service->notify_send_offset = 0;
+
+	return 0;
+}
+
+static struct vs_server_core_state *
+vs_core_server_alloc(struct vs_service_device *service)
+{
+	struct core_server *server;
+	int err;
+
+	if (WARN_ON(service->id != 0))
+		goto fail;
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server)
+		goto fail;
+
+	server->service = service;
+	INIT_LIST_HEAD(&server->message_queue);
+	INIT_WORK(&server->message_queue_work, message_queue_work);
+	mutex_init(&server->message_queue_lock);
+
+	err = init_transport_resource_allocation(server);
+	if (err)
+		goto fail_init_alloc;
+
+	err = alloc_transport_resources(server, service);
+	if (err)
+		goto fail_alloc_transport;
+
+	err = sysfs_create_group(&service->dev.kobj, &server_core_attr_group);
+	if (err)
+		goto fail_sysfs;
+
+	return &server->state;
+
+fail_sysfs:
+	free_transport_resources(server, service);
+fail_alloc_transport:
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+fail_init_alloc:
+	kfree(server);
+fail:
+	return NULL;
+}
+
+static void vs_core_server_release(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	/* Delete all the other services */
+	vs_session_delete_noncore(session);
+
+	sysfs_remove_group(&server->service->dev.kobj, &server_core_attr_group);
+	kfree(server->out_notify_map);
+	kfree(server->in_notify_map);
+	kfree(server);
+}
+
+/**
+ * vs_server_create_service - create and register a new vService server
+ * @session: the session to create the vService server on
+ * @parent: an existing service that manages the new service, or NULL to
+ *          use the core service as the parent
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ */
+struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+		struct vs_service_device *parent, const char *name,
+		const char *protocol, const void *plat_data)
+{
+	struct vs_service_device *core_service, *new_service;
+	struct core_server *server;
+
+	if (!session->is_server || !name || !protocol)
+		return NULL;
+
+	core_service = session->core_service;
+	if (!core_service)
+		return NULL;
+
+	device_lock(&core_service->dev);
+	if (!core_service->dev.driver) {
+		device_unlock(&core_service->dev);
+		return NULL;
+	}
+
+	server = dev_to_core_server(&core_service->dev);
+
+	if (!parent)
+		parent = core_service;
+
+	new_service = vs_server_core_create_service(server, session, parent,
+			VS_SERVICE_AUTO_ALLOCATE_ID, name, protocol, plat_data);
+
+	device_unlock(&core_service->dev);
+
+	if (IS_ERR(new_service))
+		return NULL;
+
+	return new_service;
+}
+EXPORT_SYMBOL(vs_server_create_service);
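+
+/*
+ * Minimal usage sketch for other server-side code (illustrative only;
+ * "my_session", "my-service" and "my-protocol" are placeholders, not names
+ * defined by this driver):
+ *
+ *	struct vs_service_device *svc;
+ *
+ *	svc = vs_server_create_service(my_session, NULL, "my-service",
+ *			"my-protocol", NULL);
+ *	if (!svc)
+ *		return -ENODEV;
+ *	...
+ *	vs_server_destroy_service(svc, NULL);
+ *
+ * A NULL parent makes the core service the managing service, and a NULL
+ * plat_data leaves the new device's platform_data unset.
+ */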
+
+/**
+ * vs_server_destroy_service - destroy and unregister a vService server
+ * @service: the service to destroy
+ * @parent: the service requesting the removal; if NULL, the session's core
+ *          service is used
+ *
+ * This function must _not_ be called from the target service's own workqueue.
+ */
+int vs_server_destroy_service(struct vs_service_device *service,
+		struct vs_service_device *parent)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (!session->is_server || service->id == 0)
+		return -EINVAL;
+
+	if (!parent)
+		parent = session->core_service;
+
+	return vs_service_delete(service, parent);
+}
+EXPORT_SYMBOL(vs_server_destroy_service);
+
+static void __queue_service_created(struct vs_service_device *service,
+		void *data)
+{
+	struct core_server *server = (struct core_server *)data;
+
+	vs_server_core_queue_service_created(server, service);
+}
+
+static int vs_server_core_handle_connect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	/* Tell the other end that we've finished connecting. */
+	err = vs_server_core_core_send_ack_connect(state, GFP_KERNEL);
+	if (err)
+		return err;
+
+	/* Queue a service-created message for each existing service. */
+	vs_session_for_each_service(session, __queue_service_created, server);
+
+	/* Re-enable all the services. */
+	vs_session_enable_noncore(session);
+
+	return 0;
+}
+
+static void vs_core_server_disable_services(struct core_server *server)
+{
+	struct vs_session_device *session = vs_core_server_session(server);
+	struct pending_message *msg;
+
+	/* Disable all the other services */
+	vs_session_disable_noncore(session);
+
+	/* Flush all the pending service-readiness messages */
+	mutex_lock(&server->message_queue_lock);
+	while (!list_empty(&server->message_queue)) {
+		msg = list_first_entry(&server->message_queue,
+				struct pending_message, list);
+		vs_put_service(msg->service);
+		list_del(&msg->list);
+		kfree(msg);
+	}
+	mutex_unlock(&server->message_queue_lock);
+}
+
+static int vs_server_core_handle_disconnect(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+
+	vs_core_server_disable_services(server);
+
+	return vs_server_core_core_send_ack_disconnect(state, GFP_KERNEL);
+}
+
+static int
+vs_server_core_handle_service_reset(struct vs_server_core_state *state,
+		unsigned service_id)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	if (service_id == 0)
+		return -EPROTO;
+
+	return vs_service_handle_reset(session, service_id, false);
+}
+
+static void vs_core_server_start(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+	int err;
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server start\n");
+
+	err = vs_server_core_core_send_startup(&server->state,
+			server->service->recv_quota,
+			server->service->send_quota, GFP_KERNEL);
+
+	if (err)
+		dev_err(&session->dev, "Failed to start core protocol: %d\n",
+				err);
+}
+
+static void vs_core_server_reset(struct vs_server_core_state *state)
+{
+	struct core_server *server = to_core_server(state);
+	struct vs_session_device *session = vs_core_server_session(server);
+
+	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+			"Core server reset\n");
+
+	vs_core_server_disable_services(server);
+}
+
+static struct vs_server_core vs_core_server_driver = {
+	.alloc		= vs_core_server_alloc,
+	.release	= vs_core_server_release,
+	.start		= vs_core_server_start,
+	.reset		= vs_core_server_reset,
+	.tx_ready	= vs_core_server_tx_ready,
+	.core = {
+		.req_connect		= vs_server_core_handle_connect,
+		.req_disconnect		= vs_server_core_handle_disconnect,
+		.msg_service_reset	= vs_server_core_handle_service_reset,
+	},
+};
+
+/*
+ * Server bus driver
+ */
+static int vs_server_bus_match(struct device *dev, struct device_driver *driver)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+	/* Don't match anything to the devio driver; it's bound manually */
+	if (!vsdrv->protocol)
+		return 0;
+
+	WARN_ON_ONCE(!service->is_server || !vsdrv->is_server);
+
+	/* Don't match anything that doesn't have a protocol set yet */
+	if (!service->protocol)
+		return 0;
+
+	if (strcmp(service->protocol, vsdrv->protocol) == 0)
+		return 1;
+
+	return 0;
+}
+
+static int vs_server_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+
+	/*
+	 * Set the notify counts for the service, unless the driver is the
+	 * devio driver in which case it has already been done by the devio
+	 * bind ioctl. The devio driver cannot be bound automatically.
+	 */
+	struct vs_service_driver *driver =
+		to_vs_service_driver(service->dev.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (driver != &vs_devio_server_driver)
+#endif
+	{
+		service->notify_recv_bits = driver->in_notify_count;
+		service->notify_send_bits = driver->out_notify_count;
+	}
+
+	/*
+	 * We can't allocate transport resources here for the core service
+	 * because the resource pool doesn't exist yet. It's done in alloc()
+	 * instead (which is called, indirectly, by vs_service_bus_probe()).
+	 */
+	if (service->id == 0)
+		return vs_service_bus_probe(dev);
+
+	if (!server)
+		return -ENODEV;
+	ret = alloc_transport_resources(server, service);
+	if (ret < 0)
+		goto fail;
+
+	ret = vs_service_bus_probe(dev);
+	if (ret < 0)
+		goto fail_free_resources;
+
+	return 0;
+
+fail_free_resources:
+	free_transport_resources(server, service);
+fail:
+	return ret;
+}
+
+static int vs_server_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+
+	vs_service_bus_remove(dev);
+
+	/*
+	 * We skip free_transport_resources for the core service because the
+	 * resource pool has already been freed at this point. It's also
+	 * possible that the core service has disappeared, in which case
+	 * there's no work to do here.
+	 */
+	if (server != NULL && service->id != 0)
+		free_transport_resources(server, service);
+
+	return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+struct service_enable_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+static void service_enable_work(struct work_struct *work)
+{
+	struct service_enable_work_struct *enable_work = container_of(work,
+			struct service_enable_work_struct, work);
+	struct vs_service_device *service = enable_work->service;
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	bool started;
+	int ret;
+
+	kfree(enable_work);
+
+	if (!server)
+		return;
+	/* Start and enable the service */
+	vs_service_state_lock(server->service);
+	started = vs_service_start(service);
+	if (!started) {
+		vs_service_state_unlock(server->service);
+		vs_put_service(service);
+		return;
+	}
+
+	if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+		vs_service_enable(service);
+	vs_service_state_unlock(server->service);
+
+	/* Tell the bus to search for a driver that supports the protocol */
+	ret = device_attach(&service->dev);
+	if (ret == 0)
+		dev_warn(&service->dev, "No driver found for protocol: %s\n",
+				service->protocol);
+	kobject_uevent(&service->dev.kobj, KOBJ_CHANGE);
+
+	/* The corresponding vs_get_service was done when the work was queued */
+	vs_put_service(service);
+}
+
+static ssize_t dev_protocol_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct service_enable_work_struct *enable_work;
+
+	/* The protocol can only be set once */
+	if (service->protocol)
+		return -EPERM;
+
+	/* Registering additional core servers is not allowed */
+	if (strcmp(buf, VSERVICE_CORE_PROTOCOL_NAME) == 0)
+		return -EINVAL;
+
+	if (strnlen(buf, VSERVICE_CORE_PROTOCOL_NAME_SIZE) + 1 >
+			VSERVICE_CORE_PROTOCOL_NAME_SIZE)
+		return -E2BIG;
+
+	enable_work = kmalloc(sizeof(*enable_work), GFP_KERNEL);
+	if (!enable_work)
+		return -ENOMEM;
+
+	/* Set the protocol and tell the client about it */
+	service->protocol = kstrdup(buf, GFP_KERNEL);
+	if (!service->protocol) {
+		kfree(enable_work);
+		return -ENOMEM;
+	}
+	strim(service->protocol);
+
+	/*
+	 * Schedule work to enable the service. We can't do it here because
+	 * we need to take the core service lock, and doing that here makes
+	 * it depend circularly on this sysfs attribute, which can be deleted
+	 * with that lock held.
+	 *
+	 * The corresponding vs_put_service is called in the enable_work
+	 * function.
+	 */
+	INIT_WORK(&enable_work->work, service_enable_work);
+	enable_work->service = vs_get_service(service);
+	schedule_work(&enable_work->work);
+
+	return count;
+}
+
+static ssize_t service_name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long in_quota;
+
+	if (!server)
+		return -ENODEV;
+	/*
+	 * Don't allow quota to be changed for services that have a driver
+	 * bound. We take the alloc lock here because the device lock is held
+	 * while creating and destroying this sysfs item. This means we can
+	 * race with driver binding, but that doesn't matter: we actually just
+	 * want to know that alloc_transport_resources() hasn't run yet, and
+	 * that takes the alloc lock.
+	 */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &in_quota);
+	if (ret < 0)
+		goto out;
+
+	service->in_quota_set = in_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
+static ssize_t quota_in_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->recv_quota);
+}
+
+static ssize_t quota_out_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct core_server *server = vs_server_session_core_server(session);
+	int ret;
+	unsigned long out_quota;
+
+	if (!server)
+		return -ENODEV;
+	/* See comment in quota_in_store. */
+	mutex_lock(&server->alloc_lock);
+	if (service->dev.driver) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = kstrtoul(buf, 0, &out_quota);
+	if (ret < 0)
+		goto out;
+
+	service->out_quota_set = out_quota;
+	ret = count;
+
+out:
+	mutex_unlock(&server->alloc_lock);
+
+	return ret;
+}
+
+static ssize_t quota_out_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", service->send_quota);
+}
+
+static struct device_attribute vs_server_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR(protocol, S_IRUGO | S_IWUSR,
+			dev_protocol_show, dev_protocol_store),
+	__ATTR_RO(service_name),
+	__ATTR(quota_in, S_IRUGO | S_IWUSR,
+			quota_in_show, quota_in_store),
+	__ATTR(quota_out, S_IRUGO | S_IWUSR,
+			quota_out_show, quota_out_store),
+	__ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+	struct vs_service_driver *vsdrv = to_vs_service_driver(drv);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vsdrv->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_server_drv_attrs[] = {
+	__ATTR_RO(protocol),
+	__ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_server_drv_attrs[] = {
+	&driver_attr_protocol.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vs_server_drv);
+#endif
+
+struct bus_type vs_server_bus_type = {
+	.name		= "vservices-server",
+	.dev_attrs	= vs_server_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+	.drv_attrs	= vs_server_drv_attrs,
+#else
+	.drv_groups	= vs_server_drv_groups,
+#endif
+	.match		= vs_server_bus_match,
+	.probe		= vs_server_bus_probe,
+	.remove		= vs_server_bus_remove,
+	.uevent		= vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_server_bus_type);
+
+/*
+ * Server session driver
+ */
+static int vs_server_session_probe(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *service;
+
+	service = __vs_server_core_register_service(session, 0, NULL,
+			VSERVICE_CORE_SERVICE_NAME,
+			VSERVICE_CORE_PROTOCOL_NAME, NULL);
+	if (IS_ERR(service))
+		return PTR_ERR(service);
+
+	return 0;
+}
+
+static int
+vs_server_session_service_added(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_created(server, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_created: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_start(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVER_READY, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send server_ready: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_local_reset(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	if (WARN_ON(!server || !service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_reset_ready(server,
+			VSERVICE_CORE_CORE_MSG_SERVICE_RESET, service);
+
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_reset: %d\n", err);
+
+	return err;
+}
+
+static int
+vs_server_session_service_removed(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	struct core_server *server = vs_server_session_core_server(session);
+	int err;
+
+	/*
+	 * It's possible for the core server to be forcibly removed before
+	 * the other services, for example when the underlying transport
+	 * vanishes. If that happens, we can end up here with a NULL core
+	 * server pointer.
+	 */
+	if (!server)
+		return 0;
+
+	if (WARN_ON(!service->id))
+		return -EINVAL;
+
+	err = vs_server_core_queue_service_removed(server, service);
+	if (err)
+		vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+				"failed to send service_removed: %d\n", err);
+
+	return err;
+}
+
+static struct vs_session_driver vs_server_session_driver = {
+	.driver	= {
+		.name			= "vservices-server-session",
+		.owner			= THIS_MODULE,
+		.bus			= &vs_session_bus_type,
+		.probe			= vs_server_session_probe,
+		.suppress_bind_attrs	= true,
+	},
+	.is_server		= true,
+	.service_bus		= &vs_server_bus_type,
+	.service_added		= vs_server_session_service_added,
+	.service_start		= vs_server_session_service_start,
+	.service_local_reset	= vs_server_session_service_local_reset,
+	.service_removed	= vs_server_session_service_removed,
+};
+
+static int __init vs_core_server_init(void)
+{
+	int ret;
+
+	ret = bus_register(&vs_server_bus_type);
+	if (ret)
+		goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	vs_devio_server_driver.driver.bus = &vs_server_bus_type;
+	vs_devio_server_driver.driver.owner = THIS_MODULE;
+	ret = driver_register(&vs_devio_server_driver.driver);
+	if (ret)
+		goto fail_devio_register;
+#endif
+
+	ret = driver_register(&vs_server_session_driver.driver);
+	if (ret)
+		goto fail_driver_register;
+
+	ret = vservice_core_server_register(&vs_core_server_driver,
+			"vs_core_server");
+	if (ret)
+		goto fail_core_register;
+
+	vservices_server_root = kobject_create_and_add("server-sessions",
+			vservices_root);
+	if (!vservices_server_root) {
+		ret = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	return 0;
+
+fail_create_root:
+	vservice_core_server_unregister(&vs_core_server_driver);
+fail_core_register:
+	driver_unregister(&vs_server_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+	bus_unregister(&vs_server_bus_type);
+fail_bus_register:
+	return ret;
+}
+
+static void __exit vs_core_server_exit(void)
+{
+	kobject_put(vservices_server_root);
+	vservice_core_server_unregister(&vs_core_server_driver);
+	driver_unregister(&vs_server_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	driver_unregister(&vs_devio_server_driver.driver);
+	vs_devio_server_driver.driver.bus = NULL;
+	vs_devio_server_driver.driver.owner = NULL;
+#endif
+	bus_unregister(&vs_server_bus_type);
+}
+
+subsys_initcall(vs_core_server_init);
+module_exit(vs_core_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/debug.h b/drivers/vservices/debug.h
new file mode 100644
index 0000000..b379b04
--- /dev/null
+++ b/drivers/vservices/debug.h
@@ -0,0 +1,74 @@
+/*
+ * drivers/vservices/debug.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Debugging macros and support functions for Virtual Services.
+ */
+#ifndef _VSERVICES_DEBUG_H
+#define _VSERVICES_DEBUG_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+#include <linux/printk.h>
+#else
+#ifndef no_printk
+#define no_printk(format, args...) do { } while (0)
+#endif
+#endif
+
+#include <vservices/session.h>
+#include "transport.h"
+
+#define VS_DEBUG_TRANSPORT		(1 << 0)
+#define VS_DEBUG_TRANSPORT_MESSAGES	(1 << 1)
+#define VS_DEBUG_SESSION		(1 << 2)
+#define VS_DEBUG_CLIENT			(1 << 3)
+#define VS_DEBUG_CLIENT_CORE		(1 << 4)
+#define VS_DEBUG_SERVER			(1 << 5)
+#define VS_DEBUG_SERVER_CORE		(1 << 6)
+#define VS_DEBUG_PROTOCOL		(1 << 7)
+#define VS_DEBUG_ALL			0xff
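+/*
+ * Each bit above is tested against session->debug_mask by the vs_debug()
+ * and vs_dev_debug() macros below; VS_DEBUG_ALL enables every category for
+ * a session.
+ */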
+
+#ifdef CONFIG_VSERVICES_DEBUG
+
+#define vs_debug(type, session, format, args...)			\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(&(session)->dev, format, ##args);	\
+	} while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...)		\
+	do {								\
+		if ((session)->debug_mask & (type))			\
+			dev_dbg(dev, format, ##args);			\
+	} while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf)
+{
+	if (session->debug_mask & VS_DEBUG_TRANSPORT_MESSAGES)
+		print_hex_dump_bytes("msg:", DUMP_PREFIX_OFFSET,
+				mbuf->data, mbuf->size);
+}
+
+#else
+
+/* Dummy versions: Use no_printk to retain type/format string checking */
+#define vs_debug(type, session, format, args...) \
+	do { (void)session; no_printk(format, ##args); } while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+	do { (void)session; (void)dev; no_printk(format, ##args); } while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+		struct vs_mbuf *mbuf) {}
+
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+#endif /* _VSERVICES_DEBUG_H */
diff --git a/drivers/vservices/devio.c b/drivers/vservices/devio.c
new file mode 100644
index 0000000..b3ed4ab
--- /dev/null
+++ b/drivers/vservices/devio.c
@@ -0,0 +1,1059 @@
+/*
+ * devio.c - cdev I/O for service devices
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd
+ *     Author: Philip Derrin <philip@cog.systems>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+#include <vservices/ioctl.h>
+#include "session.h"
+
+#define VSERVICES_DEVICE_MAX (VS_MAX_SERVICES * VS_MAX_SESSIONS)
+
+struct vs_devio_priv {
+	struct kref kref;
+	bool running, reset;
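+	/*
+	 * running is set by the start() callback and cleared by reset();
+	 * reset, once set, stays set until the device node is closed and
+	 * reopened.
+	 */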
+
+	/* Receive queue */
+	wait_queue_head_t recv_wq;
+	atomic_t notify_pending;
+	struct list_head recv_queue;
+};
+
+static void
+vs_devio_priv_free(struct kref *kref)
+{
+	struct vs_devio_priv *priv = container_of(kref, struct vs_devio_priv,
+			kref);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	kfree(priv);
+}
+
+static void vs_devio_priv_put(struct vs_devio_priv *priv)
+{
+	kref_put(&priv->kref, vs_devio_priv_free);
+}
+
+static int
+vs_devio_service_probe(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	kref_init(&priv->kref);
+	priv->running = false;
+	priv->reset = false;
+	init_waitqueue_head(&priv->recv_wq);
+	atomic_set(&priv->notify_pending, 0);
+	INIT_LIST_HEAD(&priv->recv_queue);
+
+	dev_set_drvdata(&service->dev, priv);
+
+	wake_up(&service->quota_wq);
+
+	return 0;
+}
+
+static int
+vs_devio_service_remove(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(priv->running);
+	WARN_ON(!list_empty_careful(&priv->recv_queue));
+	WARN_ON(waitqueue_active(&priv->recv_wq));
+
+	vs_devio_priv_put(priv);
+
+	return 0;
+}
+
+static int
+vs_devio_service_receive(struct vs_service_device *service,
+		struct vs_mbuf *mbuf)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	WARN_ON(!priv->running);
+
+	spin_lock(&priv->recv_wq.lock);
+	list_add_tail(&mbuf->queue, &priv->recv_queue);
+	wake_up_locked(&priv->recv_wq);
+	spin_unlock(&priv->recv_wq.lock);
+
+	return 0;
+}
+
+static void
+vs_devio_service_notify(struct vs_service_device *service, u32 flags)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	int old, cur;
+
+	WARN_ON(!priv->running);
+
+	if (!flags)
+		return;
+
+	/* open-coded atomic_or() */
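+	/* (equivalent to atomic_or(flags, &priv->notify_pending) where that
+	 * helper is available) */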
+	cur = atomic_read(&priv->notify_pending);
+	while ((old = atomic_cmpxchg(&priv->notify_pending,
+					cur, cur | flags)) != cur)
+		cur = old;
+
+	wake_up(&priv->recv_wq);
+}
+
+static void
+vs_devio_service_start(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+	if (!priv->reset) {
+		WARN_ON(priv->running);
+		priv->running = true;
+		wake_up(&service->quota_wq);
+	}
+}
+
+static void
+vs_devio_service_reset(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+	struct vs_mbuf *mbuf, *tmp;
+
+	WARN_ON(!priv->running && !priv->reset);
+
+	/*
+	 * Mark the service as being in reset. This flag can never be cleared
+	 * on an open device; the user must acknowledge the reset by closing
+	 * and reopening the device.
+	 */
+	priv->reset = true;
+	priv->running = false;
+
+	spin_lock_irq(&priv->recv_wq.lock);
+	list_for_each_entry_safe(mbuf, tmp, &priv->recv_queue, queue)
+		vs_service_free_mbuf(service, mbuf);
+	INIT_LIST_HEAD(&priv->recv_queue);
+	spin_unlock_irq(&priv->recv_wq.lock);
+	wake_up_all(&priv->recv_wq);
+}
+
+/*
+ * This driver will be registered by the core server module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_server_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= true,
+	.rx_atomic	= true,
+
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	/*
+	 * Set reasonable default quotas. These can be overridden by passing
+	 * nonzero values to IOCTL_VS_BIND_SERVER, which will set the
+	 * service's *_quota_set fields.
+	 */
+	.in_quota_min	= 1,
+	.in_quota_best	= 8,
+	.out_quota_min	= 1,
+	.out_quota_best	= 8,
+
+	/* Mark the notify counts as invalid; the service's values are used. */
+	.in_notify_count = (unsigned)-1,
+	.out_notify_count = (unsigned)-1,
+
+	.driver		= {
+		.name			= "vservices-server-devio",
+		.owner			= NULL, /* set by core server */
+		.bus			= NULL, /* set by core server */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_server_driver);
+
+static int
+vs_devio_bind_server(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the server module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_server_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Set up the quota and notify counts. */
+	service->in_quota_set = bind->recv_quota;
+	service->out_quota_set = bind->send_quota;
+	service->notify_send_bits = bind->send_notify_bits;
+	service->notify_recv_bits = bind->recv_notify_bits;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_server_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_server_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
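+
+/*
+ * Illustrative only: a userspace server typically opens the service's
+ * character device node, fills a struct vs_ioctl_bind with its requested
+ * quotas and notify bits, and issues ioctl(fd, IOCTL_VS_BIND_SERVER, &bind);
+ * on success the granted quotas and maximum message size are written back
+ * into the same structure.
+ */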
+
+/*
+ * This driver will be registered by the core client module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_client_driver = {
+	/* No protocol, so the normal bus match will never bind this. */
+	.protocol	= NULL,
+	.is_server	= false,
+	.rx_atomic	= true,
+
+	.probe		= vs_devio_service_probe,
+	.remove		= vs_devio_service_remove,
+	.receive	= vs_devio_service_receive,
+	.notify		= vs_devio_service_notify,
+	.start		= vs_devio_service_start,
+	.reset		= vs_devio_service_reset,
+
+	.driver		= {
+		.name			= "vservices-client-devio",
+		.owner			= NULL, /* set by core client */
+		.bus			= NULL, /* set by core client */
+		.suppress_bind_attrs	= true, /* see vs_devio_poll */
+	},
+};
+EXPORT_SYMBOL_GPL(vs_devio_client_driver);
+
+static int
+vs_devio_bind_client(struct vs_service_device *service,
+		struct vs_ioctl_bind *bind)
+{
+	int ret = -ENODEV;
+
+	/* Ensure the client module is loaded and the driver is registered. */
+	if (!try_module_get(vs_devio_client_driver.driver.owner))
+		goto fail_module_get;
+
+	device_lock(&service->dev);
+	ret = -EBUSY;
+	if (service->dev.driver != NULL)
+		goto fail_device_unbound;
+
+	/* Manually probe the driver. */
+	service->dev.driver = &vs_devio_client_driver.driver;
+	ret = service->dev.bus->probe(&service->dev);
+	if (ret < 0)
+		goto fail_probe_driver;
+
+	ret = device_bind_driver(&service->dev);
+	if (ret < 0)
+		goto fail_bind_driver;
+
+	/* Pass the allocated quotas back to the user. */
+	bind->recv_quota = service->recv_quota;
+	bind->send_quota = service->send_quota;
+	bind->msg_size = vs_service_max_mbuf_size(service);
+	bind->send_notify_bits = service->notify_send_bits;
+	bind->recv_notify_bits = service->notify_recv_bits;
+
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+
+	return 0;
+
+fail_bind_driver:
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+	service->dev.driver = NULL;
+fail_device_unbound:
+	device_unlock(&service->dev);
+	module_put(vs_devio_client_driver.driver.owner);
+fail_module_get:
+	return ret;
+}
+
+static struct vs_devio_priv *
+vs_devio_priv_get_from_service(struct vs_service_device *service)
+{
+	struct vs_devio_priv *priv = NULL;
+	struct device_driver *drv;
+
+	if (!service)
+		return NULL;
+
+	device_lock(&service->dev);
+	drv = service->dev.driver;
+
+	if ((drv == &vs_devio_client_driver.driver) ||
+			(drv == &vs_devio_server_driver.driver)) {
+		vs_service_state_lock(service);
+		priv = dev_get_drvdata(&service->dev);
+		if (priv)
+			kref_get(&priv->kref);
+		vs_service_state_unlock(service);
+	}
+
+	device_unlock(&service->dev);
+
+	return priv;
+}
+
+static int
+vs_devio_open(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service;
+
+	if (imajor(inode) != vservices_cdev_major)
+		return -ENODEV;
+
+	service = vs_service_lookup_by_devt(inode->i_rdev);
+	if (!service)
+		return -ENODEV;
+
+	file->private_data = service;
+
+	return 0;
+}
+
+static int
+vs_devio_release(struct inode *inode, struct file *file)
+{
+	struct vs_service_device *service = file->private_data;
+
+	if (service) {
+		struct vs_devio_priv *priv =
+			vs_devio_priv_get_from_service(service);
+
+		if (priv) {
+			device_release_driver(&service->dev);
+			vs_devio_priv_put(priv);
+		}
+
+		file->private_data = NULL;
+		vs_put_service(service);
+	}
+
+	return 0;
+}
+
+static struct iovec *
+vs_devio_check_iov(struct vs_ioctl_iovec *io, bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	unsigned i;
+	int ret;
+
+	if (io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	iov = kmalloc(sizeof(*iov) * io->iovcnt, GFP_KERNEL);
+	if (!iov)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(iov, io->iov, sizeof(*iov) * io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < io->iovcnt; i++) {
+		ssize_t iov_len = (ssize_t)iov[i].iov_len;
+
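+		/*
+		 * Cap the aggregate length at MAX_RW_COUNT, as readv() and
+		 * writev() do for regular I/O.
+		 */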
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	return iov;
+
+fail:
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+static ssize_t
+vs_devio_send(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, ssize_t to_send, bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	vs_service_state_lock(service);
+
+	/*
+	 * Waiting alloc, open-coded because the devio driver has no real
+	 * state structure or base state: sleep on service->quota_wq until
+	 * send quota is available, a signal is delivered, or the service
+	 * stops running or is reset.
+	 */
+	ret = 0;
+	while (!vs_service_send_mbufs_available(service)) {
+		if (nonblocking) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		prepare_to_wait_exclusive(&service->quota_wq, &wait,
+				TASK_INTERRUPTIBLE);
+
+		vs_service_state_unlock(service);
+		schedule();
+		vs_service_state_lock(service);
+
+		if (priv->reset) {
+			ret = -ECONNRESET;
+			break;
+		}
+
+		if (!priv->running) {
+			ret = -ENOTCONN;
+			break;
+		}
+	}
+	finish_wait(&service->quota_wq, &wait);
+
+	if (ret)
+		goto fail_alloc;
+
+	mbuf = vs_service_alloc_mbuf(service, to_send, GFP_KERNEL);
+	if (IS_ERR(mbuf)) {
+		ret = PTR_ERR(mbuf);
+		goto fail_alloc;
+	}
+
+	/* Ready to send; copy data into the mbuf. */
+	ret = -EFAULT;
+	for (i = 0; i < iovcnt; i++) {
+		if (copy_from_user(mbuf->data + offset, iov[i].iov_base,
+					iov[i].iov_len))
+			goto fail_copy;
+		offset += iov[i].iov_len;
+	}
+	mbuf->size = to_send;
+
+	/* Send the message. */
+	ret = vs_service_send(service, mbuf);
+	if (ret < 0)
+		goto fail_send;
+
+	/* Wake the next waiter, if there's more quota available. */
+	if (waitqueue_active(&service->quota_wq) &&
+			vs_service_send_mbufs_available(service) > 0)
+		wake_up(&service->quota_wq);
+
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+
+	return to_send;
+
+fail_send:
+fail_copy:
+	vs_service_free_mbuf(service, mbuf);
+	wake_up(&service->quota_wq);
+fail_alloc:
+	vs_service_state_unlock(service);
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+static ssize_t
+vs_devio_recv(struct vs_service_device *service, struct iovec *iov,
+		size_t iovcnt, u32 *notify_bits, ssize_t recv_space,
+		bool nonblocking)
+{
+	struct vs_mbuf *mbuf = NULL;
+	struct vs_devio_priv *priv;
+	unsigned i;
+	ssize_t offset = 0;
+	ssize_t ret;
+	DEFINE_WAIT(wait);
+
+	priv = vs_devio_priv_get_from_service(service);
+	ret = -ENODEV;
+	if (!priv)
+		goto fail_priv_get;
+
+	/* Take the recv_wq lock, which also protects recv_queue. */
+	spin_lock_irq(&priv->recv_wq.lock);
+
+	/* Wait for a message, notification, or reset. */
+	ret = wait_event_interruptible_exclusive_locked_irq(priv->recv_wq,
+			!list_empty(&priv->recv_queue) || priv->reset ||
+			atomic_read(&priv->notify_pending) || nonblocking);
+
+	if (priv->reset)
+		ret = -ECONNRESET; /* Service reset */
+	else if (!ret && list_empty(&priv->recv_queue))
+		ret = -EAGAIN; /* Nonblocking, or notification */
+
+	if (ret < 0) {
+		spin_unlock_irq(&priv->recv_wq.lock);
+		goto no_mbuf;
+	}
+
+	/* Take the first mbuf from the list, and check its size. */
+	mbuf = list_first_entry(&priv->recv_queue, struct vs_mbuf, queue);
+	if (mbuf->size > recv_space) {
+		spin_unlock_irq(&priv->recv_wq.lock);
+		ret = -EMSGSIZE;
+		goto fail_msg_size;
+	}
+	list_del_init(&mbuf->queue);
+
+	spin_unlock_irq(&priv->recv_wq.lock);
+
+	/* Copy to user. */
+	ret = -EFAULT;
+	for (i = 0; (mbuf->size > offset) && (i < iovcnt); i++) {
+		size_t len = min(mbuf->size - offset, iov[i].iov_len);
+		if (copy_to_user(iov[i].iov_base, mbuf->data + offset, len))
+			goto fail_copy;
+		offset += len;
+	}
+	ret = offset;
+
+no_mbuf:
+	/*
+	 * Read and clear the pending notification bits. If any notifications
+	 * are received, don't return an error, even if we failed to receive a
+	 * message.
+	 */
+	*notify_bits = atomic_xchg(&priv->notify_pending, 0);
+	if ((ret < 0) && *notify_bits)
+		ret = 0;
+
+fail_copy:
+	if (mbuf)
+		vs_service_free_mbuf(service, mbuf);
+fail_msg_size:
+	vs_devio_priv_put(priv);
+fail_priv_get:
+	return ret;
+}
+
+static int
+vs_devio_check_perms(struct file *file, unsigned flags)
+{
+	if ((flags & MAY_READ) && !(file->f_mode & FMODE_READ))
+		return -EBADF;
+
+	if ((flags & MAY_WRITE) && !(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+
+	return security_file_permission(file, flags);
+}
+
+static long
+vs_devio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_ioctl_iovec io;
+	u32 flags;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		ret = vs_service_reset(service, service);
+		break;
+	case IOCTL_VS_GET_NAME:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->name != NULL) {
+			size_t len = strnlen(service->name,
+					_IOC_SIZE(IOCTL_VS_GET_NAME) - 1);
+			if (copy_to_user(ptr, service->name, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_GET_PROTOCOL:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (service->protocol != NULL) {
+			size_t len = strnlen(service->protocol,
+					_IOC_SIZE(IOCTL_VS_GET_PROTOCOL) - 1);
+			if (copy_to_user(ptr, service->protocol, len + 1))
+				ret = -EFAULT;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&bind, ptr, sizeof(bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_devio_bind_server(service, &bind);
+		if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+			ret = -EFAULT;
+		break;
+	case IOCTL_VS_NOTIFY:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&flags, ptr, sizeof(flags))) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = vs_service_notify(service, flags);
+		break;
+	case IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_iov(&io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+		break;
+	case IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&io, ptr, sizeof(io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_iov(&io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, io.iovcnt,
+			&io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &io.notify_bits,
+					sizeof(io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	default:
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
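+/*
+ * 32-bit compat handling: struct vs_ioctl_bind and struct vs_ioctl_iovec
+ * contain size_t and pointer fields whose layout differs for 32-bit
+ * userspace on a 64-bit kernel, so mirrored structures using compat_size_t
+ * and compat_uptr_t are defined here and converted explicitly.
+ */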
+struct vs_compat_ioctl_bind {
+	__u32 send_quota;
+	__u32 recv_quota;
+	__u32 send_notify_bits;
+	__u32 recv_notify_bits;
+	compat_size_t msg_size;
+};
+
+#define compat_ioctl_bind_conv(dest, src) ({ \
+	dest.send_quota = src.send_quota;		\
+	dest.recv_quota = src.recv_quota;		\
+	dest.send_notify_bits = src.send_notify_bits;	\
+	dest.recv_notify_bits = src.recv_notify_bits;	\
+	dest.msg_size = (compat_size_t)src.msg_size;	\
+})
+
+#define COMPAT_IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_compat_ioctl_bind)
+#define COMPAT_IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_compat_ioctl_bind)
+
+struct vs_compat_ioctl_iovec {
+	union {
+		__u32 iovcnt; /* input */
+		__u32 notify_bits; /* output (recv only) */
+	};
+	compat_uptr_t iov;
+};
+
+#define COMPAT_IOCTL_VS_SEND \
+    _IOW('4', 6, struct vs_compat_ioctl_iovec)
+#define COMPAT_IOCTL_VS_RECV \
+    _IOWR('4', 7, struct vs_compat_ioctl_iovec)
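+
+/*
+ * These command numbers differ from the native IOCTL_VS_SEND and
+ * IOCTL_VS_RECV values because _IOW()/_IOWR() encode the argument
+ * structure's size, and the compat structure is smaller.
+ */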
+
+static struct iovec *
+vs_devio_check_compat_iov(struct vs_compat_ioctl_iovec *c_io,
+	bool is_send, ssize_t *total)
+{
+	struct iovec *iov;
+	struct compat_iovec *c_iov;
+
+	unsigned i;
+	int ret;
+
+	if (c_io->iovcnt > UIO_MAXIOV)
+		return ERR_PTR(-EINVAL);
+
+	c_iov = kzalloc(sizeof(*c_iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!c_iov)
+		return ERR_PTR(-ENOMEM);
+
+	iov = kzalloc(sizeof(*iov) * c_io->iovcnt, GFP_KERNEL);
+	if (!iov) {
+		kfree(c_iov);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (copy_from_user(c_iov, (struct compat_iovec __user *)
+		compat_ptr(c_io->iov), sizeof(*c_iov) * c_io->iovcnt)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	*total = 0;
+	for (i = 0; i < c_io->iovcnt; i++) {
+		ssize_t iov_len;
+		iov[i].iov_base = compat_ptr(c_iov[i].iov_base);
+		iov[i].iov_len = (compat_size_t)c_iov[i].iov_len;
+
+		iov_len = (ssize_t)iov[i].iov_len;
+
+		if (iov_len > MAX_RW_COUNT - *total) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+					iov[i].iov_base, iov_len)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		*total += iov_len;
+	}
+
+	kfree(c_iov);
+	return iov;
+
+fail:
+	kfree(c_iov);
+	kfree(iov);
+	return ERR_PTR(ret);
+}
+
+static long
+vs_devio_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *ptr = (void __user *)arg;
+	struct vs_service_device *service = file->private_data;
+	struct vs_ioctl_bind bind;
+	struct vs_compat_ioctl_bind compat_bind;
+	struct vs_compat_ioctl_iovec compat_io;
+	long ret;
+	ssize_t iov_total;
+	struct iovec *iov;
+
+	if (!service)
+		return -ENODEV;
+
+	switch (cmd) {
+	case IOCTL_VS_RESET_SERVICE:
+	case IOCTL_VS_GET_NAME:
+	case IOCTL_VS_GET_PROTOCOL:
+		return vs_devio_ioctl(file, cmd, arg);
+	case COMPAT_IOCTL_VS_SEND:
+		ret = vs_devio_check_perms(file, MAY_WRITE);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_compat_iov(&compat_io, true, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_send(service, iov, compat_io.iovcnt, iov_total,
+				file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		break;
+	case COMPAT_IOCTL_VS_RECV:
+		ret = vs_devio_check_perms(file, MAY_READ);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		iov = vs_devio_check_compat_iov(&compat_io, false, &iov_total);
+		if (IS_ERR(iov)) {
+			ret = PTR_ERR(iov);
+			break;
+		}
+
+		ret = vs_devio_recv(service, iov, compat_io.iovcnt,
+			&compat_io.notify_bits, iov_total,
+			file->f_flags & O_NONBLOCK);
+		kfree(iov);
+
+		if (ret >= 0) {
+			u32 __user *notify_bits_ptr = ptr + offsetof(
+					struct vs_compat_ioctl_iovec, notify_bits);
+			if (copy_to_user(notify_bits_ptr, &compat_io.notify_bits,
+					sizeof(compat_io.notify_bits)))
+				ret = -EFAULT;
+		}
+		break;
+	case COMPAT_IOCTL_VS_BIND_CLIENT:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		ret = vs_devio_bind_client(service, &bind);
+		compat_ioctl_bind_conv(compat_bind, bind);
+		if (!ret && copy_to_user(ptr, &compat_bind,
+					sizeof(compat_bind)))
+			ret = -EFAULT;
+		break;
+	case COMPAT_IOCTL_VS_BIND_SERVER:
+		ret = vs_devio_check_perms(file, MAY_EXEC);
+		if (ret < 0)
+			break;
+		if (copy_from_user(&compat_bind, ptr, sizeof(compat_bind))) {
+			ret = -EFAULT;
+			break;
+		}
+		compat_ioctl_bind_conv(bind, compat_bind);
+		ret = vs_devio_bind_server(service, &bind);
+		compat_ioctl_bind_conv(compat_bind, bind);
+		if (!ret && copy_to_user(ptr, &compat_bind,
+					sizeof(compat_bind)))
+			ret = -EFAULT;
+		break;
+	default:
+		dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+				arg);
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+static unsigned int
+vs_devio_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct vs_service_device *service = file->private_data;
+	struct vs_devio_priv *priv = vs_devio_priv_get_from_service(service);
+	unsigned int flags = 0;
+
+	poll_wait(file, &service->quota_wq, wait);
+
+	if (priv) {
+		/*
+		 * Note: there is no way for us to ensure that all poll
+		 * waiters on a given wait queue have gone away, other than to
+		 * actually close the file. So, this poll_wait() is only safe
+		 * if we never release our claim on the service before the
+		 * file is closed.
+		 *
+		 * We try to guarantee this by only unbinding the devio driver
+		 * on close, and setting suppress_bind_attrs in the driver so
+		 * root can't unbind us with sysfs.
+		 */
+		poll_wait(file, &priv->recv_wq, wait);
+
+		if (priv->reset) {
+			/* Service reset; raise poll error. */
+			flags |= POLLERR | POLLHUP;
+		} else if (priv->running) {
+			if (!list_empty_careful(&priv->recv_queue))
+				flags |= POLLRDNORM | POLLIN;
+			if (atomic_read(&priv->notify_pending))
+				flags |= POLLRDNORM | POLLIN;
+			if (vs_service_send_mbufs_available(service) > 0)
+				flags |= POLLWRNORM | POLLOUT;
+		}
+
+		vs_devio_priv_put(priv);
+	} else {
+		/* No driver attached. Return error flags. */
+		flags |= POLLERR | POLLHUP;
+	}
+
+	return flags;
+}
+
+static const struct file_operations vs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vs_devio_open,
+	.release	= vs_devio_release,
+	.unlocked_ioctl	= vs_devio_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= vs_devio_compat_ioctl,
+#endif
+	.poll		= vs_devio_poll,
+};
+
+int vservices_cdev_major;
+static struct cdev vs_cdev;
+
+int __init
+vs_devio_init(void)
+{
+	dev_t dev;
+	int r;
+
+	r = alloc_chrdev_region(&dev, 0, VSERVICES_DEVICE_MAX,
+			"vs_service");
+	if (r < 0)
+		goto fail_alloc_chrdev;
+	vservices_cdev_major = MAJOR(dev);
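+	/* vs_devio_open() uses this major to reject foreign device nodes. */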
+
+	cdev_init(&vs_cdev, &vs_fops);
+	r = cdev_add(&vs_cdev, dev, VSERVICES_DEVICE_MAX);
+	if (r < 0)
+		goto fail_cdev_add;
+
+	return 0;
+
+fail_cdev_add:
+	unregister_chrdev_region(dev, VSERVICES_DEVICE_MAX);
+fail_alloc_chrdev:
+	return r;
+}
+
+void __exit
+vs_devio_exit(void)
+{
+	cdev_del(&vs_cdev);
+	unregister_chrdev_region(MKDEV(vservices_cdev_major, 0),
+			VSERVICES_DEVICE_MAX);
+}
diff --git a/drivers/vservices/protocol/Kconfig b/drivers/vservices/protocol/Kconfig
new file mode 100644
index 0000000..e0f2798c
--- /dev/null
+++ b/drivers/vservices/protocol/Kconfig
@@ -0,0 +1,44 @@
+#
+# vServices protocol drivers configuration
+#
+
+if VSERVICES_SERVER || VSERVICES_CLIENT
+
+menu "Protocol drivers"
+config VSERVICES_PROTOCOL_BLOCK
+	bool
+
+config VSERVICES_PROTOCOL_BLOCK_SERVER
+	tristate "Block server protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_PROTOCOL_BLOCK
+	help
+	  This option adds support for the Virtual Services block protocol server.
+
+config VSERVICES_PROTOCOL_BLOCK_CLIENT
+	tristate "Block client protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_PROTOCOL_BLOCK
+	help
+	  This option adds support for the Virtual Services block protocol client.
+
+config VSERVICES_PROTOCOL_SERIAL
+	bool
+
+config VSERVICES_PROTOCOL_SERIAL_SERVER
+	tristate "Serial server protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+	select VSERVICES_PROTOCOL_SERIAL
+	help
+	  This option adds support for the Virtual Services serial protocol server.
+
+config VSERVICES_PROTOCOL_SERIAL_CLIENT
+	tristate "Serial client protocol"
+	depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+	select VSERVICES_PROTOCOL_SERIAL
+	help
+	  This option adds support for the Virtual Services serial protocol client.
+
+endmenu
+
+endif # VSERVICES_SERVER || VSERVICES_CLIENT
diff --git a/drivers/vservices/protocol/Makefile b/drivers/vservices/protocol/Makefile
new file mode 100644
index 0000000..0c714e0
--- /dev/null
+++ b/drivers/vservices/protocol/Makefile
@@ -0,0 +1,5 @@
+# This is an autogenerated Makefile for vservice-linux-stacks
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += core/
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK) += block/
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL) += serial/
diff --git a/drivers/vservices/protocol/block/Makefile b/drivers/vservices/protocol/block/Makefile
new file mode 100644
index 0000000..325b57e
--- /dev/null
+++ b/drivers/vservices/protocol/block/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_SERVER)	+= vservices_protocol_block_server.o
+vservices_protocol_block_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_CLIENT)	+= vservices_protocol_block_client.o
+vservices_protocol_block_client-objs = client.o
diff --git a/drivers/vservices/protocol/block/client.c b/drivers/vservices/protocol/block/client.c
new file mode 100644
index 0000000..702a30a8
--- /dev/null
+++ b/drivers/vservices/protocol/block/client.c
@@ -0,0 +1,1186 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the block client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) ((mbuf)->size)
+#define VS_MBUF_DATA(mbuf) ((mbuf)->data)
+#define VS_STATE_SERVICE_PTR(state) ((state)->service)
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_block_client_driver {
+	struct vs_client_block *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+	container_of(d, struct vs_block_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	int i __maybe_unused;
+
+	/* Clear out pending read commands */
+	for_each_set_bit(i, state->state.io.read_bitmask,
+			 VSERVICE_BLOCK_IO_READ_MAX_PENDING) {
+		void *tag = state->state.io.read_tags[i];
+
+		if (client->io.nack_read)
+			client->io.nack_read(state, tag,
+					     VSERVICE_BLOCK_SERVICE_RESET);
+
+		__clear_bit(i, state->state.io.read_bitmask);
+	}
+
+	/* Clear out pending  write commands */
+	for_each_set_bit(i, state->state.io.write_bitmask,
+			 VSERVICE_BLOCK_IO_WRITE_MAX_PENDING) {
+		void *tag = state->state.io.write_tags[i];
+
+		if (client->io.nack_write)
+			client->io.nack_write(state, tag,
+					      VSERVICE_BLOCK_SERVICE_RESET);
+
+		__clear_bit(i, state->state.io.write_bitmask);
+	}
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	_vs_client_block_req_open(state);
+
+	vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
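+/*
+ * The *_bh variants below are installed instead of the handlers above when
+ * the client driver declares tx_atomic, and therefore take the service
+ * state lock with bottom halves disabled.
+ */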
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	_vs_client_block_req_open(state);
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int block_client_probe(struct vs_service_device *service);
+static int block_client_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+				struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_client_register(struct vs_client_block *client,
+				     const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_block_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = block_client_probe;
+	driver->vsdrv.remove = block_client_remove;
+	driver->vsdrv.receive = block_handle_message;
+	driver->vsdrv.notify = block_handle_notify;
+	driver->vsdrv.start = client->tx_atomic ?
+	    block_handle_start_bh : block_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    block_handle_reset_bh : block_handle_reset;
+	driver->vsdrv.tx_ready = block_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_client_register);
+
+int vservice_block_client_unregister(struct vs_client_block *client)
+{
+	struct vs_block_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_client_unregister);
+
+static int block_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client = to_client_driver(vsdrv)->client;
+	struct vs_client_block_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int block_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_block *client = to_client_driver(vsdrv)->client;
+	struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_REQ_OPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_open);
+static int _vs_client_block_req_close(struct vs_client_block_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_REQ_CLOSE;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_close);
+static int _vs_client_block_req_reopen(struct vs_client_block_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_REQ_REOPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_reopen);
+static int
+block_base_handle_ack_open(const struct vs_client_block *_client,
+			   struct vs_client_block_state *_state,
+			   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	_state->io.sector_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	_state->io.segment_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	_state->readonly =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	_state->sector_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	_state->segment_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	_state->device_sectors =
+	    *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	_state->flushable =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+	_state->committable =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	_client->opened(_state);
+	return 0;
+
+}
+
+static int
+block_base_handle_nack_open(const struct vs_client_block *_client,
+			    struct vs_client_block_state *_state,
+			    struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+		"Open operation failed for device %s\n",
+		VS_STATE_SERVICE_PTR(_state)->name);
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_open);
+static int
+block_base_handle_ack_close(const struct vs_client_block *_client,
+			    struct vs_client_block_state *_state,
+			    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+static int
+block_base_handle_nack_close(const struct vs_client_block *_client,
+			     struct vs_client_block_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_close);
+static int
+block_base_handle_ack_reopen(const struct vs_client_block *_client,
+			     struct vs_client_block_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->reopened) {
+		_client->reopened(_state);
+		return 0;
+	}
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return _vs_client_block_req_open(_state);
+
+}
+
+static int
+block_base_handle_nack_reopen(const struct vs_client_block *_client,
+			      struct vs_client_block_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_reopen);
+int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state *_state,
+					struct vs_pbuf *data,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	data->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+			   sizeof(uint32_t));
+	data->max_size = data->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_getbufs_ack_read);
+int vs_client_block_io_free_ack_read(struct vs_client_block_state *_state,
+				     struct vs_pbuf *data,
+				     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_ack_read);
+struct vs_mbuf *vs_client_block_io_alloc_req_write(struct vs_client_block_state
+						   *_state,
+						   struct vs_pbuf *data,
+						   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!data)
+		goto fail;
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL + sizeof(uint32_t));
+	data->size = _state->io.segment_size;
+	data->max_size = data->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_alloc_req_write);
+int vs_client_block_io_free_req_write(struct vs_client_block_state *_state,
+				      struct vs_pbuf *data,
+				      struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_req_write);
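+
+/*
+ * Read and write requests carry a small integer tag on the wire: the
+ * caller's opaque cookie is stored in state.io.read_tags[] or
+ * write_tags[] at a free index taken from the matching bitmask, and is
+ * handed back when the corresponding ack or nack arrives, or when the
+ * service is reset.
+ */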
+int
+vs_client_block_io_req_read(struct vs_client_block_state *_state, void *_opaque,
+			    uint64_t sector_index, uint32_t num_sects,
+			    bool nodelay, bool flush, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 24UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+	uint32_t _opaque_tmp;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	_opaque_tmp =
+	    find_first_zero_bit(_state->state.io.read_bitmask,
+				VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = VSERVICE_BLOCK_IO_REQ_READ;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque_tmp;
+	*(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    sector_index;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    num_sects;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    nodelay;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    flush;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.io.read_tags[_opaque_tmp] = _opaque;
+	__set_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_read);
+int
+vs_client_block_io_req_write(struct vs_client_block_state *_state,
+			     void *_opaque, uint64_t sector_index,
+			     uint32_t num_sects, bool nodelay, bool flush,
+			     bool commit, struct vs_pbuf data,
+			     struct vs_mbuf *_mbuf)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_block *_client =
+	    to_client_driver(vsdrv)->client;
+	uint32_t _opaque_tmp;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	_opaque_tmp =
+	    find_first_zero_bit(_state->state.io.write_bitmask,
+				VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_BLOCK_IO_REQ_WRITE)
+
+		return -EINVAL;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque_tmp;
+	*(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    sector_index;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    num_sects;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    nodelay;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    flush;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    commit;
+	if ((data.size + sizeof(vs_message_id_t) + 28UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (data.size < data.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 28UL) =
+	    data.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.io.write_tags[_opaque_tmp] = _opaque;
+	__set_bit(_opaque_tmp, _state->state.io.write_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_write);
+static int
+block_io_handle_ack_read(const struct vs_client_block *_client,
+			 struct vs_client_block_state *_state,
+			 struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+	void *_opaque;
+	struct vs_pbuf data;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+	uint32_t _opaque_tmp;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+		return -EPROTO;
+	_opaque = _state->state.io.read_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+	data.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	data.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+			   sizeof(uint32_t));
+	data.max_size = data.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->io.ack_read)
+		return _client->io.ack_read(_state, _opaque, data, _mbuf);
+	return 0;
+}
+
+static int
+block_io_handle_nack_read(const struct vs_client_block *_client,
+			  struct vs_client_block_state *_state,
+			  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	void *_opaque;
+	vservice_block_block_io_error_t err;
+	uint32_t _opaque_tmp;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+		return -EPROTO;
+	_opaque = _state->state.io.read_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+	err =
+	    *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+						  sizeof(vs_message_id_t) +
+						  4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->io.nack_read)
+		return _client->io.nack_read(_state, _opaque, err);
+	return 0;
+}
+
+EXPORT_SYMBOL(block_io_handle_ack_read);
+static int
+block_io_handle_ack_write(const struct vs_client_block *_client,
+			  struct vs_client_block_state *_state,
+			  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	void *_opaque;
+	uint32_t _opaque_tmp;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+		return -EPROTO;
+	_opaque = _state->state.io.write_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->io.ack_write)
+		return _client->io.ack_write(_state, _opaque);
+	return 0;
+}
+
+static int
+block_io_handle_nack_write(const struct vs_client_block *_client,
+			   struct vs_client_block_state *_state,
+			   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	void *_opaque;
+	vservice_block_block_io_error_t err;
+	uint32_t _opaque_tmp;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque_tmp =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+		return -EPROTO;
+	_opaque = _state->state.io.write_tags[_opaque_tmp];
+	__clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+	err =
+	    *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+						  sizeof(vs_message_id_t) +
+						  4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->io.nack_write)
+		return _client->io.nack_write(_state, _opaque, err);
+	return 0;
+}
+
+EXPORT_SYMBOL(block_io_handle_ack_write);
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_block *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_BLOCK_BASE_ACK_OPEN:
+		ret = block_base_handle_ack_open(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_BASE_NACK_OPEN:
+		ret = block_base_handle_nack_open(client, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_BLOCK_BASE_ACK_CLOSE:
+		ret = block_base_handle_ack_close(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_BASE_NACK_CLOSE:
+		ret = block_base_handle_nack_close(client, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_BLOCK_BASE_ACK_REOPEN:
+		ret = block_base_handle_ack_reopen(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_BASE_NACK_REOPEN:
+		ret = block_base_handle_nack_reopen(client, state, _mbuf);
+		break;
+
+/** interface block_io **/
+/* command in parallel read */
+	case VSERVICE_BLOCK_IO_ACK_READ:
+		ret = block_io_handle_ack_read(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_IO_NACK_READ:
+		ret = block_io_handle_nack_read(client, state, _mbuf);
+		break;
+
+/* command in parallel write */
+	case VSERVICE_BLOCK_IO_ACK_WRITE:
+		ret = block_io_handle_ack_write(client, state, _mbuf);
+		break;
+	case VSERVICE_BLOCK_IO_NACK_WRITE:
+		ret = block_io_handle_nack_write(client, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_block *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface block_io **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+int vs_client_block_reopen(struct vs_client_block_state *_state)
+{
+	return _vs_client_block_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_reopen);
+
+int vs_client_block_close(struct vs_client_block_state *_state)
+{
+	return _vs_client_block_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services blockClient Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/block/server.c b/drivers/vservices/protocol/block/server.c
new file mode 100644
index 0000000..a4a7d1a
--- /dev/null
+++ b/drivers/vservices/protocol/block/server.c
@@ -0,0 +1,1371 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the block server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_block_server_driver {
+	struct vs_server_block *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_block_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int block_server_probe(struct vs_service_device *service);
+static int block_server_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+				struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_server_register(struct vs_server_block *server,
+				     const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_block_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_BLOCK_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_BLOCK_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = block_server_probe;
+	driver->vsdrv.remove = block_server_remove;
+	driver->vsdrv.receive = block_handle_message;
+	driver->vsdrv.notify = block_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    block_handle_start_bh : block_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    block_handle_reset_bh : block_handle_reset;
+	driver->vsdrv.tx_ready = block_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_server_register);
+
+int vservice_block_server_unregister(struct vs_server_block *server)
+{
+	struct vs_block_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_server_unregister);
+
+static int block_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server = to_server_driver(vsdrv)->server;
+	struct vs_server_block_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_BLOCK_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int block_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_block *server = to_server_driver(vsdrv)->server;
+	struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
+static int
+vs_server_block_send_ack_open(struct vs_server_block_state *_state, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_ACK_OPEN;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _state->readonly;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    _state->sector_size;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+	    _state->segment_size;
+	*(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    _state->device_sectors;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    _state->flushable;
+	*(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    _state->committable;
+	_state->io.sector_size = _state->sector_size;
+	_state->io.segment_size = _state->segment_size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_open);
+static int
+vs_server_block_send_nack_open(struct vs_server_block_state *_state,
+			       gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_NACK_OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_open);
+static int
+vs_server_block_send_ack_close(struct vs_server_block_state *_state,
+			       gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_ACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_close);
+static int
+vs_server_block_send_nack_close(struct vs_server_block_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_NACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_close);
+static int
+vs_server_block_send_ack_reopen(struct vs_server_block_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_ACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_ack_reopen);
+static int
+vs_server_block_send_nack_reopen(struct vs_server_block_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_BASE_NACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_send_nack_reopen);
+static int
+vs_server_block_handle_req_open(const struct vs_server_block *_server,
+				struct vs_server_block_state *_state,
+				struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->open)
+		return vs_server_block_open_complete(_state,
+						     _server->open(_state));
+	return vs_server_block_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_open_complete(struct vs_server_block_state *_state,
+				  vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_block_send_ack_open(_state,
+						  vs_service_has_atomic_rx
+						  (VS_STATE_SERVICE_PTR(_state))
+						  ? GFP_ATOMIC : GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_block_send_nack_open(_state,
+						   vs_service_has_atomic_rx
+						   (VS_STATE_SERVICE_PTR
+						    (_state)) ? GFP_ATOMIC :
+						   GFP_KERNEL);
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_open_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_open);
+static int
+vs_server_block_handle_req_close(const struct vs_server_block *_server,
+				 struct vs_server_block_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->close)
+		return vs_server_block_close_complete(_state,
+						      _server->close(_state));
+	return vs_server_block_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_close_complete(struct vs_server_block_state *_state,
+				   vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_block_send_ack_close(_state,
+						   vs_service_has_atomic_rx
+						   (VS_STATE_SERVICE_PTR
+						    (_state)) ? GFP_ATOMIC :
+						   GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_block_send_nack_close(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+	if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+		wake_up_all(&_state->service->quota_wq);
+	}
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_close_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_close);
+static int
+vs_server_block_handle_req_reopen(const struct vs_server_block *_server,
+				  struct vs_server_block_state *_state,
+				  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->reopen)
+		return vs_server_block_reopen_complete(_state,
+						       _server->reopen(_state));
+	else
+		return vs_server_block_send_nack_reopen(_state,
+							vs_service_has_atomic_rx
+							(VS_STATE_SERVICE_PTR
+							 (_state)) ? GFP_ATOMIC
+							: GFP_KERNEL);
+
+}
+
+int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+				    vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS) {
+		_state->io.sector_size = _state->sector_size;
+		_state->io.segment_size = _state->segment_size;
+		ret =
+		    vs_server_block_send_ack_reopen(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+	} else if (resp == VS_SERVER_RESP_FAILURE) {
+		ret =
+		    vs_server_block_send_nack_reopen(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	}
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_block_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_block_handle_req_reopen);
+struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct vs_server_block_state
+						  *_state, struct vs_pbuf *data,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!data)
+		goto fail;
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+			   sizeof(uint32_t));
+	data->size = _state->io.segment_size;
+	data->max_size = data->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_alloc_ack_read);
+int vs_server_block_io_free_ack_read(struct vs_server_block_state *_state,
+				     struct vs_pbuf *data,
+				     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_ack_read);
+int vs_server_block_io_getbufs_req_write(struct vs_server_block_state *_state,
+					 struct vs_pbuf *data,
+					 struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	data->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL);
+	data->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL + sizeof(uint32_t));
+	data->max_size = data->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_getbufs_req_write);
+int vs_server_block_io_free_req_write(struct vs_server_block_state *_state,
+				      struct vs_pbuf *data,
+				      struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_req_write);
+int
+vs_server_block_io_send_ack_read(struct vs_server_block_state *_state,
+				 uint32_t _opaque, struct vs_pbuf data,
+				 struct vs_mbuf *_mbuf)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.read_bitmask))
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_BLOCK_IO_ACK_READ)
+
+		return -EINVAL;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+	if ((data.size + sizeof(vs_message_id_t) + 4UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (data.size < data.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    data.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.read_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_read);
+int
+vs_server_block_io_send_nack_read(struct vs_server_block_state *_state,
+				  uint32_t _opaque,
+				  vservice_block_block_io_error_t err,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.read_bitmask))
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_IO_NACK_READ;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+	*(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+					      sizeof(vs_message_id_t) + 4UL) =
+	    err;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.read_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_read);
+int
+vs_server_block_io_send_ack_write(struct vs_server_block_state *_state,
+				  uint32_t _opaque, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.write_bitmask))
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_IO_ACK_WRITE;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.write_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_write);
+int
+vs_server_block_io_send_nack_write(struct vs_server_block_state *_state,
+				   uint32_t _opaque,
+				   vservice_block_block_io_error_t err,
+				   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_block *_server =
+	    to_server_driver(vsdrv)->server;
+
+	if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+	if (!test_bit(_opaque, _state->state.io.write_bitmask))
+		return -EPROTO;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_BLOCK_IO_NACK_WRITE;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _opaque;
+	*(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+					      sizeof(vs_message_id_t) + 4UL) =
+	    err;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	__clear_bit(_opaque, _state->state.io.write_bitmask);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_write);
+static int
+vs_server_block_io_handle_req_read(const struct vs_server_block *_server,
+				   struct vs_server_block_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 24UL;
+	uint32_t _opaque;
+	uint64_t sector_index;
+	uint32_t num_sects;
+	bool nodelay;
+	bool flush;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (test_bit(_opaque, _state->state.io.read_bitmask))
+		return -EPROTO;
+	__set_bit(_opaque, _state->state.io.read_bitmask);
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	sector_index =
+	    *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	num_sects =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	nodelay =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+	flush =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->io.req_read)
+		return _server->io.req_read(_state, _opaque, sector_index,
+					    num_sects, nodelay, flush);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->io.req_read, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_handle_req_read);
+static int
+vs_server_block_io_handle_req_write(const struct vs_server_block *_server,
+				    struct vs_server_block_state *_state,
+				    struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+	uint32_t _opaque;
+	uint64_t sector_index;
+	uint32_t num_sects;
+	bool nodelay;
+	bool flush;
+	bool commit;
+	struct vs_pbuf data;
+	const size_t _min_size = _max_size - _state->io.segment_size;
+	size_t _exact_size;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (test_bit(_opaque, _state->state.io.write_bitmask))
+		return -EPROTO;
+	__set_bit(_opaque, _state->state.io.write_bitmask);
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	sector_index =
+	    *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	num_sects =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	nodelay =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+	flush =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+	commit =
+	    *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+	data.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL);
+	data.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   28UL + sizeof(uint32_t));
+	data.max_size = data.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->io.segment_size - data.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_server->io.req_write)
+		return _server->io.req_write(_state, _opaque, sector_index,
+					     num_sects, nodelay, flush, commit,
+					     data, _mbuf);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->io.req_write, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_handle_req_write);
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_block *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_BLOCK_BASE_REQ_OPEN:
+		ret = vs_server_block_handle_req_open(server, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_BLOCK_BASE_REQ_CLOSE:
+		ret = vs_server_block_handle_req_close(server, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_BLOCK_BASE_REQ_REOPEN:
+		ret = vs_server_block_handle_req_reopen(server, state, _mbuf);
+		break;
+
+/** interface block_io **/
+/* command in parallel read */
+	case VSERVICE_BLOCK_IO_REQ_READ:
+		ret = vs_server_block_io_handle_req_read(server, state, _mbuf);
+		break;
+
+/* command in parallel write */
+	case VSERVICE_BLOCK_IO_REQ_WRITE:
+		ret = vs_server_block_io_handle_req_write(server, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void block_handle_notify(struct vs_service_device *service,
+				uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_block_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_block *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface block_io **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services blockServer Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/Makefile b/drivers/vservices/protocol/core/Makefile
new file mode 100644
index 0000000..6bef7f5
--- /dev/null
+++ b/drivers/vservices/protocol/core/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_SERVER) += vservices_protocol_core_server.o
+vservices_protocol_core_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_CLIENT) += vservices_protocol_core_client.o
+vservices_protocol_core_client-objs = client.o
diff --git a/drivers/vservices/protocol/core/client.c b/drivers/vservices/protocol/core/client.c
new file mode 100644
index 0000000..2dd2136
--- /dev/null
+++ b/drivers/vservices/protocol/core/client.c
@@ -0,0 +1,1069 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_client_driver {
+	struct vs_client_core *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+        container_of(d, struct vs_core_client_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->start)
+		client->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (client->reset)
+		client->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static int core_client_probe(struct vs_service_device *service);
+static int core_client_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_client_register(struct vs_client_core *client,
+				    const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_core_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = core_client_probe;
+	driver->vsdrv.remove = core_client_remove;
+	driver->vsdrv.receive = core_handle_message;
+	driver->vsdrv.notify = core_handle_notify;
+	driver->vsdrv.start = client->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	driver->vsdrv.tx_ready = core_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_client_register);
+
+int vservice_core_client_unregister(struct vs_client_core *client)
+{
+	struct vs_core_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_client_unregister);
+
+static int core_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+	struct vs_client_core_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int core_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_core *client = to_client_driver(vsdrv)->client;
+	struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+int vs_client_core_core_getbufs_service_created(struct vs_client_core_state
+						*_state,
+						struct vs_string *service_name,
+						struct vs_string *protocol_name,
+						struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
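+	/*
+	 * service_created wire layout (as used below and in
+	 * core_handle_service_created): a vs_message_id_t header, a 32-bit
+	 * service id, a fixed-size service name field, then a variable-length
+	 * protocol name whose length is the received size minus _min_size.
+	 */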
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name->max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_getbufs_service_created);
+int vs_client_core_core_free_service_created(struct vs_client_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_free_service_created);
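+/*
+ * The request senders below follow the same generated pattern: check that the
+ * protocol state machine allows the command, allocate an mbuf, write the
+ * message id, advance the state machine (calling the state_change callback if
+ * one is registered), and hand the buffer to vs_service_send().
+ */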
+int
+vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_CONNECT;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_connect);
+int
+vs_client_core_core_req_disconnect(struct vs_client_core_state *_state,
+				   gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_REQ_DISCONNECT;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_disconnect);
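+/*
+ * The ack/nack handlers below complete the two-phase connect and disconnect
+ * commands: they move the state machine out of the transient
+ * DISCONNECTED__CONNECT / CONNECTED__DISCONNECT states, free the received
+ * mbuf, and then invoke the corresponding client callback if one is set.
+ */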
+static int
+core_core_handle_ack_connect(const struct vs_client_core *_client,
+			     struct vs_client_core_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_connect)
+		return _client->core.ack_connect(_state);
+	return 0;
+}
+
+static int
+core_core_handle_nack_connect(const struct vs_client_core *_client,
+			      struct vs_client_core_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_connect)
+		return _client->core.nack_connect(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_connect);
+static int
+core_core_handle_ack_disconnect(const struct vs_client_core *_client,
+				struct vs_client_core_state *_state,
+				struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.ack_disconnect)
+		return _client->core.ack_disconnect(_state);
+	return 0;
+}
+
+static int
+core_core_handle_nack_disconnect(const struct vs_client_core *_client,
+				 struct vs_client_core_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.nack_disconnect)
+		return _client->core.nack_disconnect(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(core_core_handle_ack_disconnect);
+static int
+vs_client_core_core_handle_startup(const struct vs_client_core *_client,
+				   struct vs_client_core_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+	uint32_t core_in_quota;
+	uint32_t core_out_quota;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_client->core.state_change)
+		_client->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+	core_in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	core_out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_startup)
+		return _client->core.msg_startup(_state, core_in_quota,
+						 core_out_quota);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_startup);
+static int
+vs_client_core_core_handle_shutdown(const struct vs_client_core *_client,
+				    struct vs_client_core_state *_state,
+				    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_client->core.state_change)
+			_client->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_shutdown)
+		return _client->core.msg_shutdown(_state);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_shutdown);
+static int
+vs_client_core_core_handle_service_created(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+	uint32_t service_id;
+	struct vs_string service_name;
+	struct vs_string protocol_name;
+	const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+	size_t _exact_size;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	service_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name.max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+	protocol_name.ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name.max_size =
+	    VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+				   VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+	/* Now check the size received is the exact size expected */
+	_exact_size =
+	    _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+			 protocol_name.max_size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->core.msg_service_created)
+		return _client->core.msg_service_created(_state, service_id,
+							 service_name,
+							 protocol_name, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_created);
+static int
+vs_client_core_core_handle_service_removed(const struct vs_client_core *_client,
+					   struct vs_client_core_state *_state,
+					   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_removed)
+		return _client->core.msg_service_removed(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_removed);
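+/*
+ * server_ready carries seven consecutive 32-bit fields after the message id
+ * (28 bytes in total): the service id, the in/out message quotas, and the
+ * in/out notification bit offsets and counts.
+ */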
+static int
+vs_client_core_core_handle_server_ready(const struct vs_client_core *_client,
+					struct vs_client_core_state *_state,
+					struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+	uint32_t service_id;
+	uint32_t in_quota;
+	uint32_t out_quota;
+	uint32_t in_bit_offset;
+	uint32_t in_num_bits;
+	uint32_t out_bit_offset;
+	uint32_t out_num_bits;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	in_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	out_quota =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+	in_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   12UL);
+	in_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   16UL);
+	out_bit_offset =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   20UL);
+	out_num_bits =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+			   24UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_server_ready)
+		return _client->core.msg_server_ready(_state, service_id,
+						      in_quota, out_quota,
+						      in_bit_offset,
+						      in_num_bits,
+						      out_bit_offset,
+						      out_num_bits);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_server_ready);
+static int
+vs_client_core_core_handle_service_reset(const struct vs_client_core *_client,
+					 struct vs_client_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->core.msg_service_reset)
+		return _client->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_handle_service_reset);
+int
+vs_client_core_core_send_service_reset(struct vs_client_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_core *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_send_service_reset);
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_ACK_CONNECT:
+		ret = core_core_handle_ack_connect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_CONNECT:
+		ret = core_core_handle_nack_connect(client, state, _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_ACK_DISCONNECT:
+		ret = core_core_handle_ack_disconnect(client, state, _mbuf);
+		break;
+	case VSERVICE_CORE_CORE_NACK_DISCONNECT:
+		ret = core_core_handle_nack_disconnect(client, state, _mbuf);
+		break;
+
+/* message startup */
+	case VSERVICE_CORE_CORE_MSG_STARTUP:
+		ret = vs_client_core_core_handle_startup(client, state, _mbuf);
+		break;
+
+/* message shutdown */
+	case VSERVICE_CORE_CORE_MSG_SHUTDOWN:
+		ret = vs_client_core_core_handle_shutdown(client, state, _mbuf);
+		break;
+
+/* message service_created */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+		ret =
+		    vs_client_core_core_handle_service_created(client, state,
+							       _mbuf);
+		break;
+
+/* message service_removed */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+		ret =
+		    vs_client_core_core_handle_service_removed(client, state,
+							       _mbuf);
+		break;
+
+/* message server_ready */
+	case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+		ret =
+		    vs_client_core_core_handle_server_ready(client, state,
+							    _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_client_core_core_handle_service_reset(client, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
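+/*
+ * The core protocol defines no notification bits, so any bit set here is
+ * reported as a protocol error.
+ */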
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_core *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services core Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/server.c b/drivers/vservices/protocol/core/server.c
new file mode 100644
index 0000000..c3f3686
--- /dev/null
+++ b/drivers/vservices/protocol/core/server.c
@@ -0,0 +1,1226 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the core server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_server_driver {
+	struct vs_server_core *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_core_server_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->start)
+		server->start(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->reset)
+		server->reset(state);
+	vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->start)
+		server->start(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+	if (server->reset)
+		server->reset(state);
+	vs_service_state_unlock_bh(service);
+}
+
+static int core_server_probe(struct vs_service_device *service);
+static int core_server_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+			       struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_server_register(struct vs_server_core *server,
+				    const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_core_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_CORE_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_CORE_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = core_server_probe;
+	driver->vsdrv.remove = core_server_remove;
+	driver->vsdrv.receive = core_handle_message;
+	driver->vsdrv.notify = core_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    core_handle_start_bh : core_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    core_handle_reset_bh : core_handle_reset;
+	driver->vsdrv.tx_ready = core_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_server_register);
+
+int vservice_core_server_unregister(struct vs_server_core *server)
+{
+	struct vs_core_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_server_unregister);
+
+static int core_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+	struct vs_server_core_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int core_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_core *server = to_server_driver(vsdrv)->server;
+	struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
+struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+							  vs_server_core_state
+							  *_state,
+							  struct vs_string
+							  *service_name,
+							  struct vs_string
+							  *protocol_name,
+							  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+	    VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
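+	/*
+	 * Allocate the buffer at the maximum message size and expose the two
+	 * name fields as vs_string windows into it; the unused tail of the
+	 * variable-length protocol name is trimmed off by
+	 * vs_server_core_core_send_service_created() before transmission.
+	 */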
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!service_name)
+		goto fail;
+	service_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+	service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+	if (!protocol_name)
+		goto fail;
+	protocol_name->ptr =
+	    (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+	protocol_name->max_size = VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_alloc_service_created);
+int vs_server_core_core_free_service_created(struct vs_server_core_state
+					     *_state,
+					     struct vs_string *service_name,
+					     struct vs_string *protocol_name,
+					     struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_free_service_created);
+int
+vs_server_core_core_send_ack_connect(struct vs_server_core_state *_state,
+				     gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_connect);
+int
+vs_server_core_core_send_nack_connect(struct vs_server_core_state *_state,
+				      gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_CONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_connect);
+int
+vs_server_core_core_send_ack_disconnect(struct vs_server_core_state *_state,
+					gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_ACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_disconnect);
+int
+vs_server_core_core_send_nack_disconnect(struct vs_server_core_state *_state,
+					 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_NACK_DISCONNECT;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+					   VSERVICE_CORE_STATE_CONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_disconnect);
+static int
+vs_server_core_core_handle_req_connect(const struct vs_server_core *_server,
+				       struct vs_server_core_state *_state,
+				       struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_DISCONNECTED,
+					   VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_connect)
+		return _server->core.req_connect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_connect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_connect);
+static int
+vs_server_core_core_handle_req_disconnect(const struct vs_server_core *_server,
+					  struct vs_server_core_state *_state,
+					  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	_state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state,
+					   VSERVICE_CORE_STATE_CONNECTED,
+					   VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.req_disconnect)
+		return _server->core.req_disconnect(_state);
+	else
+		dev_warn(&_state->service->dev,
+			 "[%s:%d] Protocol warning: No handler registered for _server->core.req_disconnect, command will never be acknowledged\n",
+			 __func__, __LINE__);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_req_disconnect);
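+/*
+ * startup advertises the core service's in/out message quotas and moves the
+ * protocol state machine from OFFLINE to DISCONNECTED on both ends.
+ */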
+int
+vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+				 uint32_t core_in_quota,
+				 uint32_t core_out_quota, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_OFFLINE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_STARTUP;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    core_in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    core_out_quota;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+	if (_server->core.state_change)
+		_server->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+					   VSERVICE_CORE_STATE_DISCONNECTED);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_startup);
+int
+vs_server_core_core_send_shutdown(struct vs_server_core_state *_state,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+	case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SHUTDOWN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_DISCONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_DISCONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+	case VSERVICE_CORE_STATE_CONNECTED:
+		_state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+		if (_server->core.state_change)
+			_server->core.state_change(_state,
+						   VSERVICE_CORE_STATE_CONNECTED,
+						   VSERVICE_CORE_STATE_OFFLINE);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_shutdown);
+int
+vs_server_core_core_send_service_created(struct vs_server_core_state *_state,
+					 uint32_t service_id,
+					 struct vs_string service_name,
+					 struct vs_string protocol_name,
+					 struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_CORE_CORE_MSG_SERVICE_CREATED)
+
+		return -EINVAL;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
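+	/*
+	 * NUL-pad the fixed-size service name field, then shrink the mbuf so
+	 * that only the used portion of the variable-length protocol name is
+	 * transmitted.
+	 */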
+	{
+		size_t _size = strnlen(service_name.ptr, service_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		memset(service_name.ptr + _size, 0,
+		       service_name.max_size - _size);
+	}
+	{
+		size_t _size =
+		    strnlen(protocol_name.ptr, protocol_name.max_size);
+		if ((_size + sizeof(vs_message_id_t) +
+		     VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL) >
+		    VS_MBUF_SIZE(_mbuf))
+			return -EINVAL;
+
+		if (_size < protocol_name.max_size)
+			VS_MBUF_SIZE(_mbuf) -= (protocol_name.max_size - _size);
+
+	}
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_created);
+int
+vs_server_core_core_send_service_removed(struct vs_server_core_state *_state,
+					 uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_removed);
+int
+vs_server_core_core_send_server_ready(struct vs_server_core_state *_state,
+				      uint32_t service_id, uint32_t in_quota,
+				      uint32_t out_quota,
+				      uint32_t in_bit_offset,
+				      uint32_t in_num_bits,
+				      uint32_t out_bit_offset,
+				      uint32_t out_num_bits, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+	case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVER_READY;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+	    in_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+	    out_quota;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+	    in_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+	    in_num_bits;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+	    out_bit_offset;
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+	    out_num_bits;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_server_ready);
+int
+vs_server_core_core_send_service_reset(struct vs_server_core_state *_state,
+				       uint32_t service_id, gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_core *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    service_id;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_reset);
+static int
+vs_server_core_core_handle_service_reset(const struct vs_server_core *_server,
+					 struct vs_server_core_state *_state,
+					 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+	uint32_t service_id;
+
+	switch (_state->state.core.statenum) {
+	case VSERVICE_CORE_STATE_CONNECTED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.core.statenum,
+			vservice_core_get_state_string(_state->state.core));
+
+		return -EPROTO;
+
+	}
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	service_id =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->core.msg_service_reset)
+		return _server->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_handle_service_reset);
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+	case VSERVICE_CORE_CORE_REQ_CONNECT:
+		ret =
+		    vs_server_core_core_handle_req_connect(server, state,
+							   _mbuf);
+		break;
+
+/* command in sync disconnect */
+	case VSERVICE_CORE_CORE_REQ_DISCONNECT:
+		ret =
+		    vs_server_core_core_handle_req_disconnect(server, state,
+							      _mbuf);
+		break;
+
+/* message service_reset */
+	case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+		ret =
+		    vs_server_core_core_handle_service_reset(server, state,
+							     _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
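+/*
+ * As on the client side, the core protocol has no notification bits, so any
+ * bit set here is reported as a protocol error.
+ */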
+static void core_handle_notify(struct vs_service_device *service,
+			       uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_core_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_core *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface core **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services coreServer Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/Makefile b/drivers/vservices/protocol/serial/Makefile
new file mode 100644
index 0000000..f5f29ed
--- /dev/null
+++ b/drivers/vservices/protocol/serial/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_CLIENT) += vservices_protocol_serial_client.o
+vservices_protocol_serial_client-objs = client.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_SERVER) += vservices_protocol_serial_server.o
+vservices_protocol_serial_server-objs = server.o
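The Makefile above builds the client and server halves of the generated serial protocol as separate modules. For reference, a configuration fragment enabling both (illustrative only; the Kconfig entries defining these symbols live elsewhere in this patch) would be:

CONFIG_VSERVICES_PROTOCOL_SERIAL_CLIENT=m
CONFIG_VSERVICES_PROTOCOL_SERIAL_SERVER=m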
diff --git a/drivers/vservices/protocol/serial/client.c b/drivers/vservices/protocol/serial/client.c
new file mode 100644
index 0000000..1c37e72
--- /dev/null
+++ b/drivers/vservices/protocol/serial/client.c
@@ -0,0 +1,925 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the serial client protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_serial_client_driver {
+	struct vs_client_serial *client;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+        container_of(d, struct vs_serial_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	_vs_client_serial_req_open(state);
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	_vs_client_serial_req_open(state);
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client __maybe_unused =
+	    to_client_driver(vsdrv)->client;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (client->closed)
+		client->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int serial_client_probe(struct vs_service_device *service);
+static int serial_client_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+				 struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_client_register(struct vs_client_serial *client,
+				      const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_serial_client_driver *driver;
+
+	if (client->tx_atomic && !client->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	client->driver = &driver->vsdrv;
+	driver->client = client;
+
+	driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = false;
+	driver->vsdrv.rx_atomic = client->rx_atomic;
+	driver->vsdrv.tx_atomic = client->tx_atomic;
+
+	driver->vsdrv.probe = serial_client_probe;
+	driver->vsdrv.remove = serial_client_remove;
+	driver->vsdrv.receive = serial_handle_message;
+	driver->vsdrv.notify = serial_handle_notify;
+	driver->vsdrv.start = client->tx_atomic ?
+	    serial_handle_start_bh : serial_handle_start;
+	driver->vsdrv.reset = client->tx_atomic ?
+	    serial_handle_reset_bh : serial_handle_reset;
+	driver->vsdrv.tx_ready = serial_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	client->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_client_register);
+
+int vservice_serial_client_unregister(struct vs_client_serial *client)
+{
+	struct vs_serial_client_driver *driver;
+
+	if (!client->driver)
+		return 0;
+
+	driver = to_client_driver(client->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	client->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_client_unregister);
+
+static int serial_client_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+	struct vs_client_serial_state *state;
+
+	state = client->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int serial_client_remove(struct vs_service_device *service)
+{
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	client->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+	struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (client->tx_ready)
+		client->tx_ready(state);
+
+	return 0;
+}
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_OPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_open);
+static int _vs_client_serial_req_close(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_CLOSE;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_close);
+static int _vs_client_serial_req_reopen(struct vs_client_serial_state *_state)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+							   (_state)) ?
+				  GFP_ATOMIC : GFP_KERNEL);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_REQ_REOPEN;
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_reopen);
+static int
+serial_base_handle_ack_open(const struct vs_client_serial *_client,
+			    struct vs_client_serial_state *_state,
+			    struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	_state->serial.packet_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	_state->packet_size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	_client->opened(_state);
+	return 0;
+
+}
+
+static int
+serial_base_handle_nack_open(const struct vs_client_serial *_client,
+			     struct vs_client_serial_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+		"Open operation failed for device %s\n",
+		VS_STATE_SERVICE_PTR(_state)->name);
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_open);
+static int
+serial_base_handle_ack_close(const struct vs_client_serial *_client,
+			     struct vs_client_serial_state *_state,
+			     struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+static int
+serial_base_handle_nack_close(const struct vs_client_serial *_client,
+			      struct vs_client_serial_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_close);
+static int
+serial_base_handle_ack_reopen(const struct vs_client_serial *_client,
+			      struct vs_client_serial_state *_state,
+			      struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_client->reopened) {
+		_client->reopened(_state);
+		return 0;
+	}
+	wake_up_all(&_state->service->quota_wq);
+	_client->closed(_state);
+	return _vs_client_serial_req_open(_state);
+
+}
+
+static int
+serial_base_handle_nack_reopen(const struct vs_client_serial *_client,
+			       struct vs_client_serial_state *_state,
+			       struct vs_mbuf *_mbuf)
+{
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_reopen);
+struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct vs_client_serial_state
+						  *_state, struct vs_pbuf *b,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!b)
+		goto fail;
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->size = _state->serial.packet_size;
+	b->max_size = b->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_alloc_msg);
+int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state *_state,
+					struct vs_pbuf *b,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->max_size = b->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_getbufs_msg);
+int vs_client_serial_serial_free_msg(struct vs_client_serial_state *_state,
+				     struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_free_msg);
+static int
+vs_client_serial_serial_handle_msg(const struct vs_client_serial *_client,
+				   struct vs_client_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	struct vs_pbuf b;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b.max_size = b.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_client->serial.msg_msg)
+		return _client->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_handle_msg);
+int
+vs_client_serial_serial_send_msg(struct vs_client_serial_state *_state,
+				 struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_client_serial *_client =
+	    to_client_driver(vsdrv)->client;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+		return -EINVAL;
+
+	if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (b.size < b.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    b.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_send_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_client_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_serial *client =
+	    to_client_driver(vsdrv)->client;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_SERIAL_BASE_ACK_OPEN:
+		ret = serial_base_handle_ack_open(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_OPEN:
+		ret = serial_base_handle_nack_open(client, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_SERIAL_BASE_ACK_CLOSE:
+		ret = serial_base_handle_ack_close(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_CLOSE:
+		ret = serial_base_handle_nack_close(client, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_SERIAL_BASE_ACK_REOPEN:
+		ret = serial_base_handle_ack_reopen(client, state, _mbuf);
+		break;
+	case VSERVICE_SERIAL_BASE_NACK_REOPEN:
+		ret = serial_base_handle_nack_reopen(client, state, _mbuf);
+		break;
+
+/** interface serial **/
+/* message msg */
+	case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+		ret = vs_client_serial_serial_handle_msg(client, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t notify_bits)
+{
+	__maybe_unused struct vs_client_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_client_serial *client =
+	    to_client_driver(vsdrv)->client;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface serial **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+int vs_client_serial_reopen(struct vs_client_serial_state *_state)
+{
+	return _vs_client_serial_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_reopen);
+
+int vs_client_serial_close(struct vs_client_serial_state *_state)
+{
+	return _vs_client_serial_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serialClient Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/server.c b/drivers/vservices/protocol/serial/server.c
new file mode 100644
index 0000000..e5d1034
--- /dev/null
+++ b/drivers/vservices/protocol/serial/server.c
@@ -0,0 +1,1086 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+  * This is the generated code for the serial server protocol handling.
+  */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_serial_server_driver {
+	struct vs_server_serial *server;
+	struct list_head list;
+	struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+        container_of(d, struct vs_serial_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server __maybe_unused =
+	    to_server_driver(vsdrv)->server;
+
+	vs_service_state_lock_bh(service);
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+		vs_service_state_unlock_bh(service);
+		return;
+	}
+	state->state.base = VSERVICE_BASE_RESET_STATE;
+	reset_nack_requests(service);
+	if (server->closed)
+		server->closed(state);
+
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	vs_service_state_unlock_bh(service);
+}
+
+static int serial_server_probe(struct vs_service_device *service);
+static int serial_server_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+				 struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_server_register(struct vs_server_serial *server,
+				      const char *name, struct module *owner)
+{
+	int ret;
+	struct vs_serial_server_driver *driver;
+
+	if (server->tx_atomic && !server->rx_atomic)
+		return -EINVAL;
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver) {
+		ret = -ENOMEM;
+		goto fail_alloc_driver;
+	}
+
+	server->driver = &driver->vsdrv;
+	driver->server = server;
+
+	driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+	driver->vsdrv.is_server = true;
+	driver->vsdrv.rx_atomic = server->rx_atomic;
+	driver->vsdrv.tx_atomic = server->tx_atomic;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.in_quota_min = 1;
+	driver->vsdrv.in_quota_best = server->in_quota_best ?
+	    server->in_quota_best : driver->vsdrv.in_quota_min;
+	/* FIXME Jira ticket SDK-2835 - philipd. */
+	driver->vsdrv.out_quota_min = 1;
+	driver->vsdrv.out_quota_best = server->out_quota_best ?
+	    server->out_quota_best : driver->vsdrv.out_quota_min;
+	driver->vsdrv.in_notify_count = VSERVICE_SERIAL_NBIT_IN__COUNT;
+	driver->vsdrv.out_notify_count = VSERVICE_SERIAL_NBIT_OUT__COUNT;
+
+	driver->vsdrv.probe = serial_server_probe;
+	driver->vsdrv.remove = serial_server_remove;
+	driver->vsdrv.receive = serial_handle_message;
+	driver->vsdrv.notify = serial_handle_notify;
+	driver->vsdrv.start = server->tx_atomic ?
+	    serial_handle_start_bh : serial_handle_start;
+	driver->vsdrv.reset = server->tx_atomic ?
+	    serial_handle_reset_bh : serial_handle_reset;
+	driver->vsdrv.tx_ready = serial_handle_tx_ready;
+	driver->vsdrv.out_notify_count = 0;
+	driver->vsdrv.in_notify_count = 0;
+	driver->vsdrv.driver.name = name;
+	driver->vsdrv.driver.owner = owner;
+	driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+	ret = driver_register(&driver->vsdrv.driver);
+
+	if (ret) {
+		goto fail_driver_register;
+	}
+
+	return 0;
+
+ fail_driver_register:
+	server->driver = NULL;
+	kfree(driver);
+ fail_alloc_driver:
+	return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_server_register);
+
+int vservice_serial_server_unregister(struct vs_server_serial *server)
+{
+	struct vs_serial_server_driver *driver;
+
+	if (!server->driver)
+		return 0;
+
+	driver = to_server_driver(server->driver);
+	driver_unregister(&driver->vsdrv.driver);
+
+	server->driver = NULL;
+	kfree(driver);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_server_unregister);
+
+static int serial_server_probe(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+	struct vs_server_serial_state *state;
+
+	state = server->alloc(service);
+	if (!state)
+		return -ENOMEM;
+	else if (IS_ERR(state))
+		return PTR_ERR(state);
+
+	state->service = vs_get_service(service);
+	state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+	dev_set_drvdata(&service->dev, state);
+
+	return 0;
+}
+
+static int serial_server_remove(struct vs_service_device *service)
+{
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+
+	state->released = true;
+	dev_set_drvdata(&service->dev, NULL);
+	server->release(state);
+
+	vs_put_service(service);
+
+	return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+	struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+		return 0;
+
+	if (server->tx_ready)
+		server->tx_ready(state);
+
+	return 0;
+}
+
+static int
+vs_server_serial_send_ack_open(struct vs_server_serial_state *_state,
+			       gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_OPEN;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    _state->packet_size;
+	_state->serial.packet_size = _state->packet_size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_open);
+static int
+vs_server_serial_send_nack_open(struct vs_server_serial_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_OPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_open);
+static int
+vs_server_serial_send_ack_close(struct vs_server_serial_state *_state,
+				gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_close);
+static int
+vs_server_serial_send_nack_close(struct vs_server_serial_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_CLOSE;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_close);
+static int
+vs_server_serial_send_ack_reopen(struct vs_server_serial_state *_state,
+				 gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_ACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE__RESET;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_reopen);
+static int
+vs_server_serial_send_nack_reopen(struct vs_server_serial_state *_state,
+				  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+
+	const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return PTR_ERR(_mbuf);
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+
+		return -ENOMEM;
+	}
+
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+	    VSERVICE_SERIAL_BASE_NACK_REOPEN;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_reopen);
+static int
+vs_server_serial_handle_req_open(const struct vs_server_serial *_server,
+				 struct vs_server_serial_state *_state,
+				 struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_CLOSED:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->open)
+		return vs_server_serial_open_complete(_state,
+						      _server->open(_state));
+	return vs_server_serial_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+				   vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_serial_send_ack_open(_state,
+						   vs_service_has_atomic_rx
+						   (VS_STATE_SERVICE_PTR
+						    (_state)) ? GFP_ATOMIC :
+						   GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_serial_send_nack_open(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_open_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_open);
+static int
+vs_server_serial_handle_req_close(const struct vs_server_serial *_server,
+				  struct vs_server_serial_state *_state,
+				  struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->close)
+		return vs_server_serial_close_complete(_state,
+						       _server->close(_state));
+	return vs_server_serial_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_close_complete(struct vs_server_serial_state *_state,
+				    vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret =
+		    vs_server_serial_send_ack_close(_state,
+						    vs_service_has_atomic_rx
+						    (VS_STATE_SERVICE_PTR
+						     (_state)) ? GFP_ATOMIC :
+						    GFP_KERNEL);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret =
+		    vs_server_serial_send_nack_close(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+		wake_up_all(&_state->service->quota_wq);
+	}
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_close_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_close);
+static int
+vs_server_serial_handle_req_reopen(const struct vs_server_serial *_server,
+				   struct vs_server_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+	if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+		return -EBADMSG;
+
+	switch (_state->state.base.statenum) {
+	case VSERVICE_BASE_STATE_RUNNING:
+
+		break;
+
+	default:
+		dev_err(&_state->service->dev,
+			"[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+			__func__, __LINE__, _state->state.base.statenum,
+			vservice_base_get_state_string(_state->state.base));
+
+		return -EPROTO;
+
+	}
+	_state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->reopen)
+		return vs_server_serial_reopen_complete(_state,
+							_server->
+							reopen(_state));
+	else
+		return vs_server_serial_send_nack_reopen(_state,
+							 vs_service_has_atomic_rx
+							 (VS_STATE_SERVICE_PTR
+							  (_state)) ? GFP_ATOMIC
+							 : GFP_KERNEL);
+
+}
+
+int vs_server_serial_reopen_complete(struct vs_server_serial_state *_state,
+				     vs_server_response_type_t resp)
+{
+	int ret = 0;
+	if (resp == VS_SERVER_RESP_SUCCESS) {
+		ret =
+		    vs_server_serial_send_ack_reopen(_state,
+						     vs_service_has_atomic_rx
+						     (VS_STATE_SERVICE_PTR
+						      (_state)) ? GFP_ATOMIC :
+						     GFP_KERNEL);
+	} else if (resp == VS_SERVER_RESP_FAILURE) {
+		ret =
+		    vs_server_serial_send_nack_reopen(_state,
+						      vs_service_has_atomic_rx
+						      (VS_STATE_SERVICE_PTR
+						       (_state)) ? GFP_ATOMIC :
+						      GFP_KERNEL);
+	}
+
+	return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_reopen);
+struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct vs_server_serial_state
+						  *_state, struct vs_pbuf *b,
+						  gfp_t flags)
+{
+	struct vs_mbuf *_mbuf;
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const uint32_t _msg_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	_mbuf =
+	    vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+				  flags);
+	if (IS_ERR(_mbuf))
+		return _mbuf;
+	if (!_mbuf) {
+
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-ENOMEM);
+	}
+	*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+	if (!b)
+		goto fail;
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->size = _state->serial.packet_size;
+	b->max_size = b->size;
+	return _mbuf;
+
+ fail:
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_alloc_msg);
+int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state *_state,
+					struct vs_pbuf *b,
+					struct vs_mbuf *_mbuf)
+{
+	const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+		return -EINVAL;
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b->size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b->data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b->max_size = b->size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b->size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_getbufs_msg);
+int vs_server_serial_serial_free_msg(struct vs_server_serial_state *_state,
+				     struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+	vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_free_msg);
+int
+vs_server_serial_serial_send_msg(struct vs_server_serial_state *_state,
+				 struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+	__maybe_unused struct vs_server_serial *_server =
+	    to_server_driver(vsdrv)->server;
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+	    VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+		return -EINVAL;
+
+	if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+		return -EINVAL;
+
+	if (b.size < b.max_size)
+		VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+	*(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+	    b.size;
+
+	{
+		int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+		if (err) {
+			dev_warn(&_state->service->dev,
+				 "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+				 __func__, __LINE__, err);
+
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_send_msg);
+static int
+vs_server_serial_serial_handle_msg(const struct vs_server_serial *_server,
+				   struct vs_server_serial_state *_state,
+				   struct vs_mbuf *_mbuf)
+{
+	const size_t _max_size =
+	    sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+	struct vs_pbuf b;
+	const size_t _min_size = _max_size - _state->serial.packet_size;
+	size_t _exact_size;
+	if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+		return -EPROTO;
+
+	/* The first check is to ensure the message isn't complete garbage */
+	if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+	    || (VS_MBUF_SIZE(_mbuf) < _min_size))
+		return -EBADMSG;
+
+	b.size =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	b.data =
+	    (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+			   sizeof(uint32_t));
+	b.max_size = b.size;
+
+	/* Now check the size received is the exact size expected */
+	_exact_size = _max_size - (_state->serial.packet_size - b.size);
+	if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+		return -EBADMSG;
+	if (_server->serial.msg_msg)
+		return _server->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_handle_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	vs_message_id_t message_id;
+	__maybe_unused struct vs_server_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_serial *server =
+	    to_server_driver(vsdrv)->server;
+
+	int ret;
+
+	/* Extract the message ID */
+	if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Invalid message size %zd\n",
+			__func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+		return -EBADMSG;
+	}
+
+	message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+	switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+	case VSERVICE_SERIAL_BASE_REQ_OPEN:
+		ret = vs_server_serial_handle_req_open(server, state, _mbuf);
+		break;
+
+/* command in sync close */
+	case VSERVICE_SERIAL_BASE_REQ_CLOSE:
+		ret = vs_server_serial_handle_req_close(server, state, _mbuf);
+		break;
+
+/* command in sync reopen */
+	case VSERVICE_SERIAL_BASE_REQ_REOPEN:
+		ret = vs_server_serial_handle_req_reopen(server, state, _mbuf);
+		break;
+
+/** interface serial **/
+/* message msg */
+	case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+		ret = vs_server_serial_serial_handle_msg(server, state, _mbuf);
+		break;
+
+	default:
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Unknown message type %d\n",
+			__func__, __LINE__, (int)message_id);
+
+		ret = -EPROTO;
+		break;
+	}
+
+	if (ret) {
+		dev_err(&state->service->dev,
+			"[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+			__func__, __LINE__, (int)message_id, ret);
+
+	}
+
+	return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+				 uint32_t notify_bits)
+{
+	__maybe_unused struct vs_server_serial_state *state =
+	    dev_get_drvdata(&service->dev);
+	struct vs_service_driver *vsdrv =
+	    to_vs_service_driver(service->dev.driver);
+	__maybe_unused struct vs_server_serial *server =
+	    to_server_driver(vsdrv)->server;
+
+	uint32_t bits = notify_bits;
+	int ret;
+
+	while (bits) {
+		uint32_t not = __ffs(bits);
+		switch (not) {
+
+    /** interface serial **/
+
+		default:
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Unknown notification %d\n",
+				__func__, __LINE__, (int)not);
+
+			ret = -EPROTO;
+			break;
+
+		}
+		bits &= ~(1 << not);
+		if (ret) {
+			dev_err(&state->service->dev,
+				"[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+				__func__, __LINE__, (int)not, ret);
+
+		}
+	}
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serialServer Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.c b/drivers/vservices/session.c
new file mode 100644
index 0000000..d695184
--- /dev/null
+++ b/drivers/vservices/session.c
@@ -0,0 +1,2913 @@
+/*
+ * drivers/vservices/session.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the generic session-management code for the vServices framework.
+ * It creates service and session devices on request from session and
+ * transport drivers, respectively; it also queues incoming messages from the
+ * transport and distributes them to the session's services.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+/* Minimum required time between resets to avoid throttling */
+#define RESET_THROTTLE_TIME msecs_to_jiffies(1000)
+
+/*
+ * Minimum/maximum reset throttling time. The reset throttle will start at
+ * the minimum and increase to the maximum exponentially.
+ */
+#define RESET_THROTTLE_MIN RESET_THROTTLE_TIME
+#define RESET_THROTTLE_MAX msecs_to_jiffies(8 * 1000)
+
+/*
+ * If the reset is being throttled and a sane reset (one that does not need
+ * throttling) is requested, throttling is disabled once the service's reset
+ * delay multiplied by this value has elapsed.
+ */
+#define RESET_THROTTLE_COOL_OFF_MULT 2
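+
+/*
+ * Worked example of the throttling parameters above (illustrative only,
+ * assuming the delay grows by doubling): the first throttled reset is
+ * delayed by RESET_THROTTLE_MIN (1000 ms), later resets that arrive too
+ * soon are delayed by 2000 ms, 4000 ms and so on, capped at
+ * RESET_THROTTLE_MAX (8000 ms). A service whose delay has grown to 4000 ms
+ * then has throttling disabled once
+ * 4000 ms * RESET_THROTTLE_COOL_OFF_MULT = 8000 ms pass without a reset
+ * request.
+ */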
+
+/* IDR of session ids to sessions */
+static DEFINE_IDR(session_idr);
+DEFINE_MUTEX(vs_session_lock);
+EXPORT_SYMBOL_GPL(vs_session_lock);
+
+/* Notifier list for vService session events */
+static BLOCKING_NOTIFIER_HEAD(vs_session_notifier_list);
+
+static unsigned long default_debug_mask;
+module_param(default_debug_mask, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_debug_mask, "Default vServices debug mask");
+
+/* vServices root in sysfs at /sys/vservices */
+struct kobject *vservices_root;
+EXPORT_SYMBOL_GPL(vservices_root);
+
+/* vServices server root in sysfs at /sys/vservices/server-sessions */
+struct kobject *vservices_server_root;
+EXPORT_SYMBOL_GPL(vservices_server_root);
+
+/* vServices client root in sysfs at /sys/vservices/client-sessions */
+struct kobject *vservices_client_root;
+EXPORT_SYMBOL_GPL(vservices_client_root);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+struct vs_service_device *vs_service_lookup_by_devt(dev_t dev)
+{
+	struct vs_session_device *session;
+	struct vs_service_device *service;
+
+	mutex_lock(&vs_session_lock);
+	session = idr_find(&session_idr, MINOR(dev) / VS_MAX_SERVICES);
+	if (!session) {
+		mutex_unlock(&vs_session_lock);
+		return NULL;
+	}
+	get_device(&session->dev);
+	mutex_unlock(&vs_session_lock);
+
+	service = vs_session_get_service(session,
+			MINOR(dev) % VS_MAX_SERVICES);
+	put_device(&session->dev);
+
+	return service;
+}
+#endif
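+
+/*
+ * Illustrative decoding of the devt mapping above (not used by the driver;
+ * example_decode is a placeholder name, and VS_MAX_SERVICES == 128 is
+ * assumed purely for the arithmetic): minor 260 would refer to session 2,
+ * service 4.
+ *
+ *	static void example_decode(dev_t dev)
+ *	{
+ *		unsigned int session_num = MINOR(dev) / VS_MAX_SERVICES;
+ *		unsigned int service_id = MINOR(dev) % VS_MAX_SERVICES;
+ *
+ *		pr_info("session %u, service %u\n", session_num, service_id);
+ *	}
+ */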
+
+struct vs_session_for_each_data {
+	int (*fn)(struct vs_session_device *session, void *data);
+	void *data;
+};
+
+int vs_session_for_each_from_idr(int id, void *session, void *_data)
+{
+	struct vs_session_for_each_data *data =
+		(struct vs_session_for_each_data *)_data;
+	return data->fn(session, data->data);
+}
+
+/**
+ * vs_session_for_each_locked - call a callback function for each session
+ * @fn: function to call
+ * @data: opaque pointer that is passed through to the function
+ */
+int vs_session_for_each_locked(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data)
+{
+	struct vs_session_for_each_data priv = { .fn = fn, .data = data };
+
+	lockdep_assert_held(&vs_session_lock);
+
+	return idr_for_each(&session_idr, vs_session_for_each_from_idr,
+			&priv);
+}
+EXPORT_SYMBOL(vs_session_for_each_locked);
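+
+/*
+ * Minimal usage sketch for vs_session_for_each_locked() (not part of this
+ * driver; count_sessions is a placeholder name). The caller must hold
+ * vs_session_lock, as asserted above.
+ *
+ *	static int count_sessions(struct vs_session_device *session,
+ *			void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	int count = 0;
+ *
+ *	mutex_lock(&vs_session_lock);
+ *	vs_session_for_each_locked(count_sessions, &count);
+ *	mutex_unlock(&vs_session_lock);
+ */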
+
+/**
+ * vs_session_register_notify - register a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_register_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_register_notify);
+
+/**
+ * vs_session_unregister_notify - unregister a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_unregister_notify(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_unregister_notify);
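+
+/*
+ * Minimal usage sketch for the notifier API above (illustrative only;
+ * my_session_event and my_session_nb are placeholder names, and the event
+ * codes delivered are defined elsewhere in the framework):
+ *
+ *	static int my_session_event(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_session_nb = {
+ *		.notifier_call	= my_session_event,
+ *	};
+ *
+ *	vs_session_register_notify(&my_session_nb);
+ *	...
+ *	vs_session_unregister_notify(&my_session_nb);
+ */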
+
+/*
+ * Helper function for returning how long ago something happened.
+ * Marked as __maybe_unused since it is only needed when
+ * CONFIG_VSERVICES_DEBUG is enabled; the attribute cannot be removed,
+ * because builds with debugging disabled would then fail with
+ * unused-function errors.
+ */
+static __maybe_unused unsigned msecs_ago(unsigned long jiffy_value)
+{
+	return jiffies_to_msecs(jiffies - jiffy_value);
+}
+
+static void session_fatal_error_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, fatal_error_work);
+
+	session->transport->vt->reset(session->transport);
+}
+
+static void session_fatal_error(struct vs_session_device *session, gfp_t gfp)
+{
+	schedule_work(&session->fatal_error_work);
+}
+
+/*
+ * Service readiness state machine
+ *
+ * The states are:
+ *
+ * INIT: Initial state. Service may not be completely configured yet
+ * (typically because the protocol hasn't been set); call vs_service_start
+ * once configuration is complete. The disable count must be nonzero, and
+ * must never reach zero in this state.
+ * DISABLED: Service is not permitted to communicate. Non-core services are
+ * in this state whenever the core protocol and/or transport state does not
+ * allow them to be active; core services are only in this state transiently.
+ * The disable count must be nonzero; when it reaches zero, the service
+ * transitions to RESET state.
+ * RESET: Service drivers are inactive at both ends, but the core service
+ * state allows the service to become active. The session will schedule a
+ * future transition to READY state when entering this state, but the
+ * transition may be delayed to throttle the rate at which resets occur.
+ * READY: All core-service and session-layer policy allows the service to
+ * communicate; it will become active as soon as it has a protocol driver.
+ * ACTIVE: The driver is present and communicating.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement, unless the disable count is nonzero in which case we
+ * will enter DISABLED state.
+ * LOCAL_DELETE: As for LOCAL_RESET, but we will enter the DELETED state
+ * instead of RESET or DISABLED.
+ * DELETED: The service is no longer present on the session; the service
+ * device structure may still exist because something is holding a reference
+ * to it.
+ *
+ * The permitted transitions are:
+ *
+ * From          To            Trigger
+ * INIT          DISABLED      vs_service_start
+ * DISABLED      RESET         vs_service_enable (disable_count -> 0)
+ * RESET         READY         End of throttle delay (may be 0)
+ * READY         ACTIVE        Latter of probe() and entering READY
+ * {READY, ACTIVE}
+ *               LOCAL_RESET   vs_service_reset
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               RESET         vs_service_handle_reset (server)
+ * RESET         DISABLED      vs_service_disable (server)
+ * {READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_handle_reset (client)
+ * {INIT, RESET, READY, ACTIVE, LOCAL_RESET}
+ *               DISABLED      vs_service_disable_noncore
+ * {ACTIVE, LOCAL_RESET}
+ *               LOCAL_DELETE  vs_service_delete
+ * {INIT, DISABLED, RESET, READY}
+ *               DELETED       vs_service_delete
+ * LOCAL_DELETE  DELETED       vs_service_handle_reset
+ *                             vs_service_disable_noncore
+ *
+ * See the documentation for the triggers for details.
+ */
+
+enum vs_service_readiness {
+	VS_SERVICE_INIT,
+	VS_SERVICE_DISABLED,
+	VS_SERVICE_RESET,
+	VS_SERVICE_READY,
+	VS_SERVICE_ACTIVE,
+	VS_SERVICE_LOCAL_RESET,
+	VS_SERVICE_LOCAL_DELETE,
+	VS_SERVICE_DELETED,
+};
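+
+/*
+ * Illustrative lifecycle for a server-side service, following the state
+ * machine documented above (a sketch only; error handling is omitted and
+ * the owner / protocol arguments are placeholders):
+ *
+ *	service = vs_service_register(session, owner, id, proto, name, NULL);
+ *						// INIT
+ *	vs_service_start(service);		// INIT -> DISABLED
+ *	vs_service_enable(service);		// DISABLED -> RESET, then READY
+ *						// READY -> ACTIVE once a
+ *						// driver has probed
+ *	vs_service_reset(service, service);	// ACTIVE -> LOCAL_RESET
+ *	vs_service_delete(service, owner);	// -> LOCAL_DELETE / DELETED
+ */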
+
+/* Session activation states. */
+enum {
+	VS_SESSION_RESET,
+	VS_SESSION_ACTIVATE,
+	VS_SESSION_ACTIVE,
+};
+
+/**
+ * vs_service_start - Start a service by moving it from the init state to the
+ * disabled state.
+ *
+ * @service: The service to start.
+ *
+ * Returns true if the service was started, or false if it was not.
+ */
+bool vs_service_start(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+
+	WARN_ON(!service->protocol);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_INIT) {
+		if (service->readiness != VS_SERVICE_DELETED)
+			dev_err(&service->dev,
+					"start called from invalid state %d\n",
+					service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return false;
+	}
+
+	if (service->id != 0 && session_drv->service_added) {
+		int err = session_drv->service_added(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to add service %d: %d\n",
+					service->id, err);
+			mutex_unlock(&service->ready_lock);
+			return false;
+		}
+	}
+
+	service->readiness = VS_SERVICE_DISABLED;
+	service->disable_count = 1;
+	service->last_reset_request = jiffies;
+
+	mutex_unlock(&service->ready_lock);
+
+	/* Tell userspace about the service. */
+	dev_set_uevent_suppress(&service->dev, false);
+	kobject_uevent(&service->dev.kobj, KOBJ_ADD);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(vs_service_start);
+
+static void cancel_pending_rx(struct vs_service_device *service);
+static void queue_ready_work(struct vs_service_device *service);
+
+static void __try_start_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_transport *transport;
+	int err;
+	struct vs_service_driver *driver;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* We can't start if the service is not ready yet. */
+	if (service->readiness != VS_SERVICE_READY)
+		return;
+
+	/*
+	 * There should never be anything in the RX queue at this point.
+	 * If there is, it can seriously confuse the service drivers for
+	 * no obvious reason, so we check.
+	 */
+	if (WARN_ON(!list_empty(&service->rx_queue)))
+		cancel_pending_rx(service);
+
+	if (!service->driver_probed) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready with no driver\n");
+		return;
+	}
+
+	/* Prepare the transport to support the service. */
+	transport = session->transport;
+	err = transport->vt->service_start(transport, service);
+
+	if (err < 0) {
+		/* fatal error attempting to start; reset and try again */
+		service->readiness = VS_SERVICE_RESET;
+		service->last_reset_request = jiffies;
+		service->last_reset = jiffies;
+		queue_ready_work(service);
+
+		return;
+	}
+
+	service->readiness = VS_SERVICE_ACTIVE;
+
+	driver = to_vs_service_driver(service->dev.driver);
+	if (driver->start)
+		driver->start(service);
+
+	if (service->id && session_drv->service_start) {
+		err = session_drv->service_start(session, service);
+		if (err < 0) {
+			dev_err(&session->dev, "Failed to start service %s (%d): %d\n",
+					dev_name(&service->dev),
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+}
+
+static void try_start_service(struct vs_service_device *service)
+{
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static void service_ready_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, ready_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"ready work - last reset request was %u ms ago\n",
+			msecs_ago(service->last_reset_request));
+
+	/*
+	 * Make sure there's no reset work pending from an earlier driver
+	 * failure. We should already be inactive at this point, so it's safe
+	 * to just cancel it.
+	 */
+	cancel_work_sync(&service->reset_work);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (service->readiness != VS_SERVICE_RESET) {
+		vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+				"ready work found readiness of %d, doing nothing\n",
+				service->readiness);
+		mutex_unlock(&service->ready_lock);
+		return;
+	}
+
+	service->readiness = VS_SERVICE_READY;
+	/* Record the time at which this happened, for throttling. */
+	service->last_ready = jiffies;
+
+	/* Tell userspace that the service is ready. */
+	kobject_uevent(&service->dev.kobj, KOBJ_ONLINE);
+
+	/* Start the service, if it has a driver attached. */
+	__try_start_service(service);
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static int __enable_service(struct vs_service_device *service);
+
+/**
+ * __reset_service - make a service inactive, and tell its driver, the
+ * transport, and possibly the remote partner
+ * @service:       The service to reset
+ * @notify_remote: If true, the partner is notified of the reset
+ *
+ * This routine is called to make an active service inactive. If the given
+ * service is currently active, it drops any queued messages for the service,
+ * and then informs the service driver and the transport layer that the
+ * service has reset. It sets the service readiness to VS_SERVICE_LOCAL_RESET
+ * to indicate that the driver is no longer active.
+ *
+ * This routine has no effect on services that are not active.
+ *
+ * The caller must hold the target service's ready lock.
+ */
+static void __reset_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_driver *driver = NULL;
+	struct vs_transport *transport;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* If we're already inactive, there's nothing to do. */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return;
+
+	service->last_reset = jiffies;
+	service->readiness = VS_SERVICE_LOCAL_RESET;
+
+	cancel_pending_rx(service);
+
+	if (!WARN_ON(!service->driver_probed))
+		driver = to_vs_service_driver(service->dev.driver);
+
+	if (driver && driver->reset)
+		driver->reset(service);
+
+	wake_up_all(&service->quota_wq);
+
+	transport = vs_service_get_session(service)->transport;
+
+	/*
+	 * Ask the transport to reset the service. If this returns a positive
+	 * value, we need to leave the service disabled, and the transport
+	 * will re-enable it. To avoid allowing the disable count to go
+	 * negative if that re-enable races with this callback returning, we
+	 * disable the service beforehand and re-enable it if the callback
+	 * returns zero.
+	 */
+	service->disable_count++;
+	err = transport->vt->service_reset(transport, service);
+	if (err < 0) {
+		dev_err(&session->dev, "Failed to reset service %d: %d (transport)\n",
+				service->id, err);
+		session_fatal_error(session, GFP_KERNEL);
+	} else if (!err) {
+		err = __enable_service(service);
+	}
+
+	if (notify_remote) {
+		if (service->id) {
+			err = session_drv->service_local_reset(session,
+					service);
+			if (err == VS_SERVICE_ALREADY_RESET) {
+				service->readiness = VS_SERVICE_RESET;
+				service->last_reset = jiffies;
+				queue_ready_work(service);
+
+			} else if (err < 0) {
+				dev_err(&session->dev, "Failed to reset service %d: %d (session)\n",
+						service->id, err);
+				session_fatal_error(session, GFP_KERNEL);
+			}
+		} else {
+			session->transport->vt->reset(session->transport);
+		}
+	}
+
+	/* Tell userspace that the service is no longer active. */
+	kobject_uevent(&service->dev.kobj, KOBJ_OFFLINE);
+}
+
+/**
+ * reset_service - reset a service and inform the remote partner
+ * @service: The service to reset
+ *
+ * This routine is called when a reset is locally initiated (other than
+ * implicitly by a session / core service reset). It bumps the reset request
+ * timestamp, acquires the necessary locks, and calls __reset_service.
+ *
+ * This routine returns with the service ready lock held, to allow the caller
+ * to make any other state changes that must be atomic with the service
+ * reset.
+ */
+static void reset_service(struct vs_service_device *service)
+	__acquires(service->ready_lock)
+{
+	service->last_reset_request = jiffies;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	__reset_service(service, true);
+}
+
+/**
+ * vs_service_reset - initiate a service reset
+ * @service: the service that is to be reset
+ * @caller: the service that is initiating the reset
+ *
+ * This routine informs the partner that the given service is being reset,
+ * then disables and flushes the service's receive queues and resets its
+ * driver. The service will be automatically re-enabled once the partner has
+ * acknowledged the reset (see vs_session_handle_service_reset, above).
+ *
+ * If the given service is the core service, this will perform a transport
+ * reset, which implicitly resets (on the server side) or destroys (on
+ * the client side) every other service on the session.
+ *
+ * If the given service is already being reset, this has no effect, other
+ * than to delay completion of the reset if it is being throttled.
+ *
+ * For lock safety reasons, a service can only be directly reset by itself,
+ * the core service, or the service that created it (which is typically also
+ * the core service).
+ *
+ * A service that wishes to reset itself must not do so while holding its state
+ * lock or while running on its own workqueue. In these circumstances, call
+ * vs_service_reset_nosync() instead. Note that returning an error code
+ * (any negative number) from a driver callback forces a call to
+ * vs_service_reset_nosync() and prints an error message.
+ */
+int vs_service_reset(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	if (caller != service && caller != service->owner) {
+		struct vs_service_device *core_service = session->core_service;
+
+		WARN_ON(!core_service);
+		if (caller != core_service)
+			return -EPERM;
+	}
+
+	reset_service(service);
+	/* reset_service returns with ready_lock held, but we don't need it */
+	mutex_unlock(&service->ready_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_reset);
+
+/**
+ * vs_service_reset_nosync - asynchronously reset a service.
+ * @service: the service that is to be reset
+ *
+ * This routine triggers a reset for the nominated service. It may be called
+ * from any context, including interrupt context. It does not wait for the
+ * reset to occur, and provides no synchronisation guarantees when called from
+ * outside the target service.
+ *
+ * This is intended only for service drivers that need to reset themselves
+ * from a context that would not normally allow it. In other cases, use
+ * vs_service_reset.
+ */
+void vs_service_reset_nosync(struct vs_service_device *service)
+{
+	service->pending_reset = true;
+	schedule_work(&service->reset_work);
+}
+EXPORT_SYMBOL_GPL(vs_service_reset_nosync);
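+
+/*
+ * Sketch of when to use each reset entry point above (illustrative only;
+ * my_driver_error is a placeholder name). From ordinary task context,
+ * outside the service's own workqueue and without the state lock held, a
+ * driver may reset itself synchronously:
+ *
+ *	vs_service_reset(service, service);
+ *
+ * From atomic context, from its own workqueue, or while holding its state
+ * lock, it must defer the reset instead:
+ *
+ *	static void my_driver_error(struct vs_service_device *service)
+ *	{
+ *		vs_service_reset_nosync(service);
+ *	}
+ */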
+
+static void
+vs_service_remove_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service)
+{
+	sysfs_remove_link(session->sysfs_entry, service->sysfs_name);
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+}
+
+static void vs_session_release_service_id(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	mutex_lock(&session->service_idr_lock);
+	idr_remove(&session->service_idr, service->id);
+	mutex_unlock(&session->service_idr_lock);
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+			"service id deallocated\n");
+}
+
+static void destroy_service(struct vs_service_device *service,
+		bool notify_remote)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+	int err;
+
+	lockdep_assert_held(&service->ready_lock);
+	WARN_ON(service->readiness != VS_SERVICE_DELETED);
+
+	/* Notify the core service and transport that the service is gone */
+	session->transport->vt->service_remove(session->transport, service);
+	if (notify_remote && service->id && session_drv->service_removed) {
+		err = session_drv->service_removed(session, service);
+		if (err < 0) {
+			dev_err(&session->dev,
+					"Failed to remove service %d: %d\n",
+					service->id, err);
+			session_fatal_error(session, GFP_KERNEL);
+		}
+	}
+
+	/*
+	 * At this point the service is guaranteed to be gone on the client
+	 * side, so we can safely release the service ID.
+	 */
+	if (session->is_server)
+		vs_session_release_service_id(service);
+
+	/*
+	 * This guarantees that any concurrent vs_session_get_service() that
+	 * found the service before we removed it from the IDR will take a
+	 * reference before we release ours.
+	 *
+	 * This similarly protects for_each_[usable_]service().
+	 */
+	synchronize_rcu();
+
+	/* Matches device_initialize() in vs_service_register() */
+	put_device(&service->dev);
+}
+
+/**
+ * disable_service - prevent a service becoming ready
+ * @service: the service that is to be disabled
+ * @force: true if the service is known to be in reset
+ *
+ * This routine may be called for any inactive service. Once disabled, the
+ * service cannot be made ready by the session, and thus cannot become active,
+ * until vs_service_enable() is called for it. If multiple calls are made to
+ * this function, they must be balanced by vs_service_enable() calls.
+ *
+ * If the force option is true, then any pending unacknowledged reset will be
+ * presumed to have been acknowledged. This is used when the core service is
+ * entering reset.
+ *
+ * This is used by the core service client to prevent the service restarting
+ * until the server is ready (i.e., a server_ready message is received); by
+ * the session layer to stop all communication while the core service itself
+ * is in reset; and by the transport layer when the transport was unable to
+ * complete reset of a service in its reset callback (typically because
+ * a service had passed message buffers to another Linux subsystem and could
+ * not free them immediately).
+ *
+ * In any case, there is no need for the operation to be signalled in any
+ * way, because the service is already in reset. It simply delays future
+ * signalling of service readiness.
+ */
+static void disable_service(struct vs_service_device *service, bool force)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	switch (service->readiness) {
+	case VS_SERVICE_INIT:
+	case VS_SERVICE_DELETED:
+	case VS_SERVICE_LOCAL_DELETE:
+		dev_err(&service->dev, "disabled while uninitialised\n");
+		break;
+	case VS_SERVICE_ACTIVE:
+		dev_err(&service->dev, "disabled while active\n");
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		/*
+		 * Will go to DISABLED state when reset completes, unless
+		 * it's being forced (i.e. we're moving to a core protocol
+		 * state that implies everything else is reset).
+		 */
+		if (force)
+			service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	default:
+		service->readiness = VS_SERVICE_DISABLED;
+		service->disable_count++;
+		break;
+	}
+
+	cancel_delayed_work(&service->ready_work);
+}
+
+static int service_handle_reset(struct vs_session_device *session,
+		struct vs_service_device *target, bool disable)
+{
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int err = 0;
+
+	mutex_lock_nested(&target->ready_lock, target->lock_subclass);
+
+	switch (target->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+		target->readiness = VS_SERVICE_DELETED;
+		destroy_service(target, true);
+		break;
+	case VS_SERVICE_ACTIVE:
+		/*
+		 * Reset the service and send a reset notification.
+		 *
+		 * We only send notifications for non-core services. This is
+		 * because core notifies by sending a transport reset, which
+		 * is what brought us here in the first place. Note that we
+		 * must already hold the core service state lock iff the
+		 * target is non-core.
+		 */
+		target->last_reset_request = jiffies;
+		__reset_service(target, target->id != 0);
+		/* fall through */
+	case VS_SERVICE_LOCAL_RESET:
+		target->readiness = target->disable_count ?
+			VS_SERVICE_DISABLED : VS_SERVICE_RESET;
+		if (disable)
+			disable_service(target, false);
+		if (target->readiness != VS_SERVICE_DISABLED)
+			queue_ready_work(target);
+		break;
+	case VS_SERVICE_READY:
+		/* Tell userspace that the service is no longer ready. */
+		kobject_uevent(&target->dev.kobj, KOBJ_OFFLINE);
+		/* fall through */
+	case VS_SERVICE_RESET:
+		/*
+		 * This can happen for a non-core service if we get a reset
+		 * request from the server on the client side, after the
+		 * client has enabled the service but before it is active.
+		 * Note that the service is already active on the server side
+		 * at this point. The client's delay may be due to either
+		 * reset throttling or the absence of a driver.
+		 *
+		 * We bump the reset request timestamp, disable the service
+		 * again, and send back an acknowledgement.
+		 */
+		if (disable && target->id) {
+			target->last_reset_request = jiffies;
+
+			err = session_drv->service_local_reset(
+					session, target);
+			if (err < 0) {
+				dev_err(&session->dev,
+						"Failed to reset service %d: %d\n",
+						target->id, err);
+				session_fatal_error(session,
+						GFP_KERNEL);
+			}
+
+			disable_service(target, false);
+			break;
+		}
+		/* fall through */
+	case VS_SERVICE_DISABLED:
+		/*
+		 * This can happen for the core service if we get a reset
+		 * before the transport has activated, or before the core
+		 * service has become ready.
+		 *
+		 * We bump the reset request timestamp, and disable the
+		 * service again if the transport had already activated and
+		 * enabled it.
+		 */
+		if (disable && !target->id) {
+			target->last_reset_request = jiffies;
+
+			if (target->readiness != VS_SERVICE_DISABLED)
+				disable_service(target, false);
+
+			break;
+		}
+		/* fall through */
+	default:
+		dev_warn(&target->dev, "remote reset while inactive (%d)\n",
+				target->readiness);
+		err = -EPROTO;
+		break;
+	}
+
+	mutex_unlock(&target->ready_lock);
+	return err;
+}
+
+/**
+ * vs_service_handle_reset - handle an incoming notification of a reset
+ * @session: the session that owns the service
+ * @service_id: the ID of the service that is to be reset
+ * @disable: if true, the service will not be automatically re-enabled
+ *
+ * This routine is called by the core service when the remote end notifies us
+ * of a non-core service reset. The service must be in ACTIVE, LOCAL_RESET or
+ * LOCAL_DELETE state. It must be called with the core service's state lock
+ * held.
+ *
+ * If the service was in ACTIVE state, the core service is called back to send
+ * a notification to the other end. If it was in LOCAL_DELETE state, it is
+ * unregistered.
+ */
+int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable)
+{
+	struct vs_service_device *target;
+	int ret;
+
+	if (!service_id)
+		return -EINVAL;
+
+	target = vs_session_get_service(session, service_id);
+	if (!target)
+		return -ENODEV;
+
+	ret = service_handle_reset(session, target, disable);
+	vs_put_service(target);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_reset);
+
+static int __enable_service(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->disable_count))
+		return -EINVAL;
+
+	if (--service->disable_count > 0)
+		return 0;
+
+	/*
+	 * If the service is still resetting, it can't become ready until the
+	 * reset completes. If it has been deleted, it will never become
+	 * ready. In either case, there's nothing more to do.
+	 */
+	if ((service->readiness == VS_SERVICE_LOCAL_RESET) ||
+			(service->readiness == VS_SERVICE_LOCAL_DELETE) ||
+			(service->readiness == VS_SERVICE_DELETED))
+		return 0;
+
+	if (WARN_ON(service->readiness != VS_SERVICE_DISABLED))
+		return -EINVAL;
+
+	service->readiness = VS_SERVICE_RESET;
+	service->last_reset = jiffies;
+	queue_ready_work(service);
+
+	return 0;
+}
+
+/**
+ * vs_service_enable - allow a service to become ready
+ * @service: the service that is to be enabled
+ *
+ * Calling this routine for a service permits the session layer to make the
+ * service ready. It will do so as soon as any outstanding reset throttling
+ * is complete, and will then start the service once it has a driver attached.
+ *
+ * Services are disabled, requiring a call to this routine to re-enable them:
+ * - when first initialised (after vs_service_start),
+ * - when reset on the client side by vs_service_handle_reset,
+ * - when the transport has delayed completion of a reset, and
+ * - when the server-side core protocol is disconnected or reset by
+ *   vs_session_disable_noncore.
+ */
+int vs_service_enable(struct vs_service_device *service)
+{
+	int ret;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	ret = __enable_service(service);
+
+	mutex_unlock(&service->ready_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_enable);
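+
+/*
+ * Illustrative pairing of disable and enable (sketch only): every path that
+ * disables a service must later make exactly one matching call to
+ * vs_service_enable(). For example, a transport whose service_reset
+ * callback returned a positive value (deferring completion of the reset)
+ * re-enables the service once its outstanding buffers have been returned:
+ *
+ *	vs_service_enable(service);
+ */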
+
+/*
+ * Service work functions
+ */
+static void queue_rx_work(struct vs_service_device *service)
+{
+	bool rx_atomic;
+
+	rx_atomic = vs_service_has_atomic_rx(service);
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "Queuing rx %s\n",
+			rx_atomic ? "tasklet (atomic)" : "work (cansleep)");
+
+	if (rx_atomic)
+		tasklet_schedule(&service->rx_tasklet);
+	else
+		queue_work(service->work_queue, &service->rx_work);
+}
+
+static void cancel_pending_rx(struct vs_service_device *service)
+{
+	struct vs_mbuf *mbuf;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	cancel_work_sync(&service->rx_work);
+	tasklet_kill(&service->rx_tasklet);
+
+	spin_lock_irq(&service->rx_lock);
+	while (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue,
+				struct vs_mbuf, queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		vs_service_free_mbuf(service, mbuf);
+		spin_lock_irq(&service->rx_lock);
+	}
+	service->tx_ready = false;
+	spin_unlock_irq(&service->rx_lock);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service);
+static unsigned long reset_cool_off(struct vs_service_device *service);
+
+static void service_cooloff_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cooloff_work.work);
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long current_time = jiffies, wake_time;
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	if (reset_throttle_cooled_off(service)) {
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Reset thrashing cooled off (delay = %u ms, cool off = %u ms, last reset %u ms ago, last reset request was %u ms ago)\n",
+				jiffies_to_msecs(service->reset_delay),
+				jiffies_to_msecs(reset_cool_off(service)),
+				msecs_ago(service->last_reset),
+				msecs_ago(service->last_reset_request));
+
+		service->reset_delay = 0;
+
+		/*
+		 * If the service is already in reset, then queue_ready_work
+		 * has already run and has deferred queuing of the ready_work
+		 * until cooloff. Schedule the ready work to run immediately.
+		 */
+		if (service->readiness == VS_SERVICE_RESET)
+			schedule_delayed_work(&service->ready_work, 0);
+	} else {
+		/*
+		 * This can happen if last_reset_request has been bumped
+		 * since the cooloff work was first queued. We need to
+		 * work out how long it is until the service cools off,
+		 * then reschedule ourselves.
+		 */
+		wake_time = reset_cool_off(service) +
+				service->last_reset_request;
+
+		WARN_ON(time_after(current_time, wake_time));
+
+		schedule_delayed_work(&service->cooloff_work,
+				wake_time - current_time);
+	}
+
+	mutex_unlock(&service->ready_lock);
+}
+
+static void
+service_reset_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, reset_work);
+
+	service->pending_reset = false;
+
+	vs_service_reset(service, service);
+}
+
+/* Returns true if there are more messages to handle */
+static bool
+dequeue_and_handle_received_message(struct vs_service_device *service)
+{
+	struct vs_service_driver *driver =
+			to_vs_service_driver(service->dev.driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	const struct vs_transport_vtable *vt = session->transport->vt;
+	struct vs_service_stats *stats = &service->stats;
+	struct vs_mbuf *mbuf;
+	size_t size;
+	int ret;
+
+	/* Don't do rx work unless the service is active */
+	if (service->readiness != VS_SERVICE_ACTIVE)
+		return false;
+
+	/* Atomically take an item from the queue */
+	spin_lock_irq(&service->rx_lock);
+	if (!list_empty(&service->rx_queue)) {
+		mbuf = list_first_entry(&service->rx_queue, struct vs_mbuf,
+				queue);
+		list_del_init(&mbuf->queue);
+		spin_unlock_irq(&service->rx_lock);
+		size = vt->mbuf_size(mbuf);
+
+		/*
+		 * Call the message handler for the service. The service's
+		 * message handler is responsible for freeing the mbuf when it
+		 * is done with it.
+		 */
+		ret = driver->receive(service, mbuf);
+		if (ret < 0) {
+			atomic_inc(&service->stats.recv_failures);
+			dev_err(&service->dev,
+					"receive returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		} else {
+			atomic_add(size, &service->stats.recv_bytes);
+			atomic_inc(&service->stats.recv_mbufs);
+		}
+
+	} else if (service->tx_ready) {
+		service->tx_ready = false;
+		spin_unlock_irq(&service->rx_lock);
+
+		/*
+		 * Update the tx_ready stats accounting and then call the
+		 * service's tx_ready handler.
+		 */
+		atomic_inc(&stats->nr_tx_ready);
+		if (atomic_read(&stats->nr_over_quota) > 0) {
+			int total;
+
+			total = atomic_add_return(jiffies_to_msecs(jiffies -
+							stats->over_quota_time),
+					&stats->over_quota_time_total);
+			atomic_set(&stats->over_quota_time_avg, total /
+					atomic_read(&stats->nr_over_quota));
+		}
+		atomic_set(&service->is_over_quota, 0);
+
+		/*
+		 * Note that a service's quota may reduce at any point, even
+		 * during the tx_ready handler. This is important if a service
+		 * has an ordered list of pending messages to send. If a
+		 * message fails to send from the tx_ready handler due to
+		 * over-quota then subsequent messages in the same handler may
+		 * send successfully. To avoid sending messages in the
+		 * incorrect order the service's tx_ready handler should
+		 * return immediately if a message fails to send.
+		 */
+		ret = driver->tx_ready(service);
+		if (ret < 0) {
+			dev_err(&service->dev,
+					"tx_ready returned %d; resetting service\n",
+					ret);
+			vs_service_reset_nosync(service);
+			return false;
+		}
+	} else {
+		spin_unlock_irq(&service->rx_lock);
+	}
+
+	/*
+	 * There's no need to lock for this list_empty: if we race
+	 * with a msg enqueue, we'll be rescheduled by the other side,
+	 * and if we race with a dequeue, we'll just do nothing when
+	 * we run (or will be cancelled before we run).
+	 */
+	return !list_empty(&service->rx_queue) || service->tx_ready;
+}
+
+static void service_rx_tasklet(unsigned long data)
+{
+	struct vs_service_device *service = (struct vs_service_device *)data;
+	bool resched;
+
+	/*
+	 * There is no need to acquire the state spinlock or mutex here,
+	 * because this tasklet is disabled when the lock is held. These
+	 * are annotations for sparse and lockdep, respectively.
+	 *
+	 * We can't annotate the implicit mutex acquire because lockdep gets
+	 * upset about inconsistent softirq states.
+	 */
+	__acquire(service);
+	spin_acquire(&service->state_spinlock.dep_map, 0, 0, _THIS_IP_);
+
+	resched = dequeue_and_handle_received_message(service);
+
+	if (resched)
+		tasklet_schedule(&service->rx_tasklet);
+
+	spin_release(&service->state_spinlock.dep_map, 0, _THIS_IP_);
+	__release(service);
+}
+
+static void service_rx_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, rx_work);
+	bool requeue;
+
+	/*
+	 * We must acquire the state mutex here to protect services that
+	 * are using vs_service_state_lock().
+	 *
+	 * There is no need to acquire the spinlock, which is never used in
+	 * drivers with task context receive handlers.
+	 */
+	vs_service_state_lock(service);
+
+	requeue = dequeue_and_handle_received_message(service);
+
+	vs_service_state_unlock(service);
+
+	if (requeue)
+		queue_work(service->work_queue, work);
+}
+
+/*
+ * Service sysfs statistics counters. These files are all atomic_t, and
+ * read only, so we use a generator macro to avoid code duplication.
+ */
+#define service_stat_attr(__name)					\
+	static ssize_t service_stat_##__name##_show(struct device *dev, \
+			struct device_attribute *attr, char *buf)       \
+	{                                                               \
+		struct vs_service_device *service =                     \
+				to_vs_service_device(dev);              \
+									\
+		return scnprintf(buf, PAGE_SIZE, "%u\n",		\
+				atomic_read(&service->stats.__name));	\
+	}                                                               \
+	static DEVICE_ATTR(__name, S_IRUGO,                             \
+			service_stat_##__name##_show, NULL);
+
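+/*
+ * For reference, service_stat_attr(sent_mbufs) below expands to roughly the
+ * following (a sketch of the expansion, not additional code):
+ *
+ *	static ssize_t service_stat_sent_mbufs_show(struct device *dev,
+ *			struct device_attribute *attr, char *buf)
+ *	{
+ *		struct vs_service_device *service =
+ *				to_vs_service_device(dev);
+ *
+ *		return scnprintf(buf, PAGE_SIZE, "%u\n",
+ *				atomic_read(&service->stats.sent_mbufs));
+ *	}
+ *	static DEVICE_ATTR(sent_mbufs, S_IRUGO,
+ *			service_stat_sent_mbufs_show, NULL);
+ */
+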
+service_stat_attr(sent_mbufs);
+service_stat_attr(sent_bytes);
+service_stat_attr(recv_mbufs);
+service_stat_attr(recv_bytes);
+service_stat_attr(nr_over_quota);
+service_stat_attr(nr_tx_ready);
+service_stat_attr(over_quota_time_total);
+service_stat_attr(over_quota_time_avg);
+
+static struct attribute *service_stat_dev_attrs[] = {
+	&dev_attr_sent_mbufs.attr,
+	&dev_attr_sent_bytes.attr,
+	&dev_attr_recv_mbufs.attr,
+	&dev_attr_recv_bytes.attr,
+	&dev_attr_nr_over_quota.attr,
+	&dev_attr_nr_tx_ready.attr,
+	&dev_attr_over_quota_time_total.attr,
+	&dev_attr_over_quota_time_avg.attr,
+	NULL,
+};
+
+static const struct attribute_group service_stat_attributes = {
+	.name   = "stats",
+	.attrs  = service_stat_dev_attrs,
+};
+
+static void delete_service(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	bool notify_on_destroy = true;
+
+	/* FIXME: Jira ticket SDK-3495 - philipd. */
+	/* This should be the caller's responsibility */
+	vs_get_service(service);
+
+	mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+	/*
+	 * If we're on the client side, the service should already have been
+	 * disabled at this point.
+	 */
+	WARN_ON(service->id != 0 && !session->is_server &&
+			service->readiness != VS_SERVICE_DISABLED &&
+			service->readiness != VS_SERVICE_DELETED);
+
+	/*
+	 * Make sure the service is not active, and notify the remote end if
+	 * it needs to be reset. Note that we already hold the core service
+	 * state lock iff this is a non-core service.
+	 */
+	__reset_service(service, true);
+
+	/*
+	 * If the remote end is aware that the service is inactive, we can
+	 * delete right away; otherwise we need to wait for a notification
+	 * that the service has reset.
+	 */
+	switch (service->readiness) {
+	case VS_SERVICE_LOCAL_DELETE:
+	case VS_SERVICE_DELETED:
+		/* Nothing to do here */
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+		return;
+	case VS_SERVICE_ACTIVE:
+		BUG();
+		break;
+	case VS_SERVICE_LOCAL_RESET:
+		service->readiness = VS_SERVICE_LOCAL_DELETE;
+		break;
+	case VS_SERVICE_INIT:
+		notify_on_destroy = false;
+		/* Fall through */
+	default:
+		service->readiness = VS_SERVICE_DELETED;
+		destroy_service(service, notify_on_destroy);
+		break;
+	}
+
+	mutex_unlock(&service->ready_lock);
+
+	/*
+	 * Remove the service symlink from the
+	 * /sys/vservices/<server|client>-sessions/ directory.
+	 */
+	vs_service_remove_sysfs_entries(session, service);
+
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+
+	/*
+	 * On the client-side we need to release the service id as soon as
+	 * the service is deleted. Otherwise the server may attempt to create
+	 * a new service with this id.
+	 */
+	if (!session->is_server)
+		vs_session_release_service_id(service);
+
+	device_del(&service->dev);
+	vs_put_service(service);
+}
+
+/**
+ * vs_service_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ * @caller: the service initiating deletion
+ *
+ * Services may only be deleted by their owner (on the server side), or by the
+ * core service. This function must not be called for the core service.
+ */
+int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller)
+{
+	struct vs_session_device *session =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return -ENODEV;
+
+	if (!service->id)
+		return -EINVAL;
+
+	if (caller != service->owner && caller != core_service)
+		return -EPERM;
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_delete);
+
+/**
+ * vs_service_handle_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ *
+ * This is a variant of vs_service_delete which must only be called by the
+ * core service. It is used by the core service client when a service_removed
+ * message is received.
+ */
+int vs_service_handle_delete(struct vs_service_device *service)
+{
+	struct vs_session_device *session __maybe_unused =
+			vs_service_get_session(service);
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	lockdep_assert_held(&core_service->state_mutex);
+
+	delete_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_delete);
+
+static void service_cleanup_work(struct work_struct *work)
+{
+	struct vs_service_device *service = container_of(work,
+			struct vs_service_device, cleanup_work);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "cleanup\n");
+
+	if (service->owner)
+		vs_put_service(service->owner);
+
+	/* Put our reference to the session */
+	if (service->dev.parent)
+		put_device(service->dev.parent);
+
+	tasklet_kill(&service->rx_tasklet);
+	cancel_work_sync(&service->rx_work);
+	cancel_delayed_work_sync(&service->cooloff_work);
+	cancel_delayed_work_sync(&service->ready_work);
+	cancel_work_sync(&service->reset_work);
+
+	if (service->work_queue)
+		destroy_workqueue(service->work_queue);
+
+	kfree(service->sysfs_name);
+	kfree(service->name);
+	kfree(service->protocol);
+	kfree(service);
+}
+
+static void vs_service_release(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+
+	vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+			&service->dev, "release\n");
+
+	/*
+	 * We need to defer cleanup to avoid a circular dependency between the
+	 * core service's state lock (which can be held at this point, on the
+	 * client side) and any non-core service's reset work (which we must
+	 * cancel here, and which acquires the core service state lock).
+	 */
+	schedule_work(&service->cleanup_work);
+}
+
+static int service_add_idr(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t service_id)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	int err, base_id, id;
+
+	if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID)
+		base_id = 1;
+	else
+		base_id = service_id;
+
+retry:
+	if (!idr_pre_get(&session->service_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	mutex_lock(&session->service_idr_lock);
+	err = idr_get_new_above(&session->service_idr, service, base_id, &id);
+	if (err == 0) {
+		if (service_id != VS_SERVICE_AUTO_ALLOCATE_ID &&
+				id != service_id) {
+			/* Failed to allocate the requested service id */
+			idr_remove(&session->service_idr, id);
+			mutex_unlock(&session->service_idr_lock);
+			return -EBUSY;
+		}
+		if (id > VS_MAX_SERVICE_ID) {
+			/* We are out of service ids */
+			idr_remove(&session->service_idr, id);
+			mutex_unlock(&session->service_idr_lock);
+			return -ENOSPC;
+		}
+	}
+	mutex_unlock(&session->service_idr_lock);
+	if (err == -EAGAIN)
+		goto retry;
+	if (err < 0)
+		return err;
+#else
+	int start, end, id;
+
+	if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID) {
+		start = 1;
+		end = VS_MAX_SERVICES;
+	} else {
+		start = service_id;
+		end = service_id + 1;
+	}
+
+	mutex_lock(&session->service_idr_lock);
+	id = idr_alloc(&session->service_idr, service, start, end,
+			GFP_KERNEL);
+	mutex_unlock(&session->service_idr_lock);
+
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+#endif
+
+	service->id = id;
+	return 0;
+}
+
+static int
+vs_service_create_sysfs_entries(struct vs_session_device *session,
+		struct vs_service_device *service, vs_service_id_t id)
+{
+	int ret;
+	char *sysfs_name, *c;
+
+	/* Add a symlink to session device inside service device sysfs */
+	ret = sysfs_create_link(&service->dev.kobj, &session->dev.kobj,
+			VS_SESSION_SYMLINK_NAME);
+	if (ret) {
+		dev_err(&service->dev, "Error %d creating session symlink\n",
+				ret);
+		goto fail;
+	}
+
+	/* Build the sysfs directory entry name for this service */
+	sysfs_name = kasprintf(GFP_KERNEL, "%s:%d", service->name, id);
+	if (!sysfs_name) {
+		ret = -ENOMEM;
+		goto fail_session_link;
+	}
+
+	/*
+	 * We don't want to create symlinks containing '/', which would be
+	 * interpreted as a directory separator, so replace all '/'s with '!'s.
+	 */
+	while ((c = strchr(sysfs_name, '/')))
+		*c = '!';
+	ret = sysfs_create_link(session->sysfs_entry, &service->dev.kobj,
+			sysfs_name);
+	if (ret)
+		goto fail_free_sysfs_name;
+
+	service->sysfs_name = sysfs_name;
+
+	return 0;
+
+fail_free_sysfs_name:
+	kfree(sysfs_name);
+fail_session_link:
+	sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+fail:
+	return ret;
+}
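+
+/*
+ * Example of the resulting sysfs entry name (illustrative values only): a
+ * service named "serial" with id 3 gets the session symlink "serial:3"; a
+ * hypothetical name containing a slash, such as "tty/0", would become
+ * "tty!0:3" after the substitution above.
+ */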
+
+/**
+ * vs_service_register - create and register a new vs_service_device
+ * @session: the session device that is the parent of the service
+ * @owner: the service responsible for managing the new service
+ * @service_id: the ID of the new service
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ *
+ * This function should only be called by a session driver that is bound to
+ * the given session.
+ *
+ * The given service_id must not have been passed to a prior successful
+ * vs_service_register call, unless the service ID has since been freed by a
+ * call to the session driver's service_removed callback.
+ *
+ * The core service state lock must not be held while calling this function.
+ */
+struct vs_service_device *vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *owner, vs_service_id_t service_id,
+		const char *protocol, const char *name, const void *plat_data)
+{
+	struct vs_service_device *service;
+	struct vs_session_driver *session_drv;
+	int ret = -EIO;
+	char *c;
+
+	if (service_id && !owner) {
+		dev_err(&session->dev, "Non-core service must have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	} else if (!service_id && owner) {
+		dev_err(&session->dev, "Core service must not have an owner\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!session->dev.driver)
+		goto fail;
+
+	session_drv = to_vs_session_driver(session->dev.driver);
+
+	service = kzalloc(sizeof(*service), GFP_KERNEL);
+	if (!service) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&service->rx_queue);
+	INIT_WORK(&service->rx_work, service_rx_work);
+	INIT_WORK(&service->reset_work, service_reset_work);
+	INIT_DELAYED_WORK(&service->ready_work, service_ready_work);
+	INIT_DELAYED_WORK(&service->cooloff_work, service_cooloff_work);
+	INIT_WORK(&service->cleanup_work, service_cleanup_work);
+	spin_lock_init(&service->rx_lock);
+	init_waitqueue_head(&service->quota_wq);
+
+	service->owner = vs_get_service(owner);
+
+	service->readiness = VS_SERVICE_INIT;
+	mutex_init(&service->ready_lock);
+	service->driver_probed = false;
+
+	/*
+	 * Service state locks - A service is only allowed to use one of these
+	 */
+	spin_lock_init(&service->state_spinlock);
+	mutex_init(&service->state_mutex);
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	/* Lock ordering
+	 *
+	 * The dependency order for the various service locks is as follows:
+	 *
+	 * cooloff_work
+	 * reset_work
+	 * ready_work
+	 * ready_lock/0
+	 * rx_work/0
+	 * state_mutex/0
+	 * ready_lock/1
+	 * ...
+	 * state_mutex/n
+	 * state_spinlock
+	 *
+	 * The subclass is the service's rank in the hierarchy of
+	 * service ownership. This results in core having subclass 0 on
+	 * server-side and 1 on client-side. Services directly created
+	 * by the core will have a lock subclass value of 2 for
+	 * servers, 3 for clients. Services created by non-core
+	 * services will have a lock subclass value of x + 2, where x
+	 * is the lock subclass of the creator service (e.g. servers
+	 * will have even-numbered lock subclasses, and clients will
+	 * have odd-numbered lock subclasses).
+	 *
+	 * If a service driver has any additional locks for protecting
+	 * internal state, they will generally fit between state_mutex/n and
+	 * ready_lock/n+1 on this list. For the core service, this applies to
+	 * the session lock.
+	 */
+
+	if (owner)
+		service->lock_subclass = owner->lock_subclass + 2;
+	else
+		service->lock_subclass = session->is_server ? 0 : 1;
+
+#ifdef CONFIG_LOCKDEP
+	if (service->lock_subclass >= MAX_LOCKDEP_SUBCLASSES) {
+		dev_warn(&session->dev, "Owner hierarchy is too deep, lockdep will fail\n");
+	} else {
+		/*
+		 * We need to set the default subclass for the rx work,
+		 * because the workqueue API doesn't (and can't) provide
+		 * anything like lock_nested() for it.
+		 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+		/*
+		 * Lockdep allows a specific lock's subclass to be set with
+		 * the subclass argument to lockdep_init_map(). However, prior
+		 * to Linux 3.3, that only works the first time it is called
+		 * for a given class and subclass. So we have to fake it,
+		 * putting every subclass in a different class, so the only
+		 * thing that breaks is printing the subclass in lockdep
+		 * warnings.
+		 */
+		static struct lock_class_key
+				rx_work_keys[MAX_LOCKDEP_SUBCLASSES];
+		struct lock_class_key *key =
+				&rx_work_keys[service->lock_subclass];
+#else
+		struct lock_class_key *key = service->rx_work.lockdep_map.key;
+#endif
+
+		/*
+		 * We can't use the lockdep_set_class() macro because the
+		 * work's lockdep map is called .lockdep_map instead of
+		 * .dep_map.
+		 */
+		lockdep_init_map(&service->rx_work.lockdep_map,
+				"&service->rx_work", key,
+				service->lock_subclass);
+	}
+#endif
+
+	/*
+	 * Copy the protocol and name. Remove any leading or trailing
+	 * whitespace characters (including newlines) since the strings
+	 * may have been passed via sysfs files.
+	 */
+	if (protocol) {
+		service->protocol = kstrdup(protocol, GFP_KERNEL);
+		if (!service->protocol) {
+			ret = -ENOMEM;
+			goto fail_copy_protocol;
+		}
+		c = strim(service->protocol);
+		if (c != service->protocol)
+			memmove(service->protocol, c,
+					strlen(service->protocol) + 1);
+	}
+
+	service->name = kstrdup(name, GFP_KERNEL);
+	if (!service->name) {
+		ret = -ENOMEM;
+		goto fail_copy_name;
+	}
+	c = strim(service->name);
+	if (c != service->name)
+		memmove(service->name, c, strlen(service->name) + 1);
+
+	service->is_server = session_drv->is_server;
+
+	/* Grab a reference to the session we are on */
+	service->dev.parent = get_device(&session->dev);
+	service->dev.bus = session_drv->service_bus;
+	service->dev.release = vs_service_release;
+
+	service->last_reset = 0;
+	service->last_reset_request = 0;
+	service->last_ready = 0;
+	service->reset_delay = 0;
+
+	device_initialize(&service->dev);
+	service->dev.platform_data = (void *)plat_data;
+
+	ret = service_add_idr(session, service, service_id);
+	if (ret)
+		goto fail_add_idr;
+
+#ifdef CONFIG_VSERVICES_NAMED_DEVICE
+	/* Integrate session and service names in vservice devnodes */
+	dev_set_name(&service->dev, "vservice-%s:%s:%s:%d:%d",
+			session->is_server ? "server" : "client",
+			session->name, service->name,
+			session->session_num, service->id);
+#else
+	dev_set_name(&service->dev, "%s:%d", dev_name(&session->dev),
+			service->id);
+#endif
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+	if (service->id > 0)
+		service->dev.devt = MKDEV(vservices_cdev_major,
+			(session->session_num * VS_MAX_SERVICES) +
+			service->id);
+#endif
+
+	service->work_queue = vs_create_workqueue(dev_name(&service->dev));
+	if (!service->work_queue) {
+		ret = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	tasklet_init(&service->rx_tasklet, service_rx_tasklet,
+			(unsigned long)service);
+
+	/*
+	 * If this is the core service, set the core service pointer in the
+	 * session.
+	 */
+	if (service->id == 0) {
+		mutex_lock(&session->service_idr_lock);
+		if (session->core_service) {
+			ret = -EEXIST;
+			mutex_unlock(&session->service_idr_lock);
+			goto fail_become_core;
+		}
+
+		/* Put in vs_session_bus_remove() */
+		session->core_service = vs_get_service(service);
+		mutex_unlock(&session->service_idr_lock);
+	}
+
+	/* Notify the transport */
+	ret = session->transport->vt->service_add(session->transport, service);
+	if (ret) {
+		dev_err(&session->dev,
+				"Failed to add service %d (%s:%s) to transport: %d\n",
+				service->id, service->name,
+				service->protocol, ret);
+		goto fail_transport_add;
+	}
+
+	/* Delay uevent until vs_service_start(). */
+	dev_set_uevent_suppress(&service->dev, true);
+
+	ret = device_add(&service->dev);
+	if (ret)
+		goto fail_device_add;
+
+	/* Create the service statistics sysfs group */
+	ret = sysfs_create_group(&service->dev.kobj, &service_stat_attributes);
+	if (ret)
+		goto fail_sysfs_create_group;
+
+	/* Create additional sysfs files */
+	ret = vs_service_create_sysfs_entries(session, service, service->id);
+	if (ret)
+		goto fail_sysfs_add_entries;
+
+	return service;
+
+fail_sysfs_add_entries:
+	sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+fail_sysfs_create_group:
+	device_del(&service->dev);
+fail_device_add:
+	session->transport->vt->service_remove(session->transport, service);
+fail_transport_add:
+	if (service->id == 0) {
+		session->core_service = NULL;
+		vs_put_service(service);
+	}
+fail_become_core:
+fail_create_workqueue:
+	vs_session_release_service_id(service);
+fail_add_idr:
+	/*
+	 * device_initialize() has been called, so we must call put_device()
+	 * and let vs_service_release() handle the rest of the cleanup.
+	 */
+	put_device(&service->dev);
+	return ERR_PTR(ret);
+
+fail_copy_name:
+	kfree(service->protocol);
+fail_copy_protocol:
+	kfree(service);
+fail:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vs_service_register);
+
+/**
+ * vs_session_get_service - Look up a service by ID on a session and get
+ * a reference to it. The caller must call vs_put_service when it is finished
+ * with the service.
+ *
+ * @session: The session to search for the service on
+ * @service_id: ID of the service to find
+ */
+struct vs_service_device *
+vs_session_get_service(struct vs_session_device *session,
+		vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+
+	if (!session)
+		return NULL;
+
+	rcu_read_lock();
+	service = idr_find(&session->service_idr, service_id);
+	if (!service) {
+		rcu_read_unlock();
+		return NULL;
+	}
+	vs_get_service(service);
+	rcu_read_unlock();
+
+	return service;
+}
+EXPORT_SYMBOL_GPL(vs_session_get_service);
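+
+/*
+ * Minimal usage sketch (not part of this driver): the reference taken by
+ * the lookup must be dropped with vs_put_service() once the caller is done
+ * with the service.
+ *
+ *	service = vs_session_get_service(session, service_id);
+ *	if (!service)
+ *		return -ENODEV;
+ *	...
+ *	vs_put_service(service);
+ */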
+
+/**
+ * __for_each_service - Iterate over all non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ *
+ * Iterate over all services on a session, excluding the core service, and
+ * call a callback function on each.
+ */
+static void __for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *))
+{
+	struct vs_service_device *service;
+	int id;
+
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		func(service);
+		vs_put_service(service);
+	}
+}
+
+/**
+ * vs_session_delete_noncore - immediately delete all non-core services
+ * @session: the session whose services are to be deleted
+ *
+ * This function disables and deletes all non-core services without notifying
+ * the core service. It must only be called by the core service, with its state
+ * lock held. It is used when the core service client disconnects or
+ * resets, and when the core service server has its driver removed.
+ */
+void vs_session_delete_noncore(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service __maybe_unused =
+			session->core_service;
+
+	lockdep_assert_held(&core_service->state_mutex);
+
+	vs_session_disable_noncore(session);
+
+	__for_each_service(session, delete_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_delete_noncore);
+
+/**
+ * vs_session_for_each_service - Iterate over all initialised and non-deleted
+ * non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ * @data: Extra data to pass to the callback
+ *
+ * Iterate over all services on a session, excluding the core service and any
+ * service that has been deleted or has not yet had vs_service_start() called,
+ * and call a callback function on each. The callback function is called with
+ * the service's ready lock held.
+ */
+void vs_session_for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *, void *), void *data)
+{
+	struct vs_service_device *service;
+	int id;
+
+	for (id = 1; ; id++) {
+		rcu_read_lock();
+		service = idr_get_next(&session->service_idr, &id);
+		if (!service) {
+			rcu_read_unlock();
+			break;
+		}
+		vs_get_service(service);
+		rcu_read_unlock();
+
+		mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+		if (service->readiness != VS_SERVICE_LOCAL_DELETE &&
+				service->readiness != VS_SERVICE_DELETED &&
+				service->readiness != VS_SERVICE_INIT)
+			func(service, data);
+
+		mutex_unlock(&service->ready_lock);
+		vs_put_service(service);
+	}
+}
+
+static void force_disable_service(struct vs_service_device *service,
+		void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	if (service->readiness == VS_SERVICE_ACTIVE)
+		__reset_service(service, false);
+
+	disable_service(service, true);
+}
+
+/**
+ * vs_session_disable_noncore - immediately disable all non-core services
+ * @session: the session whose services are to be disabled
+ *
+ * This function must be called by the core service driver to disable all
+ * services, whenever it resets or is otherwise disconnected. It is called
+ * directly by the server-side core service, and by the client-side core
+ * service via vs_session_delete_noncore().
+ */
+void vs_session_disable_noncore(struct vs_session_device *session)
+{
+	vs_session_for_each_service(session, force_disable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_disable_noncore);
+
+static void try_enable_service(struct vs_service_device *service, void *unused)
+{
+	lockdep_assert_held(&service->ready_lock);
+
+	__enable_service(service);
+}
+
+/**
+ * vs_session_enable_noncore - enable all disabled non-core services
+ * @session: the session whose services are to be enabled
+ *
+ * This function is called by the core server driver to enable all services
+ * when the core client connects.
+ */
+void vs_session_enable_noncore(struct vs_session_device *session)
+{
+	vs_session_for_each_service(session, try_enable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_enable_noncore);
+
+/**
+ * vs_session_handle_message - process an incoming message from a transport
+ * @session: the session that is receiving the message
+ * @mbuf: a buffer containing the message payload
+ * @service_id: the id of the service that the message was addressed to
+ *
+ * This routine will return 0 if the buffer was accepted, or a negative value
+ * otherwise. In the latter case the caller should free the buffer. If the
+ * error is fatal, this routine will reset the service.
+ *
+ * This routine may be called from interrupt context.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ */
+int vs_session_handle_message(struct vs_session_device *session,
+		struct vs_mbuf *mbuf, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_transport *transport;
+	unsigned long flags;
+
+	transport = session->transport;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		dev_err(&session->dev, "message for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return -ENOTCONN;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't enqueue the message, or else enqueue the
+	 * message before cancel_pending_rx() runs (and removes it).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the message. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return -ECONNRESET;
+	}
+
+	list_add_tail(&mbuf->queue, &service->rx_queue);
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+
+	/* Schedule processing of the message by the service's drivers. */
+	queue_rx_work(service);
+	vs_put_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_message);
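+
+/*
+ * Illustrative caller-side sketch for the function above (a transport RX
+ * path; transport_free_mbuf() is a hypothetical helper): on a negative
+ * return the caller still owns the buffer and must free it, as documented
+ * above.
+ *
+ *	err = vs_session_handle_message(session, mbuf, service_id);
+ *	if (err < 0)
+ *		transport_free_mbuf(transport, mbuf);
+ */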
+
+/**
+ * vs_session_quota_available - notify a service that it can transmit
+ * @session: the session owning the service that is ready
+ * @service_id: the id of the service that is ready
+ * @count: the number of buffers that just became ready
+ * @send_tx_ready: true if quota has just become nonzero due to a buffer being
+ *                 freed by the remote communication partner
+ *
+ * This routine is called by the transport driver when a send-direction
+ * message buffer becomes free. It wakes up any task that is waiting for
+ * send quota to become available.
+ *
+ * This routine may be called from interrupt context from the transport
+ * driver, and as such, it may not sleep.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ *
+ * If the send_tx_ready argument is true, this function also schedules a
+ * call to the driver's tx_ready callback. Note that this never has priority
+ * over handling incoming messages; it will only be handled once the receive
+ * queue is empty. This is to increase batching of outgoing messages, and also
+ * to reduce the chance that an outgoing message will be dropped by the partner
+ * because an incoming message has already changed the state.
+ *
+ * In general, task context drivers should use the waitqueue, and softirq
+ * context drivers (with tx_atomic set) should use tx_ready.
+ */
+void vs_session_quota_available(struct vs_session_device *session,
+		vs_service_id_t service_id, unsigned count,
+		bool send_tx_ready)
+{
+	struct vs_service_device *service;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		dev_err(&session->dev, "tx ready for unknown service %d\n",
+				service_id);
+		session_fatal_error(session, GFP_ATOMIC);
+		return;
+	}
+
+	wake_up_nr(&service->quota_wq, count);
+
+	if (send_tx_ready) {
+		/*
+		 * Take the rx lock before checking service readiness. This
+		 * guarantees that if __reset_service() has just made the
+		 * service inactive, we either see it and don't set the tx_ready
+		 * flag, or else set the flag before cancel_pending_rx() runs
+		 * (and clears it).
+		 */
+		spin_lock_irqsave(&service->rx_lock, flags);
+
+		/* If the service is not active, drop the tx_ready event */
+		if (service->readiness != VS_SERVICE_ACTIVE) {
+			spin_unlock_irqrestore(&service->rx_lock, flags);
+			vs_put_service(service);
+			return;
+		}
+
+		service->tx_ready = true;
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+
+		/* Schedule RX processing by the service driver. */
+		queue_rx_work(service);
+	}
+
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_quota_available);
+
+/**
+ * vs_session_handle_notify - process an incoming notification from a transport
+ * @session: the session that is receiving the notification
+ * @flags: notification flags
+ * @service_id: the id of the service that the notification was addressed to
+ *
+ * This function may be called from interrupt context from the transport driver,
+ * and as such, it may not sleep.
+ */
+void vs_session_handle_notify(struct vs_session_device *session,
+		unsigned long bits, vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_service_driver *driver;
+	unsigned long flags;
+
+	service = vs_session_get_service(session, service_id);
+	if (!service) {
+		/* Ignore the notification since the service id doesn't exist */
+		dev_err(&session->dev, "notification for unknown service %d\n",
+				service_id);
+		return;
+	}
+
+	/*
+	 * Take the rx lock before checking service readiness. This guarantees
+	 * that if __reset_service() has just made the service inactive, we
+	 * either see it and don't send the notification, or else send it
+	 * before cancel_pending_rx() runs (and thus before the driver is
+	 * deactivated).
+	 */
+	spin_lock_irqsave(&service->rx_lock, flags);
+
+	/* If the service is not active, drop the notification. */
+	if (service->readiness != VS_SERVICE_ACTIVE) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	/* There should be a driver bound on the service */
+	if (WARN_ON(!service->dev.driver)) {
+		spin_unlock_irqrestore(&service->rx_lock, flags);
+		vs_put_service(service);
+		return;
+	}
+
+	driver = to_vs_service_driver(service->dev.driver);
+	/* Call the driver's notify function */
+	driver->notify(service, bits);
+
+	spin_unlock_irqrestore(&service->rx_lock, flags);
+	vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_notify);
+
+static unsigned long reset_cool_off(struct vs_service_device *service)
+{
+	return service->reset_delay * RESET_THROTTLE_COOL_OFF_MULT;
+}
+
+static bool ready_needs_delay(struct vs_service_device *service)
+{
+	/*
+	 * We throttle resets if too little time elapsed between the service
+	 * last becoming ready, and the service last starting a reset.
+	 *
+	 * We do not use the current time here because it includes the time
+	 * taken by the local service driver to actually process the reset.
+	 */
+	return service->last_reset && service->last_ready && time_before(
+			service->last_reset,
+			service->last_ready + RESET_THROTTLE_TIME);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service)
+{
+	/*
+	 * Reset throttling cools off if enough time has elapsed since the
+	 * last reset request.
+	 *
+	 * We check against the last requested reset, not the last serviced
+	 * reset or ready. If we are throttling, a reset may not have been
+	 * serviced for some time even though we are still receiving requests.
+	 */
+	return service->reset_delay && service->last_reset_request &&
+			time_after(jiffies, service->last_reset_request +
+					reset_cool_off(service));
+}
+
+/*
+ * Queue up the ready work for a service. If a service is resetting too fast
+ * then it will be throttled using an exponentially increasing delay before
+ * marking it ready. If the reset speed backs off then the ready throttling
+ * will be cleared. If a service reaches the maximum throttling delay then all
+ * resets will be ignored until the cool off period has elapsed.
+ *
+ * The basic logic of the reset throttling is:
+ *
+ *  - If a reset request is processed and the last ready was less than
+ *    RESET_THROTTLE_TIME ago, then the ready needs to be delayed to
+ *    throttle resets.
+ *
+ *  - The ready delay increases exponentially on each throttled reset
+ *    between RESET_THROTTLE_MIN and RESET_THROTTLE_MAX.
+ *
+ *  - If RESET_THROTTLE_MAX is reached then no ready will be sent until the
+ *    reset requests have cooled off.
+ *
+ *  - Reset requests have cooled off when no reset requests have been
+ *    received for RESET_THROTTLE_COOL_OFF_MULT * the service's current
+ *    ready delay. The service's reset throttling is disabled.
+ *
+ * Note: Be careful when adding print statements, including debug output, to
+ * this function. The ready throttling is intended to prevent denial of
+ * service of the vServices framework due to repeated resets (e.g. because of
+ * a persistent failure). Adding a printk on each reset, for example, would
+ * result in syslog spamming, which is itself a denial of service.
+ *
+ * The ready lock must be held by the caller.
+ */
+static void queue_ready_work(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	unsigned long delay;
+	bool wait_for_cooloff = false;
+
+	lockdep_assert_held(&service->ready_lock);
+
+	/* This should only be called when the service enters reset. */
+	WARN_ON(service->readiness != VS_SERVICE_RESET);
+
+	if (ready_needs_delay(service)) {
+		/* Reset delay increments exponentially */
+		if (!service->reset_delay) {
+			service->reset_delay = RESET_THROTTLE_MIN;
+		} else if (service->reset_delay < RESET_THROTTLE_MAX) {
+			service->reset_delay *= 2;
+		} else {
+			wait_for_cooloff = true;
+		}
+
+		delay = service->reset_delay;
+	} else {
+		/* The reset request appears to have been sane. */
+		delay = 0;
+	}
+
+	if (service->reset_delay > 0) {
+		/*
+		 * Schedule cooloff work, to set the reset_delay to 0 if
+		 * the reset requests stop for long enough.
+		 */
+		schedule_delayed_work(&service->cooloff_work,
+				reset_cool_off(service));
+	}
+
+	if (wait_for_cooloff) {
+		/*
+		 * We need to finish cooling off before we service resets
+		 * again. Schedule cooloff_work to run after the current
+		 * cooloff period ends; it may reschedule itself even later
+		 * if any more requests arrive.
+		 */
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - must cool off for %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(reset_cool_off(service)));
+		return;
+	}
+
+	if (delay)
+		dev_err(&session->dev,
+				"Service %s is resetting too fast - delaying ready by %u ms\n",
+				dev_name(&service->dev),
+				jiffies_to_msecs(delay));
+
+	vs_debug(VS_DEBUG_SESSION, session,
+			"Service %s will become ready in %u ms\n",
+			dev_name(&service->dev),
+			jiffies_to_msecs(delay));
+
+	if (service->last_ready)
+		vs_debug(VS_DEBUG_SESSION, session,
+				"Last became ready %u ms ago\n",
+				msecs_ago(service->last_ready));
+	if (service->reset_delay >= RESET_THROTTLE_MAX)
+		dev_err(&session->dev, "Service %s hit max reset throttle\n",
+				dev_name(&service->dev));
+
+	schedule_delayed_work(&service->ready_work, delay);
+}
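+
+/*
+ * Illustrative throttling progression for queue_ready_work() (actual values
+ * depend on the RESET_THROTTLE_* constants defined elsewhere in this file):
+ * the first throttled reset delays the ready by RESET_THROTTLE_MIN, the next
+ * by 2 * RESET_THROTTLE_MIN, then 4 * RESET_THROTTLE_MIN, and so on up to
+ * RESET_THROTTLE_MAX. Once RESET_THROTTLE_MAX is reached, no ready is
+ * scheduled until cooloff_work sees no reset requests for
+ * RESET_THROTTLE_COOL_OFF_MULT times the current delay.
+ */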
+
+static void session_activation_work(struct work_struct *work)
+{
+	struct vs_session_device *session = container_of(work,
+			struct vs_session_device, activation_work);
+	struct vs_service_device *core_service = session->core_service;
+	struct vs_session_driver *session_drv =
+			to_vs_session_driver(session->dev.driver);
+	int activation_state;
+	int ret;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	if (WARN_ON(!session_drv))
+		return;
+
+	/*
+	 * We use an atomic to prevent duplicate activations if we race with
+	 * an activate after a reset. This is very unlikely, but possible if
+	 * this work item is preempted.
+	 */
+	activation_state = atomic_cmpxchg(&session->activation_state,
+			VS_SESSION_ACTIVATE, VS_SESSION_ACTIVE);
+
+	switch (activation_state) {
+	case VS_SESSION_ACTIVATE:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be activated\n");
+		vs_service_enable(core_service);
+		break;
+
+	case VS_SESSION_RESET:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service will be deactivated\n");
+
+		/* Handle the core service reset */
+		ret = service_handle_reset(session, core_service, true);
+
+		/* Tell the transport if the reset succeeded */
+		if (ret >= 0)
+			session->transport->vt->ready(session->transport);
+		else
+			dev_err(&session->dev, "core service reset unhandled: %d\n",
+					ret);
+
+		break;
+
+	default:
+		vs_debug(VS_DEBUG_SESSION, session,
+				"core service already active\n");
+		break;
+	}
+}
+
+/**
+ * vs_session_handle_reset - Handle a reset at the session layer.
+ * @session: Session to reset
+ *
+ * This function is called by the transport when it receives a transport-level
+ * reset notification.
+ *
+ * After a session is reset by calling this function, it will reset all of its
+ * attached services, and then call the transport's ready callback. The
+ * services will remain in reset until the session is re-activated by a call
+ * to vs_session_handle_activate().
+ *
+ * Calling this function on a session that is already reset is permitted, as
+ * long as the transport accepts the consequent duplicate ready callbacks.
+ *
+ * A newly created session is initially in the reset state, and will not call
+ * the transport's ready callback. The transport may choose to either act as
+ * if the ready callback had been called, or call this function again to
+ * trigger a new ready callback.
+ */
+void vs_session_handle_reset(struct vs_session_device *session)
+{
+	atomic_set(&session->activation_state, VS_SESSION_RESET);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_reset);
+
+/**
+ * vs_session_handle_activate - Allow a session to leave the reset state.
+ * @session: Session to mark active.
+ *
+ * This function is called by the transport when a transport-level reset is
+ * completed; that is, after the session layer has reset its services and
+ * called the ready callback, at *both* ends of the connection.
+ */
+void vs_session_handle_activate(struct vs_session_device *session)
+{
+	atomic_set(&session->activation_state, VS_SESSION_ACTIVATE);
+
+	schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_activate);
+
+static ssize_t id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", session->session_num);
+}
+
+/*
+ * The vServices session device type
+ */
+static ssize_t is_server_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", session->is_server);
+}
+
+static ssize_t name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", session->name);
+}
+
+#ifdef CONFIG_VSERVICES_DEBUG
+static ssize_t debug_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%.8lx\n", session->debug_mask);
+}
+
+static ssize_t debug_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	int err;
+
+	err = kstrtoul(buf, 0, &session->debug_mask);
+	if (err)
+		return err;
+
+	/* Clear any bits we don't know about */
+	session->debug_mask &= VS_DEBUG_ALL;
+
+	return count;
+}
+#endif /* CONFIG_VSERVICES_DEBUG */
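+
+/*
+ * Example of driving the debug_mask attribute from userspace, assuming
+ * CONFIG_VSERVICES_DEBUG and a session numbered 0 (the exact sysfs path
+ * depends on where sysfs is mounted):
+ *
+ *	cat /sys/bus/vservices-session/devices/vservice:0/debug_mask
+ *	echo 0xffffffff > /sys/bus/vservices-session/devices/vservice:0/debug_mask
+ *
+ * Bits outside VS_DEBUG_ALL are silently cleared by debug_mask_store().
+ */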
+
+static struct device_attribute vservices_session_dev_attrs[] = {
+	__ATTR_RO(id),
+	__ATTR_RO(is_server),
+	__ATTR_RO(name),
+#ifdef CONFIG_VSERVICES_DEBUG
+	__ATTR(debug_mask, S_IRUGO | S_IWUSR,
+			debug_mask_show, debug_mask_store),
+#endif
+	__ATTR_NULL,
+};
+
+static int vs_session_free_idr(struct vs_session_device *session)
+{
+	mutex_lock(&vs_session_lock);
+	idr_remove(&session_idr, session->session_num);
+	mutex_unlock(&vs_session_lock);
+	return 0;
+}
+
+static void vs_session_device_release(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	vs_session_free_idr(session);
+
+	kfree(session->name);
+	kfree(session);
+}
+
+/*
+ * The vServices session bus
+ */
+static int vs_session_bus_match(struct device *dev,
+		struct device_driver *driver)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_session_driver *session_drv = to_vs_session_driver(driver);
+
+	return (session->is_server == session_drv->is_server);
+}
+
+static int vs_session_bus_remove(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+	struct vs_service_device *core_service = session->core_service;
+
+	if (!core_service)
+		return 0;
+
+	/*
+	 * Abort any pending session activation. We rely on the transport to
+	 * not call vs_session_handle_activate after this point.
+	 */
+	cancel_work_sync(&session->activation_work);
+
+	/* Abort any pending fatal error handling, which is redundant now. */
+	cancel_work_sync(&session->fatal_error_work);
+
+	/*
+	 * Delete the core service. This will implicitly delete everything
+	 * else (in reset on the client side, and in release on the server
+	 * side). The session holds a reference, so this won't release the
+	 * service struct.
+	 */
+	delete_service(core_service);
+
+	/* Now clean up the core service. */
+	session->core_service = NULL;
+
+	/* Matches the get in vs_service_register() */
+	vs_put_service(core_service);
+
+	return 0;
+}
+
+static int vservices_session_uevent(struct device *dev,
+		struct kobj_uevent_env *env)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", session->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void vservices_session_shutdown(struct device *dev)
+{
+	struct vs_session_device *session = to_vs_session_device(dev);
+
+	dev_dbg(dev, "shutdown\n");
+
+	/* Do a transport reset */
+	session->transport->vt->reset(session->transport);
+}
+
+struct bus_type vs_session_bus_type = {
+	.name		= "vservices-session",
+	.match		= vs_session_bus_match,
+	.remove		= vs_session_bus_remove,
+	.dev_attrs	= vservices_session_dev_attrs,
+	.uevent		= vservices_session_uevent,
+	.shutdown	= vservices_session_shutdown,
+};
+EXPORT_SYMBOL_GPL(vs_session_bus_type);
+
+/*
+ * Common code for the vServices client and server buses
+ */
+int vs_service_bus_probe(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+	struct vs_session_device *session = vs_service_get_session(service);
+	int ret;
+
+	vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "probe\n");
+
+	/*
+	 * Increase the reference count on the service driver. We don't allow
+	 * service driver modules to be removed if there are any device
+	 * instances present. The devices must be explicitly removed first.
+	 */
+	if (!try_module_get(vsdrv->driver.owner))
+		return -ENODEV;
+
+	ret = vsdrv->probe(service);
+	if (ret) {
+		module_put(vsdrv->driver.owner);
+		return ret;
+	}
+
+	service->driver_probed = true;
+
+	try_start_service(service);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_probe);
+
+int vs_service_bus_remove(struct device *dev)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+	int err = 0;
+
+	reset_service(service);
+
+	/* Prevent reactivation of the driver */
+	service->driver_probed = false;
+
+	/* The driver has now had its reset() callback called; remove it */
+	vsdrv->remove(service);
+
+	/*
+	 * Take the service's state mutex and spinlock. This ensures that any
+	 * thread that is calling vs_state_lock_safe[_bh] will either complete
+	 * now, or see the driver removal and fail, irrespective of which type
+	 * of lock it is using.
+	 */
+	mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+	spin_lock_bh(&service->state_spinlock);
+
+	/* Release all the locks. */
+	spin_unlock_bh(&service->state_spinlock);
+	mutex_unlock(&service->state_mutex);
+	mutex_unlock(&service->ready_lock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	service->state_spinlock_used = false;
+	service->state_mutex_used = false;
+#endif
+
+	module_put(vsdrv->driver.owner);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_remove);
+
+int vs_service_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct vs_service_device *service = to_vs_service_device(dev);
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	dev_dbg(dev, "uevent\n");
+
+	if (add_uevent_var(env, "IS_SERVER=%d", service->is_server))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_ID=%d", service->id))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "SERVICE_NAME=%s", service->name))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "PROTOCOL=%s", service->protocol ?: ""))
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_uevent);
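+
+/*
+ * The variables added above can be matched from userspace; for example, a
+ * hypothetical udev rule tagging skeleton client services might look like:
+ *
+ *	ENV{PROTOCOL}=="com.ok-labs.skeleton", ENV{IS_SERVER}=="0", \
+ *		TAG+="vservices_skeleton"
+ *
+ * The rule is illustrative only; real rules depend on the installed drivers.
+ */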
+
+static int vs_session_create_sysfs_entry(struct vs_transport *transport,
+		struct vs_session_device *session, bool server,
+		const char *transport_name)
+{
+	char *sysfs_name;
+	struct kobject *sysfs_parent = vservices_client_root;
+
+	if (!transport_name)
+		return -EINVAL;
+
+	sysfs_name = kasprintf(GFP_KERNEL, "%s:%s", transport->type,
+			transport_name);
+	if (!sysfs_name)
+		return -ENOMEM;
+
+	if (server)
+		sysfs_parent = vservices_server_root;
+
+	session->sysfs_entry = kobject_create_and_add(sysfs_name, sysfs_parent);
+
+	kfree(sysfs_name);
+	if (!session->sysfs_entry)
+		return -ENOMEM;
+	return 0;
+}
+
+static int vs_session_alloc_idr(struct vs_session_device *session)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+	int err, id;
+
+retry:
+	if (!idr_pre_get(&session_idr, GFP_KERNEL))
+		return -ENOMEM;
+
+	mutex_lock(&vs_session_lock);
+	err = idr_get_new_above(&session_idr, session, 0, &id);
+	if (err == 0) {
+		if (id >= VS_MAX_SESSIONS) {
+			/* We are out of session ids */
+			idr_remove(&session_idr, id);
+			mutex_unlock(&vs_session_lock);
+			return -EBUSY;
+		}
+	}
+	mutex_unlock(&vs_session_lock);
+	if (err == -EAGAIN)
+		goto retry;
+	if (err < 0)
+		return err;
+#else
+	int id;
+
+	mutex_lock(&vs_session_lock);
+	id = idr_alloc(&session_idr, session, 0, VS_MAX_SESSIONS, GFP_KERNEL);
+	mutex_unlock(&vs_session_lock);
+
+	if (id == -ENOSPC)
+		return -EBUSY;
+	else if (id < 0)
+		return id;
+#endif
+
+	session->session_num = id;
+	return 0;
+}
+
+/**
+ * vs_session_register - register a vservices session on a transport
+ * @transport: vservices transport that the session will attach to
+ * @parent: device that implements the transport (for sysfs)
+ * @server: true if the session is server-side
+ * @transport_name: name of the transport
+ *
+ * This function is intended to be called from the probe() function of a
+ * transport driver. It sets up a new session device, which then either
+ * performs automatic service discovery (for clients) or creates sysfs nodes
+ * that allow the user to create services (for servers).
+ *
+ * Note that the parent is only used by the driver framework; it is not
+ * directly accessed by the session drivers. Thus, a single transport device
+ * can support multiple sessions, as long as they each have a unique struct
+ * vs_transport.
+ *
+ * Note: This function may sleep, and therefore must not be called from
+ * interrupt context.
+ *
+ * Returns a pointer to the new device, or an error pointer.
+ */
+struct vs_session_device *vs_session_register(struct vs_transport *transport,
+		struct device *parent, bool server, const char *transport_name)
+{
+	struct device *dev;
+	struct vs_session_device *session;
+	int ret = -ENOMEM;
+
+	WARN_ON(!transport);
+
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		goto fail_session_alloc;
+
+	session->transport = transport;
+	session->is_server = server;
+	session->name = kstrdup(transport_name, GFP_KERNEL);
+	if (!session->name)
+		goto fail_free_session;
+
+	INIT_WORK(&session->activation_work, session_activation_work);
+	INIT_WORK(&session->fatal_error_work, session_fatal_error_work);
+
+#ifdef CONFIG_VSERVICES_DEBUG
+	session->debug_mask = default_debug_mask & VS_DEBUG_ALL;
+#endif
+
+	idr_init(&session->service_idr);
+	mutex_init(&session->service_idr_lock);
+
+	/*
+	 * We must create the session sysfs entry before registering the
+	 * device, so that the entry is available while the core service
+	 * is being registered.
+	 */
+	ret = vs_session_create_sysfs_entry(transport, session, server,
+			transport_name);
+	if (ret)
+		goto fail_free_session;
+
+	ret = vs_session_alloc_idr(session);
+	if (ret)
+		goto fail_sysfs_entry;
+
+	dev = &session->dev;
+	dev->parent = parent;
+	dev->bus = &vs_session_bus_type;
+	dev->release = vs_session_device_release;
+	dev_set_name(dev, "vservice:%d", session->session_num);
+
+	ret = device_register(dev);
+	if (ret) {
+		goto fail_session_map;
+	}
+
+	/* Add a symlink to transport device inside session device sysfs dir */
+	if (parent) {
+		ret = sysfs_create_link(&session->dev.kobj,
+				&parent->kobj, VS_TRANSPORT_SYMLINK_NAME);
+		if (ret) {
+			dev_err(&session->dev,
+					"Error %d creating transport symlink\n",
+					ret);
+			goto fail_session_device_unregister;
+		}
+	}
+
+	return session;
+
+fail_session_device_unregister:
+	device_unregister(&session->dev);
+	kobject_put(session->sysfs_entry);
+	/* Remaining cleanup will be done in vs_session_device_release */
+	return ERR_PTR(ret);
+fail_session_map:
+	vs_session_free_idr(session);
+fail_sysfs_entry:
+	kobject_put(session->sysfs_entry);
+fail_free_session:
+	kfree(session->name);
+	kfree(session);
+fail_session_alloc:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vs_session_register);
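+
+/*
+ * Illustrative sketch of a transport driver's probe() using the registration
+ * API above (error handling abbreviated; my_transport and my_pdev are
+ * hypothetical):
+ *
+ *	session = vs_session_register(&my_transport->transport,
+ *			&my_pdev->dev, is_server, "my-transport-0");
+ *	if (IS_ERR(session))
+ *		return PTR_ERR(session);
+ *	... register the core service ...
+ *	vs_session_start(session);
+ */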
+
+void vs_session_start(struct vs_session_device *session)
+{
+	struct vs_service_device *core_service = session->core_service;
+
+	if (WARN_ON(!core_service))
+		return;
+
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_ADD, session);
+
+	vs_service_start(core_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_start);
+
+/**
+ * vs_session_unregister - unregister a session device
+ * @session: the session device to unregister
+ */
+void vs_session_unregister(struct vs_session_device *session)
+{
+	if (session->dev.parent)
+		sysfs_remove_link(&session->dev.kobj, VS_TRANSPORT_SYMLINK_NAME);
+	blocking_notifier_call_chain(&vs_session_notifier_list,
+			VS_SESSION_NOTIFY_REMOVE, session);
+
+	device_unregister(&session->dev);
+
+	kobject_put(session->sysfs_entry);
+}
+EXPORT_SYMBOL_GPL(vs_session_unregister);
+
+struct service_unbind_work_struct {
+	struct vs_service_device *service;
+	struct work_struct work;
+};
+
+static void service_unbind_work(struct work_struct *work)
+{
+	struct service_unbind_work_struct *unbind_work = container_of(work,
+			struct service_unbind_work_struct, work);
+
+	device_release_driver(&unbind_work->service->dev);
+
+	/* Matches vs_get_service() in vs_session_unbind_driver() */
+	vs_put_service(unbind_work->service);
+	kfree(unbind_work);
+}
+
+int vs_session_unbind_driver(struct vs_service_device *service)
+{
+	struct service_unbind_work_struct *unbind_work =
+			kmalloc(sizeof(*unbind_work), GFP_KERNEL);
+
+	if (!unbind_work)
+		return -ENOMEM;
+
+	INIT_WORK(&unbind_work->work, service_unbind_work);
+
+	/* Put in service_unbind_work() */
+	unbind_work->service = vs_get_service(service);
+	schedule_work(&unbind_work->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_unbind_driver);
+
+static int __init vservices_init(void)
+{
+	int r;
+
+	printk(KERN_INFO "vServices Framework 1.0\n");
+
+	vservices_root = kobject_create_and_add("vservices", NULL);
+	if (!vservices_root) {
+		r = -ENOMEM;
+		goto fail_create_root;
+	}
+
+	r = bus_register(&vs_session_bus_type);
+	if (r < 0)
+		goto fail_bus_register;
+
+	r = vs_devio_init();
+	if (r < 0)
+		goto fail_devio_init;
+
+	return 0;
+
+fail_devio_init:
+	bus_unregister(&vs_session_bus_type);
+fail_bus_register:
+	kobject_put(vservices_root);
+fail_create_root:
+	return r;
+}
+
+static void __exit vservices_exit(void)
+{
+	printk(KERN_INFO "vServices Framework exit\n");
+
+	vs_devio_exit();
+	bus_unregister(&vs_session_bus_type);
+	kobject_put(vservices_root);
+}
+
+subsys_initcall(vservices_init);
+module_exit(vservices_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Session");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.h b/drivers/vservices/session.h
new file mode 100644
index 0000000..f51d535
--- /dev/null
+++ b/drivers/vservices/session.h
@@ -0,0 +1,173 @@
+/*
+ * drivers/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Definitions related to the vservices session bus and its client and server
+ * session drivers. The interfaces in this file are implementation details of
+ * the vServices framework and should not be used by transport or service
+ * drivers.
+ */
+
+#ifndef _VSERVICES_SESSION_PRIV_H_
+#define _VSERVICES_SESSION_PRIV_H_
+
+/* Maximum number of sessions allowed */
+#define VS_MAX_SESSIONS 64
+
+#include "debug.h"
+
+/* For use by the core server */
+#define VS_SERVICE_AUTO_ALLOCATE_ID	0xffff
+#define VS_SERVICE_ALREADY_RESET	1
+
+/*
+ * The upper bits of the service id are reserved for transport driver specific
+ * use. The reserved bits are always zeroed out above the transport layer.
+ */
+#define VS_SERVICE_ID_TRANSPORT_BITS	4
+#define VS_SERVICE_ID_TRANSPORT_OFFSET	12
+#define VS_SERVICE_ID_TRANSPORT_MASK ((1 << VS_SERVICE_ID_TRANSPORT_BITS) - 1)
+#define VS_SERVICE_ID_MASK \
+	(~(VS_SERVICE_ID_TRANSPORT_MASK << VS_SERVICE_ID_TRANSPORT_OFFSET))
+
+/* Number of bits needed to represent the service id range as a bitmap. */
+#define VS_SERVICE_ID_BITMAP_BITS \
+	(1 << ((sizeof(vs_service_id_t) * 8) - VS_SERVICE_ID_TRANSPORT_BITS))
+
+/* High service ids are reserved for use by the transport drivers */
+#define VS_SERVICE_ID_RESERVED(x) \
+	((1 << VS_SERVICE_ID_TRANSPORT_OFFSET) - (x))
+
+#define VS_SERVICE_ID_RESERVED_1	VS_SERVICE_ID_RESERVED(1)
+
+/* Name of the session device symlink in service device sysfs directory */
+#define VS_SESSION_SYMLINK_NAME		"session"
+
+/* Name of the transport device symlink in session device sysfs directory */
+#define VS_TRANSPORT_SYMLINK_NAME	"transport"
+
+static inline unsigned int
+vs_get_service_id_reserved_bits(vs_service_id_t service_id)
+{
+	return (service_id >> VS_SERVICE_ID_TRANSPORT_OFFSET) &
+			VS_SERVICE_ID_TRANSPORT_MASK;
+}
+
+static inline vs_service_id_t vs_get_real_service_id(vs_service_id_t service_id)
+{
+	return service_id & VS_SERVICE_ID_MASK;
+}
+
+static inline void vs_set_service_id_reserved_bits(vs_service_id_t *service_id,
+		unsigned int reserved_bits)
+{
+	*service_id &= ~(VS_SERVICE_ID_TRANSPORT_MASK <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET);
+	*service_id |= (reserved_bits & VS_SERVICE_ID_TRANSPORT_MASK) <<
+			VS_SERVICE_ID_TRANSPORT_OFFSET;
+}
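+
+/*
+ * Worked example of the layout above: with VS_SERVICE_ID_TRANSPORT_BITS == 4
+ * and VS_SERVICE_ID_TRANSPORT_OFFSET == 12, a raw service id of 0x3004 has
+ * reserved (transport) bits 0x3 and a real service id of 0x004, so
+ * vs_get_service_id_reserved_bits(0x3004) == 0x3 and
+ * vs_get_real_service_id(0x3004) == 0x004.
+ */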
+
+extern struct bus_type vs_session_bus_type;
+extern struct kobject *vservices_root;
+extern struct kobject *vservices_server_root;
+extern struct kobject *vservices_client_root;
+
+/**
+ * struct vs_session_driver - Session driver
+ * @driver: Linux device model driver structure
+ * @service_bus: Pointer to either the server or client bus type
+ * @is_server: True if this driver is for a server session, false if it is for
+ * a client session
+ * @service_added: Called when a non-core service is added.
+ * @service_start: Called when a non-core service is started.
+ * @service_local_reset: Called when an active non-core service driver becomes
+ * inactive.
+ * @service_removed: Called when a non-core service is removed.
+ */
+struct vs_session_driver {
+	struct device_driver driver;
+	struct bus_type *service_bus;
+	bool is_server;
+
+	/* These are all called with the core service state lock held. */
+	int (*service_added)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_start)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_local_reset)(struct vs_session_device *session,
+			struct vs_service_device *service);
+	int (*service_removed)(struct vs_session_device *session,
+			struct vs_service_device *service);
+};
+
+#define to_vs_session_driver(drv) \
+	container_of(drv, struct vs_session_driver, driver)
+
+/* Service lookup */
+extern struct vs_service_device * vs_session_get_service(
+		struct vs_session_device *session,
+		vs_service_id_t service_id);
+
+/* Service creation & destruction */
+extern struct vs_service_device *
+vs_service_register(struct vs_session_device *session,
+		struct vs_service_device *parent,
+		vs_service_id_t service_id,
+		const char *protocol,
+		const char *name,
+		const void *plat_data);
+
+extern bool vs_service_start(struct vs_service_device *service);
+
+extern int vs_service_delete(struct vs_service_device *service,
+		struct vs_service_device *caller);
+
+extern int vs_service_handle_delete(struct vs_service_device *service);
+
+/* Service reset handling */
+extern int vs_service_handle_reset(struct vs_session_device *session,
+		vs_service_id_t service_id, bool disable);
+extern int vs_service_enable(struct vs_service_device *service);
+
+extern void vs_session_enable_noncore(struct vs_session_device *session);
+extern void vs_session_disable_noncore(struct vs_session_device *session);
+extern void vs_session_delete_noncore(struct vs_session_device *session);
+
+/* Service bus driver management */
+extern int vs_service_bus_probe(struct device *dev);
+extern int vs_service_bus_remove(struct device *dev);
+extern int vs_service_bus_uevent(struct device *dev,
+		struct kobj_uevent_env *env);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+
+extern int vs_devio_init(void);
+extern void vs_devio_exit(void);
+
+extern struct vs_service_device *vs_service_lookup_by_devt(dev_t dev);
+
+extern struct vs_service_driver vs_devio_server_driver;
+extern struct vs_service_driver vs_devio_client_driver;
+
+extern int vservices_cdev_major;
+
+#else /* !CONFIG_VSERVICES_CHAR_DEV */
+
+static inline int vs_devio_init(void)
+{
+	return 0;
+}
+
+static inline void vs_devio_exit(void)
+{
+}
+
+#endif /* !CONFIG_VSERVICES_CHAR_DEV */
+
+#endif /* _VSERVICES_SESSION_PRIV_H_ */
diff --git a/drivers/vservices/skeleton_driver.c b/drivers/vservices/skeleton_driver.c
new file mode 100644
index 0000000..cfbc5df
--- /dev/null
+++ b/drivers/vservices/skeleton_driver.c
@@ -0,0 +1,133 @@
+/*
+ * drivers/vservices/skeleton_driver.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Skeleton testing driver for templating vService client/server drivers
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+struct skeleton_info {
+	unsigned dummy;
+};
+
+static void vs_skeleton_handle_start(struct vs_service_device *service)
+{
+	/* NOTE: Do not change this message - it is used for system testing */
+	dev_info(&service->dev, "skeleton handle_start\n");
+}
+
+static int vs_skeleton_handle_message(struct vs_service_device *service,
+					  struct vs_mbuf *mbuf)
+{
+	dev_info(&service->dev, "skeleton handle_messasge\n");
+	return -EBADMSG;
+}
+
+static void vs_skeleton_handle_notify(struct vs_service_device *service,
+					  u32 flags)
+{
+	dev_info(&service->dev, "skeleton handle_notify\n");
+}
+
+static void vs_skeleton_handle_reset(struct vs_service_device *service)
+{
+	dev_info(&service->dev, "skeleton handle_reset %s service %d\n",
+			service->is_server ? "server" : "client", service->id);
+}
+
+static int vs_skeleton_probe(struct vs_service_device *service)
+{
+	struct skeleton_info *info;
+	int err = -ENOMEM;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		goto fail;
+
+	dev_set_drvdata(&service->dev, info);
+	return 0;
+
+fail:
+	return err;
+}
+
+static int vs_skeleton_remove(struct vs_service_device *service)
+{
+	struct skeleton_info *info = dev_get_drvdata(&service->dev);
+
+	dev_info(&service->dev, "skeleton remove\n");
+	kfree(info);
+	return 0;
+}
+
+static struct vs_service_driver server_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= true,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-server-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_server_bus_type,
+	},
+};
+
+static struct vs_service_driver client_skeleton_driver = {
+	.protocol	= "com.ok-labs.skeleton",
+	.is_server	= false,
+	.probe		= vs_skeleton_probe,
+	.remove		= vs_skeleton_remove,
+	.start		= vs_skeleton_handle_start,
+	.receive	= vs_skeleton_handle_message,
+	.notify		= vs_skeleton_handle_notify,
+	.reset		= vs_skeleton_handle_reset,
+	.driver		= {
+		.name		= "vs-client-skeleton",
+		.owner		= THIS_MODULE,
+		.bus		= &vs_client_bus_type,
+	},
+};
+
+static int __init vs_skeleton_init(void)
+{
+	int ret;
+
+	ret = driver_register(&server_skeleton_driver.driver);
+	if (ret)
+		return ret;
+
+	ret = driver_register(&client_skeleton_driver.driver);
+	if (ret)
+		driver_unregister(&server_skeleton_driver.driver);
+
+	return ret;
+}
+
+static void __exit vs_skeleton_exit(void)
+{
+	driver_unregister(&server_skeleton_driver.driver);
+	driver_unregister(&client_skeleton_driver.driver);
+}
+
+module_init(vs_skeleton_init);
+module_exit(vs_skeleton_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Skeleton Client/Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/transport.h b/drivers/vservices/transport.h
new file mode 100644
index 0000000..8e5055c
--- /dev/null
+++ b/drivers/vservices/transport.h
@@ -0,0 +1,40 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the private interface that vServices transport drivers
+ * must provide to the vservices session and protocol layers. The transport,
+ * transport vtable, and message buffer structures are defined in the public
+ * <vservices/transport.h> header.
+ */
+
+#ifndef _VSERVICES_TRANSPORT_PRIV_H_
+#define _VSERVICES_TRANSPORT_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+
+/**
+ * struct vs_notify_info - Notification information stored in the transport
+ * @service_id: Service id for this notification info
+ * @offset: Offset into the notification mapping
+ */
+struct vs_notify_info {
+	vs_service_id_t service_id;
+	unsigned offset;
+};
+
+#define VS_MAX_SERVICES		128
+#define VS_MAX_SERVICE_ID	(VS_MAX_SERVICES - 1)
+
+#endif /* _VSERVICES_TRANSPORT_PRIV_H_ */
diff --git a/drivers/vservices/transport/Kconfig b/drivers/vservices/transport/Kconfig
new file mode 100644
index 0000000..37e84c4
--- /dev/null
+++ b/drivers/vservices/transport/Kconfig
@@ -0,0 +1,20 @@
+#
+# vServices Transport driver configuration
+#
+
+menu "Transport drivers"
+
+config VSERVICES_OKL4_AXON
+	tristate "OKL4 Microvisor Axon driver"
+	depends on VSERVICES_SUPPORT && OKL4_GUEST
+	default y
+	help
+	  This option adds support for Virtual Services sessions using an OKL4
+	  Microvisor Axon object as a transport.
+
+	  If this driver is to be used in a Cell that has multiple
+	  discontiguous regions in its physical memory pool, the
+	  CONFIG_DMA_CMA option must also be selected (or CONFIG_CMA
+	  in older kernels that do not have CONFIG_DMA_CMA).
+
+endmenu
diff --git a/drivers/vservices/transport/Makefile b/drivers/vservices/transport/Makefile
new file mode 100644
index 0000000..222fb51
--- /dev/null
+++ b/drivers/vservices/transport/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_OKL4_AXON)	+= vtransport_axon.o
+vtransport_axon-objs = axon.o
diff --git a/drivers/vservices/transport/axon.c b/drivers/vservices/transport/axon.c
new file mode 100644
index 0000000..a140b4a
--- /dev/null
+++ b/drivers/vservices/transport/axon.c
@@ -0,0 +1,3573 @@
+/*
+ * drivers/vservices/transport/axon.c
+ *
+ * Copyright (c) 2015-2018 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the OKL4 Virtual Services transport driver for OKL4 Microvisor
+ * Axons (virtual inter-Cell DMA engines).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/log2.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dma-contiguous.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+#include <asm/dma-contiguous.h>
+#endif
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
+#include <asm-generic/okl4_virq.h>
+#include <asm/byteorder.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include <microvisor/microvisor.h>
+
+#include "../transport.h"
+#include "../session.h"
+#include "../debug.h"
+
+#define DRIVER_AUTHOR "Cog Systems Pty Ltd"
+#define DRIVER_DESC "OKL4 vServices Axon Transport Driver"
+#define DRIVER_NAME "vtransport_axon"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || \
+	defined(CONFIG_NO_DEPRECATED_MEMORY_BARRIERS)
+#define smp_mb__before_atomic_dec smp_mb__before_atomic
+#define smp_mb__before_atomic_inc smp_mb__before_atomic
+#define smp_mb__after_atomic_dec smp_mb__after_atomic
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+#define DMA_ATTRS unsigned long
+#else
+#define DMA_ATTRS struct dma_attrs *
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && \
+	!defined(CONFIG_CMA)
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
+static struct kmem_cache *mbuf_cache;
+
+struct child_device {
+	struct device *dev;
+	struct list_head list;
+};
+
+/* Number of services in the transport array to allocate at a time */
+#define SERVICES_ALLOC_CHUNK	16
+#define MSG_SEND_FREE_BUFS	VS_SERVICE_ID_RESERVED_1
+
+/* The maximum value we allow for the free_bufs_balance counter */
+#define MAX_BALANCE		1
+
+/*
+ * The free bufs quota must be enough to take free_bufs_balance from its
+ * minimum to its maximum.
+ */
+#define FREE_BUFS_QUOTA		(MAX_BALANCE * 2)
+
+/*
+ * The free bufs retry delay is the period in jiffies that we delay retrying
+ * after an out-of-memory condition when trying to send a free bufs message.
+ */
+#define FREE_BUFS_RETRY_DELAY	2
+
+/* The minimum values we permit for queue and message size. */
+#define MIN_QUEUE_SIZE		((size_t)4)
+#define MIN_MSG_SIZE		(32 - sizeof(vs_service_id_t))
+
+/*
+ * The maximum size for a batched receive. This should be larger than the
+ * maximum message size, and large enough to avoid excessive context switching
+ * overheads, yet small enough to avoid blocking the tasklet queue for too
+ * long.
+ */
+#define MAX_TRANSFER_CHUNK	65536
+
+#define INC_MOD(x, m) do {					\
+	(x)++;							\
+	if ((x) == (m))						\
+		(x) = 0;					\
+} while (0)
+
+/* Local Axon cleanup workqueue */
+struct workqueue_struct *work_queue;
+
+/*
+ * True if there is only one physical segment being used for kernel memory
+ * allocations. If this is false, the device must have a usable CMA region.
+ */
+static bool okl4_single_physical_segment;
+
+/* OKL4 MMU capability. */
+static okl4_kcap_t okl4_mmu_cap;
+
+/*
+ * Per-service TX buffer allocation pool.
+ *
+ * We cannot use a normal DMA pool for TX buffers, because alloc_mbuf can be
+ * called with GFP_ATOMIC, and a normal DMA pool alloc will take pages from
+ * a global emergency pool if GFP_WAIT is not set. The emergency pool is not
+ * guaranteed to be in the same physical segment as this device's DMA region,
+ * so it might not be usable by the axon.
+ *
+ * Using a very simple allocator with preallocated memory also speeds up the
+ * TX path.
+ *
+ * RX buffers use a standard Linux DMA pool, shared between all services,
+ * rather than this struct. They are preallocated by definition, so the speed
+ * of the allocator doesn't matter much for them. Also, they're always
+ * allocated with GFP_KERNEL (which includes GFP_WAIT) so the normal DMA pool
+ * will use memory from the axon's contiguous region.
+ */
+struct vs_axon_tx_pool {
+	struct vs_transport_axon *transport;
+	struct kref kref;
+
+	void *base_vaddr;
+	dma_addr_t base_laddr;
+
+	unsigned alloc_order;
+	unsigned count;
+
+	struct work_struct free_work;
+	unsigned long alloc_bitmap[];
+};
+
+struct vs_axon_rx_freelist_entry {
+	struct list_head list;
+	dma_addr_t laddr;
+};
+
+/* Service info */
+struct vs_mv_service_info {
+	struct vs_service_device *service;
+
+	/* True if the session has started the service */
+	bool ready;
+
+	/* Number of send buffers we have allocated, in total. */
+	atomic_t send_inflight;
+
+	/*
+	 * Number of send buffers we have allocated but not yet sent.
+	 * This should always be zero if ready is false.
+	 */
+	atomic_t send_alloc;
+
+	/*
+	 * Number of receive buffers we have received and not yet freed.
+	 * This should always be zero if ready is false.
+	 */
+	atomic_t recv_inflight;
+
+	/*
+	 * Number of receive buffers we have freed, but not told the other end
+	 * about yet.
+	 *
+	 * The watermark is the maximum number of freed buffers we can
+	 * accumulate before we send a dummy message to the remote end to ack
+	 * them. This is used in situations where the protocol allows the remote
+	 * end to reach its send quota without guaranteeing a reply; the dummy
+	 * message lets it make progress even if our service driver doesn't send
+	 * an answer that we can piggy-back the acks on.
+	 */
+	atomic_t recv_freed;
+	unsigned int recv_freed_watermark;
+
+	/*
+	 * Number of buffers that have been left allocated after a reset. If
+	 * this count is nonzero, then the service has been disabled by the
+	 * session layer, and needs to be re-enabled when it reaches zero.
+	 */
+	atomic_t outstanding_frees;
+
+	/* TX allocation pool */
+	struct vs_axon_tx_pool *tx_pool;
+
+	/* RX allocation count */
+	unsigned rx_allocated;
+
+	/* Reference count for this info struct. */
+	struct kref kref;
+
+	/* RCU head for cleanup */
+	struct rcu_head rcu_head;
+};
+
+/*
+ * Transport readiness state machine
+ *
+ * This is similar to the service readiness state machine, but simpler,
+ * because there are fewer transition triggers.
+ *
+ * The states are:
+ * INIT: Initial state. This occurs transiently during probe.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement.
+ * RESET: The transport is inactive at both ends, and the session layer has
+ * not yet told us to start activating.
+ * LOCAL_READY: The session layer has told us to start activating, and we
+ * have notified the remote end that we're ready.
+ * REMOTE_READY: The remote end has notified us that it is ready, but the
+ * local session layer hasn't decided to become ready yet.
+ * ACTIVE: Both ends are ready to communicate.
+ * SHUTDOWN: The transport is shutting down and should not become ready.
+ */
+enum vs_transport_readiness {
+	VS_TRANSPORT_INIT = 0,
+	VS_TRANSPORT_LOCAL_RESET,
+	VS_TRANSPORT_RESET,
+	VS_TRANSPORT_LOCAL_READY,
+	VS_TRANSPORT_REMOTE_READY,
+	VS_TRANSPORT_ACTIVE,
+	VS_TRANSPORT_SHUTDOWN,
+};
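+
+/*
+ * A typical successful startup is expected to move through either
+ *
+ *	INIT -> RESET -> LOCAL_READY -> ACTIVE	(this end readies first)
+ * or
+ *	INIT -> RESET -> REMOTE_READY -> ACTIVE	(remote end readies first)
+ *
+ * with LOCAL_RESET entered whenever this end initiates a reset and is still
+ * waiting for the remote acknowledgement.
+ */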
+
+/*
+ * Transport reset / ready VIRQ payload bits
+ */
+enum vs_transport_reset_virq {
+	VS_TRANSPORT_VIRQ_RESET_REQ = (1 << 0),
+	VS_TRANSPORT_VIRQ_RESET_ACK = (1 << 1),
+	VS_TRANSPORT_VIRQ_READY = (1 << 2),
+};
+
+/*
+ * Internal definitions of the transport and message buffer structures.
+ */
+#define MAX_NOTIFICATION_LINES 16 /* Enough for 512 notifications each way */
+
+struct vs_transport_axon {
+	struct device *axon_dev;
+
+	struct okl4_axon_tx *tx;
+	struct okl4_axon_queue_entry *tx_descs;
+	struct vs_axon_tx_pool **tx_pools;
+	struct okl4_axon_rx *rx;
+	struct okl4_axon_queue_entry *rx_descs;
+	void **rx_ptrs;
+
+	dma_addr_t tx_phys, rx_phys;
+	size_t tx_size, rx_size;
+
+	okl4_kcap_t segment;
+	okl4_laddr_t segment_base;
+
+	okl4_kcap_t tx_cap, rx_cap, reset_cap;
+	unsigned int tx_irq, rx_irq, reset_irq;
+	okl4_interrupt_number_t reset_okl4_irq;
+
+	unsigned int notify_tx_nirqs;
+	okl4_kcap_t notify_cap[MAX_NOTIFICATION_LINES];
+	unsigned int notify_rx_nirqs;
+	unsigned int notify_irq[MAX_NOTIFICATION_LINES];
+
+	bool is_server;
+	size_t msg_size, queue_size;
+
+	/*
+	 * The handle to the device tree node for the virtual-session node
+	 * associated with the axon.
+	 */
+	struct device_node *of_node;
+
+	struct list_head child_dev_list;
+
+	/*
+	 * Hold queue and tx tasklet used to buffer and resend mbufs blocked
+	 * by a full outgoing axon queue, due to a slow receiver or a halted
+	 * axon.
+	 */
+	struct list_head tx_queue;
+	struct tasklet_struct tx_tasklet;
+	u32 tx_uptr_freed;
+
+	/*
+	 * The readiness state of the transport, and a spinlock protecting it.
+	 * Note that this is different to the session's readiness state
+	 * machine, though it has the same basic purpose.
+	 */
+	enum vs_transport_readiness readiness;
+	spinlock_t readiness_lock;
+
+	struct tasklet_struct rx_tasklet;
+	struct timer_list rx_retry_timer;
+	struct list_head rx_freelist;
+	u32 rx_alloc_extra;
+	struct dma_pool *rx_pool;
+	spinlock_t rx_alloc_lock;
+	u32 rx_uptr_allocated;
+
+	struct vs_session_device *session_dev;
+	struct vs_transport transport;
+
+	DECLARE_BITMAP(service_bitmap, VS_SERVICE_ID_BITMAP_BITS);
+
+	struct delayed_work free_bufs_work;
+
+	/*
+	 * Freed buffers messages balance counter. This counter is incremented
+	 * when we send a freed buffers message and decremented when we receive
+	 * one. If the balance is negative then we need to send a message
+	 * as an acknowledgement to the other end, even if there are no
+	 * freed buffers to acknowledge.
+	 */
+	atomic_t free_bufs_balance;
+
+	/*
+	 * Flag set when a service exceeds its freed buffers watermark,
+	 * telling free_bufs_work to send a message when the balance
+	 * counter is non-negative. This is ignored, and a message is
+	 * sent in any case, if the balance is negative.
+	 */
+	bool free_bufs_pending;
+
+	/* Pool for allocating outgoing free bufs messages */
+	struct vs_axon_tx_pool *free_bufs_pool;
+};
+
+#define to_vs_transport_axon(t) \
+	container_of(t, struct vs_transport_axon, transport)
+
+struct vs_mbuf_axon {
+	struct vs_mbuf base;
+	struct vs_transport_axon *owner;
+	dma_addr_t laddr;
+	struct vs_axon_tx_pool *pool;
+};
+
+#define to_vs_mbuf_axon(b) container_of(b, struct vs_mbuf_axon, base)
+
+/*
+ * Buffer allocation
+ *
+ * Buffers used by axons must be allocated within a single contiguous memory
+ * region, backed by a single OKL4 physical segment. This is similar to how
+ * the DMA allocator normally works, but we can't use the normal DMA allocator
+ * because the platform code will remap the allocated memory with caching
+ * disabled.
+ *
+ * We borrow the useful parts of the DMA allocator by providing our own DMA
+ * mapping ops which don't actually remap the memory.
+ */
+static void *axon_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp, DMA_ATTRS attrs)
+{
+	unsigned long order;
+	size_t count;
+	struct page *page;
+	void *ptr;
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+	if (!(gfp & __GFP_WAIT))
+#else
+	if (!(gfp & __GFP_RECLAIM))
+#endif
+		return NULL;
+
+	order = get_order(size);
+	count = size >> PAGE_SHIFT;
+
+	if (dev_get_cma_area(dev)) {
+		page = dma_alloc_from_contiguous(dev, count, order);
+
+		if (!page)
+			return NULL;
+	} else {
+		struct page *p, *e;
+		page = alloc_pages(gfp, order);
+
+		if (!page)
+			return NULL;
+
+		/* Split huge page and free any excess pages */
+		split_page(page, order);
+		for (p = page + count, e = page + (1 << order); p < e; p++)
+			__free_page(p);
+	}
+
+	if (PageHighMem(page)) {
+		struct vm_struct *area = get_vm_area(size, VM_USERMAP);
+		if (!area)
+			goto free_pages;
+		ptr = area->addr;
+		area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+		if (ioremap_page_range((unsigned long)ptr,
+					(unsigned long)ptr + size,
+					area->phys_addr, PAGE_KERNEL)) {
+			vunmap(ptr);
+			goto free_pages;
+		}
+	} else {
+		ptr = page_address(page);
+	}
+
+	*handle = (dma_addr_t)page_to_pfn(page) << PAGE_SHIFT;
+
+	dev_dbg(dev, "dma_alloc: %#zx bytes at %pK (%#llx), %s cma, %s high\n",
+			size, ptr, (long long)*handle,
+			dev_get_cma_area(dev) ? "is" : "not",
+			PageHighMem(page) ? "is" : "not");
+
+	return ptr;
+
+free_pages:
+	if (dev_get_cma_area(dev)) {
+		dma_release_from_contiguous(dev, page, count);
+	} else {
+		struct page *e = page + count;
+
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
+	}
+
+	return NULL;
+}
+
+static void axon_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t handle, DMA_ATTRS attrs)
+{
+	struct page *page = pfn_to_page(handle >> PAGE_SHIFT);
+
+	size = PAGE_ALIGN(size);
+
+	if (PageHighMem(page)) {
+		unmap_kernel_range((unsigned long)cpu_addr, size);
+		vunmap(cpu_addr);
+	}
+
+	if (dev_get_cma_area(dev)) {
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	} else {
+		struct page *e = page + (size >> PAGE_SHIFT);
+
+		while (page < e) {
+			__free_page(page);
+			page++;
+		}
+	}
+}
+
+struct dma_map_ops axon_dma_ops = {
+	.alloc		= axon_dma_alloc,
+	.free		= axon_dma_free,
+};
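+
+/*
+ * Note that these ops only take effect once they are installed on the
+ * axon device. The probe path (not part of this hunk) is assumed to do
+ * something along the lines of
+ *
+ *	set_dma_ops(transport->axon_dev, &axon_dma_ops);
+ *
+ * so that coherent allocations for the axon device (for example the
+ * dmam_alloc_coherent() call in transport_axon_init_tx_pool() below) go
+ * through axon_dma_alloc() and keep the normal cached kernel mapping
+ * instead of being remapped uncached.
+ */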
+
+/*
+ * Quotas
+ * ------
+ *
+ * Each service has two quotas, one for send and one for receive. The
+ * send quota is incremented when we allocate an mbuf, and decremented
+ * when we receive a freed buffer ack from the remote end, either in the
+ * reserved bits of the service id or in a special free bufs message.
+ *
+ * The receive quota is incremented whenever we receive a message and
+ * decremented when we free the mbuf. Exceeding the receive quota
+ * therefore indicates a driver bug: the other end's send quota should
+ * have prevented it from sending the message, so the two ends must be
+ * disagreeing about the quotas. If this happens then a warning is
+ * printed and the offending service is reset.
+ */
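+
+/*
+ * Illustrative example (values are arbitrary): with a send quota of 4
+ * we can allocate and send up to 4 mbufs before alloc_mbuf starts
+ * failing with -ENOBUFS. If the remote end then frees 3 of them, it
+ * acks those frees either in the reserved bits of the service id on its
+ * next outgoing message (at most VS_SERVICE_ID_TRANSPORT_MASK per
+ * message) or in a MSG_SEND_FREE_BUFS message; reduce_send_quota() then
+ * drops send_inflight from 4 to 1 and up to 3 more sends become
+ * possible.
+ */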
+
+/*
+ * The base of the mbuf has the destination service id, but we pass the
+ * data pointer starting after the service id. The following helper
+ * functions are used to avoid ugly pointer arithmetic when handling
+ * mbufs.
+ */
+static size_t mbuf_real_size(struct vs_mbuf_axon *mbuf)
+{
+	return mbuf->base.size + sizeof(vs_service_id_t);
+}
+
+static void *mbuf_real_base(struct vs_mbuf_axon *mbuf)
+{
+	return mbuf->base.data - sizeof(vs_service_id_t);
+}
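+
+/*
+ * For reference, the resulting buffer layout (not to scale) is:
+ *
+ *	+-----------------+--------------------------------------+
+ *	| vs_service_id_t | payload: base.data, length base.size |
+ *	+-----------------+--------------------------------------+
+ *	^ mbuf_real_base()  ^ mbuf->base.data
+ *
+ * and mbuf_real_size() == base.size + sizeof(vs_service_id_t).
+ */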
+
+/*
+ * Get the service_id and reserved bits from a message buffer, then
+ * clear the reserved bits so the upper layers don't see them.
+ */
+vs_service_id_t
+transport_get_mbuf_service_id(struct vs_transport_axon *transport,
+		void *data, unsigned int *freed_acks)
+{
+	unsigned int reserved_bits;
+	vs_service_id_t id;
+
+	/* Get the real service id and reserved bits */
+	id = *(vs_service_id_t *)data;
+	reserved_bits = vs_get_service_id_reserved_bits(id);
+	id = vs_get_real_service_id(id);
+
+	/* Clear the reserved bits in the service id */
+	vs_set_service_id_reserved_bits(&id, 0);
+	if (freed_acks) {
+		*(vs_service_id_t *)data = id;
+		*freed_acks = reserved_bits;
+	}
+	return id;
+}
+
+static void
+__transport_get_service_info(struct vs_mv_service_info *service_info)
+{
+	kref_get(&service_info->kref);
+}
+
+static struct vs_mv_service_info *
+transport_get_service_info(struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+
+	rcu_read_lock();
+	service_info = rcu_dereference(service->transport_priv);
+	if (service_info)
+		__transport_get_service_info(service_info);
+	rcu_read_unlock();
+
+	return service_info;
+}
+
+static struct vs_mv_service_info *
+transport_get_service_id_info(struct vs_transport_axon *transport,
+		vs_service_id_t service_id)
+{
+	struct vs_service_device *service;
+	struct vs_mv_service_info *service_info;
+
+	service = vs_session_get_service(transport->session_dev, service_id);
+	if (!service)
+		return NULL;
+
+	service_info = transport_get_service_info(service);
+
+	vs_put_service(service);
+	return service_info;
+}
+
+static void transport_info_free(struct rcu_head *rcu_head)
+{
+	struct vs_mv_service_info *service_info =
+		container_of(rcu_head, struct vs_mv_service_info, rcu_head);
+
+	vs_put_service(service_info->service);
+	kfree(service_info);
+}
+
+static void transport_info_release(struct kref *kref)
+{
+	struct vs_mv_service_info *service_info =
+		container_of(kref, struct vs_mv_service_info, kref);
+
+	call_rcu(&service_info->rcu_head, transport_info_free);
+}
+
+static void transport_put_service_info(struct vs_mv_service_info *service_info)
+{
+	kref_put(&service_info->kref, transport_info_release);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport);
+
+static void transport_fatal_error(struct vs_transport_axon *transport,
+		const char *msg)
+{
+	dev_err(transport->axon_dev, "Fatal transport error (%s); resetting\n",
+			msg);
+#ifdef DEBUG
+	dump_stack();
+#endif
+	transport_axon_reset(transport);
+}
+
+static unsigned int reduce_send_quota(struct vs_transport_axon *transport,
+		struct vs_mv_service_info *service_info, unsigned int count,
+		bool allow_tx_ready)
+{
+	int new_inflight, send_alloc;
+	bool was_over_quota, is_over_quota;
+
+	/* FIXME: Redmine issue #1303 - philip. */
+	spin_lock_irq(&transport->readiness_lock);
+	/*
+	 * We read the current send_alloc for error checking *before*
+	 * decrementing send_inflight. This avoids any false positives
+	 * due to send_alloc being incremented by a concurrent alloc_mbuf.
+	 *
+	 * Note that there is an implicit smp_mb() before atomic_sub_return(),
+	 * matching the explicit one in alloc_mbuf.
+	 */
+	send_alloc = atomic_read(&service_info->send_alloc);
+	new_inflight = atomic_sub_return(count, &service_info->send_inflight);
+
+	spin_unlock_irq(&transport->readiness_lock);
+	if (WARN_ON(new_inflight < send_alloc)) {
+		dev_err(transport->axon_dev,
+				"inflight sent messages for service %d is less than the number of allocated messages (%d < %d, was reduced by %d)\n",
+				service_info->service->id, new_inflight,
+				send_alloc, count);
+		transport_fatal_error(transport, "sent msg count underrun");
+		return 0;
+	}
+
+	was_over_quota = (new_inflight + count >=
+			service_info->service->send_quota);
+	is_over_quota = (new_inflight > service_info->service->send_quota);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Service %d quota %d -> %d (over_quota: %d -> %d)\n",
+			service_info->service->id, new_inflight + count,
+			new_inflight, was_over_quota, is_over_quota);
+
+	/*
+	 * Notify the service that a buffer has been freed. We call tx_ready
+	 * if this is a notification from the remote end (i.e. not an unsent
+	 * buffer) and the quota has just dropped below the maximum.
+	 */
+	vs_session_quota_available(transport->session_dev,
+			service_info->service->id, count,
+			!is_over_quota && was_over_quota && allow_tx_ready);
+
+	return count;
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+		dma_addr_t laddr);
+
+static void
+__transport_tx_cleanup(struct vs_transport_axon *transport)
+{
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	uptr = transport->tx_uptr_freed;
+	desc = &transport->tx_descs[uptr];
+
+	while (!okl4_axon_data_info_getpending(&desc->info)) {
+		if (!transport->tx_pools[uptr])
+			break;
+
+		__transport_tx_pool_free(transport->tx_pools[uptr],
+				okl4_axon_data_info_getladdr(&desc->info));
+		transport->tx_pools[uptr] = NULL;
+
+		INC_MOD(uptr, transport->tx->queues[0].entries);
+		desc = &transport->tx_descs[uptr];
+		transport->tx_uptr_freed = uptr;
+	}
+}
+
+static void
+transport_axon_free_tx_pool(struct work_struct *work)
+{
+	struct vs_axon_tx_pool *pool = container_of(work,
+			struct vs_axon_tx_pool, free_work);
+	struct vs_transport_axon *transport = pool->transport;
+
+	dmam_free_coherent(transport->axon_dev,
+			pool->count << pool->alloc_order,
+			pool->base_vaddr, pool->base_laddr);
+	devm_kfree(transport->axon_dev, pool);
+}
+
+static void
+transport_axon_queue_free_tx_pool(struct kref *kref)
+{
+	struct vs_axon_tx_pool *pool = container_of(kref,
+			struct vs_axon_tx_pool, kref);
+
+	/*
+	 * Put the task on the axon local work queue for running in
+	 * a context where IRQ is enabled.
+	 */
+	INIT_WORK(&pool->free_work, transport_axon_free_tx_pool);
+	queue_work(work_queue, &pool->free_work);
+}
+
+static void
+transport_axon_put_tx_pool(struct vs_axon_tx_pool *pool)
+{
+	kref_put(&pool->kref, transport_axon_queue_free_tx_pool);
+}
+
+/* Low-level tx buffer allocation, without quota tracking. */
+static struct vs_mbuf_axon *
+__transport_alloc_mbuf(struct vs_transport_axon *transport,
+		vs_service_id_t service_id, struct vs_axon_tx_pool *pool,
+		size_t size, gfp_t gfp_flags)
+{
+	size_t real_size = size + sizeof(vs_service_id_t);
+	struct vs_mbuf_axon *mbuf;
+	unsigned index;
+
+	if (WARN_ON(real_size > (1 << pool->alloc_order))) {
+		dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+				real_size, (size_t)1 << pool->alloc_order);
+		goto fail_message_size;
+	}
+
+	kref_get(&pool->kref);
+
+	do {
+		index = find_first_zero_bit(pool->alloc_bitmap, pool->count);
+		if (unlikely(index >= pool->count)) {
+			/*
+			 * No buffers left. This can't be an out-of-quota
+			 * situation, because we've already checked the quota;
+			 * it must be because there's a buffer left over in
+			 * the tx queue. Clean out the tx queue and retry.
+			 */
+			spin_lock_irq(&transport->readiness_lock);
+			__transport_tx_cleanup(transport);
+			spin_unlock_irq(&transport->readiness_lock);
+
+			index = find_first_zero_bit(pool->alloc_bitmap,
+					pool->count);
+		}
+		if (unlikely(index >= pool->count))
+			goto fail_buffer_alloc;
+	} while (unlikely(test_and_set_bit_lock(index, pool->alloc_bitmap)));
+
+	mbuf = kmem_cache_alloc(mbuf_cache, gfp_flags & ~GFP_ZONEMASK);
+	if (!mbuf)
+		goto fail_mbuf_alloc;
+
+	mbuf->base.is_recv = false;
+	mbuf->base.data = pool->base_vaddr + (index << pool->alloc_order);
+	mbuf->base.size = size;
+	mbuf->owner = transport;
+	mbuf->laddr = pool->base_laddr + (index << pool->alloc_order);
+	mbuf->pool = pool;
+
+	/*
+	 * We put the destination service id in the mbuf, but increment the
+	 * data pointer past it so the receiver doesn't always need to skip
+	 * the service id.
+	 */
+	*(vs_service_id_t *)mbuf->base.data = service_id;
+	mbuf->base.data += sizeof(vs_service_id_t);
+
+	return mbuf;
+
+fail_mbuf_alloc:
+	clear_bit_unlock(index, pool->alloc_bitmap);
+fail_buffer_alloc:
+	transport_axon_put_tx_pool(pool);
+fail_message_size:
+	return NULL;
+}
+
+/* Allocate a tx buffer for a specified service. */
+static struct vs_mbuf *transport_alloc_mbuf(struct vs_transport *_transport,
+		struct vs_service_device *service, size_t size, gfp_t gfp_flags)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	size_t real_size = size + sizeof(vs_service_id_t);
+	struct vs_mv_service_info *service_info = NULL;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id = service->id;
+
+	if (real_size > transport->msg_size) {
+		dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+				real_size, transport->msg_size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (WARN_ON(service_id == MSG_SEND_FREE_BUFS))
+		return ERR_PTR(-ENXIO);
+
+	service_info = transport_get_service_info(service);
+	if (WARN_ON(!service_info))
+		return ERR_PTR(-EINVAL);
+
+	if (!service_info->tx_pool) {
+		transport_put_service_info(service_info);
+		return ERR_PTR(-ECONNRESET);
+	}
+
+	if (!atomic_add_unless(&service_info->send_inflight, 1,
+			service_info->service->send_quota)) {
+		/* Service has reached its quota */
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Service %d is at max send quota %d\n",
+				service_id, service_info->service->send_quota);
+		transport_put_service_info(service_info);
+		return ERR_PTR(-ENOBUFS);
+	}
+
+	/*
+	 * Increment the count of allocated but unsent mbufs. This is done
+	 * *after* the send_inflight increment (with a barrier to enforce
+	 * ordering) to ensure that send_inflight is never less than
+	 * send_alloc - see reduce_send_quota().
+	 */
+	smp_mb__before_atomic_inc();
+	atomic_inc(&service_info->send_alloc);
+
+	mbuf = __transport_alloc_mbuf(transport, service_id,
+			service_info->tx_pool, size, gfp_flags);
+	if (!mbuf) {
+		/*
+		 * Failed to allocate a buffer - decrement our quota back to
+		 * where it was.
+		 */
+		atomic_dec(&service_info->send_alloc);
+		smp_mb__after_atomic_dec();
+		atomic_dec(&service_info->send_inflight);
+
+		transport_put_service_info(service_info);
+
+		return ERR_PTR(-ENOMEM);
+	}
+
+	transport_put_service_info(service_info);
+
+	return &mbuf->base;
+}
+
+static void transport_free_sent_mbuf(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+		dma_addr_t laddr)
+{
+	unsigned index = (laddr - pool->base_laddr) >> pool->alloc_order;
+
+	if (WARN_ON(index >= pool->count)) {
+		printk(KERN_DEBUG "free %#llx base %#llx order %d count %d\n",
+				(long long)laddr, (long long)pool->base_laddr,
+				pool->alloc_order, pool->count);
+		return;
+	}
+
+	clear_bit_unlock(index, pool->alloc_bitmap);
+	transport_axon_put_tx_pool(pool);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+		void *ptr, dma_addr_t laddr);
+
+static void transport_rx_recycle(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	void *data = mbuf_real_base(mbuf);
+	dma_addr_t laddr = mbuf->laddr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&transport->rx_alloc_lock, flags);
+
+	if (transport->rx_alloc_extra) {
+		transport->rx_alloc_extra--;
+		dma_pool_free(transport->rx_pool, data, laddr);
+	} else if (transport_rx_queue_buffer(transport, data, laddr) < 0) {
+		struct vs_axon_rx_freelist_entry *buf = data;
+		buf->laddr = laddr;
+		list_add_tail(&buf->list, &transport->rx_freelist);
+		tasklet_schedule(&transport->rx_tasklet);
+	} else {
+		tasklet_schedule(&transport->rx_tasklet);
+	}
+
+	spin_unlock_irqrestore(&transport->rx_alloc_lock, flags);
+}
+
+static void transport_free_mbuf_pools(struct vs_transport_axon *transport,
+		struct vs_service_device *service,
+		struct vs_mv_service_info *service_info)
+{
+	/*
+	 * Free the TX allocation pool. This will also free any buffer
+	 * memory allocated from the pool, so it is essential that
+	 * this happens only after we have successfully freed all
+	 * mbufs.
+	 *
+	 * Note that the pool will not exist if the core client is reset
+	 * before it receives a startup message.
+	 */
+	if (!IS_ERR_OR_NULL(service_info->tx_pool))
+		transport_axon_put_tx_pool(service_info->tx_pool);
+	service_info->tx_pool = NULL;
+
+	/* Mark the service's preallocated RX buffers as extra. */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	transport->rx_alloc_extra += service_info->rx_allocated;
+	service_info->rx_allocated = 0;
+	spin_unlock_irq(&transport->rx_alloc_lock);
+}
+
+/* Low-level tx or rx buffer free, with no quota tracking */
+static void __transport_free_mbuf(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, bool is_rx)
+{
+	if (is_rx) {
+		transport_rx_recycle(transport, mbuf);
+	} else {
+		__transport_tx_pool_free(mbuf->pool, mbuf->laddr);
+	}
+
+	kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void transport_free_mbuf(struct vs_transport *_transport,
+		struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+	struct vs_mv_service_info *service_info = NULL;
+	void *data = mbuf_real_base(mbuf);
+	vs_service_id_t service_id __maybe_unused =
+		transport_get_mbuf_service_id(transport, data, NULL);
+	bool is_recv = mbuf->base.is_recv;
+
+	WARN_ON(!service);
+	service_info = transport_get_service_info(service);
+
+	__transport_free_mbuf(transport, mbuf, is_recv);
+
+	/*
+	 * If this message was left over from a service that has already been
+	 * deleted, we don't need to do any quota accounting.
+	 */
+	if (!service_info)
+		return;
+
+	if (unlikely(atomic_read(&service_info->outstanding_frees))) {
+		if (atomic_dec_and_test(&service_info->outstanding_frees)) {
+			dev_dbg(transport->axon_dev,
+				"service %d all outstanding frees done\n",
+				service->id);
+			transport_free_mbuf_pools(transport, service,
+					service_info);
+			vs_service_enable(service);
+		} else {
+			dev_dbg(transport->axon_dev,
+				"service %d outstanding frees -> %d\n",
+				service->id, atomic_read(
+					&service_info->outstanding_frees));
+		}
+	} else if (is_recv) {
+		smp_mb__before_atomic_dec();
+		atomic_dec(&service_info->recv_inflight);
+		if (atomic_inc_return(&service_info->recv_freed) >=
+				service_info->recv_freed_watermark) {
+			transport->free_bufs_pending = true;
+			schedule_delayed_work(&transport->free_bufs_work, 0);
+		}
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Freed recv buffer for service %d rq=%d/%d, freed=%d (watermark = %d)\n",
+				service_id,
+				atomic_read(&service_info->recv_inflight),
+				service_info->service->recv_quota,
+				atomic_read(&service_info->recv_freed),
+				service_info->recv_freed_watermark);
+	} else {
+		/*
+		 * We are freeing a message buffer that we allocated. This
+		 * usually happens on error paths in application drivers if
+		 * we allocated a buffer but failed to send it. In this case
+		 * we need to decrement our own send quota since we didn't
+		 * send anything.
+		 */
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Freeing send buffer for service %d, send quota = %d\n",
+				service_id, atomic_read(&service_info->send_inflight));
+
+		smp_mb__before_atomic_dec();
+		atomic_dec(&service_info->send_alloc);
+
+		/*
+		 * We don't allow the tx_ready handler to run when we are
+		 * freeing an mbuf that we allocated.
+		 */
+		reduce_send_quota(transport, service_info, 1, false);
+	}
+
+	transport_put_service_info(service_info);
+}
+
+static size_t transport_mbuf_size(struct vs_mbuf *_mbuf)
+{
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+
+	return mbuf_real_size(mbuf);
+}
+
+static size_t transport_max_mbuf_size(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	return transport->msg_size - sizeof(vs_service_id_t);
+}
+
+static int okl4_error_to_errno(okl4_error_t err)
+{
+	switch (err) {
+	case OKL4_OK:
+		return 0;
+	case OKL4_ERROR_AXON_QUEUE_NOT_MAPPED:
+		/* Axon has been reset locally */
+		return -ECONNRESET;
+	case OKL4_ERROR_AXON_QUEUE_NOT_READY:
+		/* No message buffers in the queue. */
+		return -ENOBUFS;
+	case OKL4_ERROR_AXON_INVALID_OFFSET:
+	case OKL4_ERROR_AXON_AREA_TOO_BIG:
+		/* Buffer address is bad */
+		return -EFAULT;
+	case OKL4_ERROR_AXON_BAD_MESSAGE_SIZE:
+	case OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED:
+		/* One of the Axon's message size limits has been exceeded */
+		return -EMSGSIZE;
+	default:
+		/* Miscellaneous failure, probably a bad cap */
+		return -EIO;
+	}
+}
+
+static void queue_tx_mbuf(struct vs_mbuf_axon *mbuf, struct vs_transport_axon *priv,
+		vs_service_id_t service_id)
+{
+	list_add_tail(&mbuf->base.queue, &priv->tx_queue);
+}
+
+static void free_tx_mbufs(struct vs_transport_axon *priv)
+{
+	struct vs_mbuf_axon *child, *tmp;
+
+	list_for_each_entry_safe(child, tmp, &priv->tx_queue, base.queue) {
+		list_del(&child->base.queue);
+		__transport_free_mbuf(priv, child, false);
+	}
+}
+
+static int __transport_flush(struct vs_transport_axon *transport)
+{
+	_okl4_sys_axon_trigger_send(transport->tx_cap);
+	return 0;
+}
+
+static int transport_flush(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	return __transport_flush(transport);
+}
+
+/*
+ * Low-level transport message send function.
+ *
+ * The caller must hold the transport->readiness_lock, and is responsible for
+ * freeing the mbuf on successful send (use transport_free_sent_mbuf). The
+ * mbuf should _not_ be freed if this function fails. The Virtual Service
+ * driver is responsible for freeing the mbuf in the failure case.
+ */
+static int __transport_send(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+		unsigned long flags)
+{
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+	struct vs_axon_tx_pool *old_pool;
+	dma_addr_t old_laddr;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"send %zu bytes to service %d\n",
+			mbuf->base.size, service_id);
+	vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+	uptr = ACCESS_ONCE(transport->tx->queues[0].uptr);
+	desc = &transport->tx_descs[uptr];
+
+	/* Is the descriptor ready to use? */
+	if (okl4_axon_data_info_getpending(&desc->info))
+		return -ENOSPC;
+	mb();
+
+	/* The descriptor is ours; save its old state and increment the uptr */
+	old_pool = transport->tx_pools[uptr];
+	if (old_pool != NULL)
+		old_laddr = okl4_axon_data_info_getladdr(&desc->info);
+	transport->tx_pools[uptr] = mbuf->pool;
+
+	INC_MOD(uptr, transport->tx->queues[0].entries);
+	ACCESS_ONCE(transport->tx->queues[0].uptr) = uptr;
+
+	/* Set up the descriptor */
+	desc->data_size = mbuf_real_size(mbuf);
+	okl4_axon_data_info_setladdr(&desc->info, mbuf->laddr);
+
+	/* Message is ready to go */
+	wmb();
+	okl4_axon_data_info_setpending(&desc->info, true);
+
+	if (flags & VS_TRANSPORT_SEND_FLAGS_MORE) {
+		/*
+		 * This is a batched message, so we normally don't flush,
+		 * unless we've filled the queue completely.
+		 *
+		 * Races on the queue descriptor don't matter here, because
+		 * this is only an optimisation; the service should do an
+		 * explicit flush when it finishes the batch anyway.
+		 */
+		desc = &transport->tx_descs[uptr];
+		if (okl4_axon_data_info_getpending(&desc->info))
+			__transport_flush(transport);
+	} else {
+		__transport_flush(transport);
+	}
+
+	/* Free any buffer previously in the descriptor */
+	if (old_pool != NULL) {
+		u32 uptr_freed = transport->tx_uptr_freed;
+		INC_MOD(uptr_freed, transport->tx->queues[0].entries);
+		WARN_ON(uptr_freed != uptr);
+		__transport_tx_pool_free(old_pool, old_laddr);
+		transport->tx_uptr_freed = uptr_freed;
+	}
+
+	return 0;
+}
+
+static int transport_send_might_queue(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+		unsigned long flags, bool *queued)
+{
+	int ret = 0;
+
+	lockdep_assert_held(&transport->readiness_lock);
+	*queued = false;
+
+	if (transport->readiness != VS_TRANSPORT_ACTIVE)
+		return -ECONNRESET;
+
+	if (!list_empty(&transport->tx_queue)) {
+		*queued = true;
+	} else {
+		ret = __transport_send(transport, mbuf, service_id, flags);
+		if (ret == -ENOSPC) {
+			*queued = true;
+			ret = 0;
+		}
+	}
+
+	if (*queued)
+		queue_tx_mbuf(mbuf, transport, service_id);
+
+	return ret;
+}
+
+static int transport_send(struct vs_transport *_transport,
+		struct vs_service_device *service, struct vs_mbuf *_mbuf,
+		unsigned long flags)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+	struct vs_mv_service_info *service_info;
+	vs_service_id_t service_id;
+	int recv_freed, freed_acks;
+	bool queued;
+	int err;
+	unsigned long irqflags;
+
+	if (WARN_ON(!transport || !mbuf || mbuf->owner != transport))
+		return -EINVAL;
+
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), NULL);
+
+	if (WARN_ON(service_id != service->id))
+		return -EINVAL;
+
+	service_info = transport_get_service_info(service);
+	if (!service_info)
+		return -EINVAL;
+
+	if (mbuf->base.is_recv) {
+		/*
+		 * This message buffer was allocated for receive. We don't
+		 * allow receive message buffers to be reused for sending
+		 * because it makes our quotas inconsistent.
+		 */
+		dev_err(&service_info->service->dev,
+				"Attempted to send a received message buffer\n");
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	if (!service_info->ready) {
+		transport_put_service_info(service_info);
+		return -ECOMM;
+	}
+
+	/*
+	 * Set the message's service id reserved bits to the number of buffers
+	 * we have freed. We can only ack 2 ^ VS_SERVICE_ID_RESERVED_BITS - 1
+	 * buffers in one message.
+	 */
+	do {
+		recv_freed = atomic_read(&service_info->recv_freed);
+		freed_acks = min_t(int, recv_freed,
+				VS_SERVICE_ID_TRANSPORT_MASK);
+	} while (recv_freed != atomic_cmpxchg(&service_info->recv_freed,
+				recv_freed, recv_freed - freed_acks));
+
+	service_id = service_info->service->id;
+	vs_set_service_id_reserved_bits(&service_id, freed_acks);
+	*(vs_service_id_t *)mbuf_real_base(mbuf) = service_id;
+
+	spin_lock_irqsave(&transport->readiness_lock, irqflags);
+	err = transport_send_might_queue(transport, mbuf,
+			service_info->service->id, flags, &queued);
+	if (err) {
+		/* We failed to send, so revert the freed acks */
+		if (atomic_add_return(freed_acks,
+				&service_info->recv_freed) >=
+				service_info->recv_freed_watermark) {
+			transport->free_bufs_pending = true;
+			schedule_delayed_work(&transport->free_bufs_work, 0);
+		}
+		transport_put_service_info(service_info);
+		spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+		return err;
+	}
+
+	atomic_dec(&service_info->send_alloc);
+
+	if (queued) {
+		transport_put_service_info(service_info);
+		spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+		return 0;
+	}
+
+	/*
+	 * The mbuf was sent successfully. We can free it locally since it is
+	 * now owned by the remote end.
+	 */
+	transport_free_sent_mbuf(transport, mbuf);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Send okay: service %d (0x%.2x) sq=%d/%d, alloc--=%d, rq=%d/%d, freed=%d/%d, bc=%d\n",
+			service_info->service->id, service_id,
+			atomic_read(&service_info->send_inflight),
+			service_info->service->send_quota,
+			atomic_read(&service_info->send_alloc),
+			atomic_read(&service_info->recv_inflight),
+			service_info->service->recv_quota, freed_acks,
+			atomic_read(&service_info->recv_freed),
+			atomic_read(&transport->free_bufs_balance));
+
+	transport_put_service_info(service_info);
+	spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+
+	return 0;
+}
+
+static void transport_free_bufs_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct vs_transport_axon *transport = container_of(dwork,
+			struct vs_transport_axon, free_bufs_work);
+	struct vs_mbuf_axon *mbuf;
+	int i, err, count = 0, old_balance;
+	bool queued;
+	size_t size;
+	u16 *p;
+
+	/*
+	 * Atomically decide whether to send a message, and increment
+	 * the balance if we are going to.
+	 *
+	 * We don't need barriers before these reads because they're
+	 * implicit in the work scheduling.
+	 */
+	do {
+		old_balance = atomic_read(&transport->free_bufs_balance);
+
+		/*
+		 * We only try to send if the balance is negative,
+		 * or if we have been triggered by going over a
+		 * watermark.
+		 */
+		if (old_balance >= 0 && !transport->free_bufs_pending)
+			return;
+
+		/*
+		 * If we've hit the max balance, we can't send. The
+		 * tasklet will be rescheduled next time the balance
+		 * is decremented, if free_bufs_pending is true.
+		 */
+		if (old_balance >= MAX_BALANCE)
+			return;
+
+	} while (old_balance != atomic_cmpxchg(&transport->free_bufs_balance,
+			old_balance, old_balance + 1));
+
+	/* Try to allocate a message buffer. */
+	mbuf = __transport_alloc_mbuf(transport, MSG_SEND_FREE_BUFS,
+			transport->free_bufs_pool,
+			transport->msg_size - sizeof(vs_service_id_t),
+			GFP_KERNEL | __GFP_NOWARN);
+	if (!mbuf) {
+		/* Out of memory at the moment; retry later. */
+		atomic_dec(&transport->free_bufs_balance);
+		schedule_delayed_work(dwork, FREE_BUFS_RETRY_DELAY);
+		return;
+	}
+
+	/*
+	 * Clear free_bufs_pending, because we are going to try to send.  We
+	 * need a write barrier afterwards to guarantee that this write is
+	 * ordered before any writes to the recv_freed counts, and therefore
+	 * before any remote free_bufs_pending = true when a service goes
+	 * over its watermark right after we inspect it.
+	 *
+	 * The matching barrier is implicit in the atomic_inc_return in
+	 * transport_free_mbuf().
+	 */
+	transport->free_bufs_pending = false;
+	smp_wmb();
+
+	/*
+	 * Fill in the buffer. Message format is:
+	 *
+	 *   u16: Number of services
+	 *
+	 *   For each service:
+	 *       u16: Service ID
+	 *       u16: Number of freed buffers
+	 */
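+	/*
+	 * Illustrative example: if services 3 and 7 have freed 2 and 5
+	 * buffers respectively since the last ack, the finished message is
+	 * the five u16 words { 2, 3, 2, 7, 5 }: a count of 2, followed by
+	 * the (id, freed) pairs (3, 2) and (7, 5).
+	 */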
+	p = mbuf->base.data;
+	*(p++) = 0;
+
+	for_each_set_bit(i, transport->service_bitmap,
+			VS_SERVICE_ID_BITMAP_BITS) {
+		struct vs_mv_service_info *service_info;
+		int recv_freed;
+		u16 freed_acks;
+
+		service_info = transport_get_service_id_info(transport, i);
+		if (!service_info)
+			continue;
+
+		/*
+		 * Don't let the message exceed the maximum size for the
+		 * transport.
+		 */
+		size = sizeof(vs_service_id_t) + sizeof(u16) +
+				(count * (2 * sizeof(u16)));
+		if (size > transport->msg_size) {
+			/* FIXME: Jira ticket SDK-3131 - ryanm. */
+			transport_put_service_info(service_info);
+			transport->free_bufs_pending = true;
+			break;
+		}
+
+		/*
+		 * We decrement each service's quota immediately by up to
+		 * USHRT_MAX. If we subsequently fail to send the message then
+		 * we return the count to what it was previously.
+		 */
+		do {
+			recv_freed = atomic_read(&service_info->recv_freed);
+			freed_acks = min_t(int, USHRT_MAX, recv_freed);
+		} while (recv_freed != atomic_cmpxchg(
+				&service_info->recv_freed,
+				recv_freed, recv_freed - freed_acks));
+
+		if (freed_acks) {
+			if (freed_acks < recv_freed)
+				transport->free_bufs_pending = true;
+
+			*(p++) = service_info->service->id;
+			*(p++) = freed_acks;
+			count++;
+
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"  [%.2d] Freed %.2d buffers\n",
+					service_info->service->id,
+					freed_acks);
+		} else {
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"  [%.2d] No buffers to free\n",
+					service_info->service->id);
+		}
+
+		transport_put_service_info(service_info);
+	}
+
+	if (transport->free_bufs_pending)
+		schedule_delayed_work(dwork, 0);
+
+	if (count == 0 && old_balance >= 0) {
+		/*
+		 * We are sending a new free bufs message, but we have no
+		 * freed buffers to tell the other end about. We don't send
+		 * an empty message unless the pre-increment balance was
+		 * negative (in which case we need to ack a remote free_bufs).
+		 *
+		 * Note that nobody else can increase the balance, so we only
+		 * need to check for a non-negative balance once before
+		 * decrementing. However, if the incoming free-bufs handler
+		 * concurrently decrements, the balance may become negative,
+		 * in which case we reschedule ourselves immediately to send
+		 * the ack.
+		 */
+		if (atomic_dec_return(&transport->free_bufs_balance) < 0)
+			schedule_delayed_work(dwork, 0);
+
+		__transport_free_mbuf(transport, mbuf, false);
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"No services had buffers to free\n");
+
+		return;
+	}
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Sending free bufs message for %d services\n", count);
+
+	/* Fix up the message size */
+	p = mbuf->base.data;
+	*p = count;
+	mbuf->base.size = sizeof(u16) * ((count * 2) + 1);
+
+	spin_lock_irq(&transport->readiness_lock);
+	err = transport_send_might_queue(transport, mbuf, MSG_SEND_FREE_BUFS,
+			0, &queued);
+	if (err) {
+		spin_unlock_irq(&transport->readiness_lock);
+		goto fail;
+	}
+
+	/* FIXME: Jira ticket SDK-4675 - ryanm. */
+	if (!queued) {
+		/*
+		 * The mbuf was sent successfully. We can free it locally
+		 * since it is now owned by the remote end.
+		 */
+		transport_free_sent_mbuf(transport, mbuf);
+	}
+	spin_unlock_irq(&transport->readiness_lock);
+
+	return;
+
+fail:
+	dev_err(transport->axon_dev,
+			"Failed to send free bufs message: %d\n", err);
+	transport_fatal_error(transport, "free bufs send failed");
+}
+
+int transport_notify(struct vs_transport *_transport,
+		struct vs_service_device *service, unsigned long bits)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	unsigned long bit_offset, bitmask, word;
+	int first_set_bit, spilled_bits;
+
+	BUG_ON(!transport);
+
+	if (!bits)
+		return -EINVAL;
+
+	/* Check that the service isn't trying to raise bits it doesn't own */
+	if (bits & ~((1UL << service->notify_send_bits) - 1))
+		return -EINVAL;
+
+	bit_offset = service->notify_send_offset;
+	word = BIT_WORD(bit_offset);
+	bitmask = bits << (bit_offset % BITS_PER_LONG);
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"Sending notification %ld to service id %d\n", bitmask,
+			service->id);
+
+	_okl4_sys_vinterrupt_raise(transport->notify_cap[word], bitmask);
+
+	/*
+	 * Bit range may spill into the next virq line.
+	 *
+	 * Check by adding the bit offset to the index of the highest set bit
+	 * in the requested bitmask. If we need to raise a bit that is greater
+	 * than bit 31, we have spilled into the next word and need to raise
+	 * that too.
+	 */
+	first_set_bit = find_first_bit(&bits, BITS_PER_LONG);
+	spilled_bits = first_set_bit + bit_offset - (BITS_PER_LONG - 1);
+	if (spilled_bits > 0) {
+		/*
+		 * Calculate the new bitmask for the spilled bits. We do this
+		 * by shifting the requested bits to the right. The number of
+		 * shifts is determined by where the first spilled bit is.
+		 */
+		int first_spilled_bit = first_set_bit - spilled_bits + 1;
+
+		bitmask = bits >> first_spilled_bit;
+
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"Sending notification %ld to service id %d\n", bitmask,
+				service->id);
+
+		_okl4_sys_vinterrupt_raise(transport->notify_cap[word + 1], bitmask);
+	}
+
+	return 0;
+}
+
+static void
+transport_handle_free_bufs_message(struct vs_transport_axon *transport,
+		struct vs_mbuf_axon *mbuf)
+{
+	struct vs_mv_service_info *service_info;
+	vs_service_id_t service_id;
+	u16 *p = mbuf->base.data;
+	int i, count, freed_acks, new_balance;
+
+	count = *(p++);
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"Free bufs message received for %d services\n", count);
+	for (i = 0; i < count; i++) {
+		int old_quota __maybe_unused;
+
+		service_id = *(p++);
+		freed_acks = *(p++);
+
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev, "  [%.2d] %.4d\n",
+				service_id, freed_acks);
+
+		service_info = transport_get_service_id_info(transport,
+				service_id);
+		if (!service_info) {
+			vs_dev_debug(VS_DEBUG_TRANSPORT,
+					transport->session_dev,
+					transport->axon_dev,
+					"Got %d free_acks for unknown service %d\n",
+					freed_acks, service_id);
+			continue;
+		}
+
+		old_quota = atomic_read(&service_info->send_inflight);
+		freed_acks = reduce_send_quota(transport, service_info,
+				freed_acks, service_info->ready);
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"  [%.2d] Freed %.2d buffers (%d -> %d, quota = %d)\n",
+				service_id, freed_acks, old_quota,
+				atomic_read(&service_info->send_inflight),
+				service_info->service->send_quota);
+
+		transport_put_service_info(service_info);
+	}
+
+	__transport_free_mbuf(transport, mbuf, true);
+
+	new_balance = atomic_dec_return(&transport->free_bufs_balance);
+	if (new_balance < -MAX_BALANCE) {
+		dev_err(transport->axon_dev,
+				"Balance counter fell below -MAX_BALANCE (%d < %d)\n",
+				new_balance,
+				-MAX_BALANCE);
+		transport_fatal_error(transport, "balance counter underrun");
+		return;
+	}
+
+	/* Check if we need to send a freed buffers message back */
+	if (new_balance < 0 || transport->free_bufs_pending)
+		schedule_delayed_work(&transport->free_bufs_work, 0);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+		void *ptr, dma_addr_t laddr)
+{
+	struct okl4_axon_queue_entry *desc;
+	okl4_axon_data_info_t info;
+
+	/* Select the buffer desc to reallocate */
+	desc = &transport->rx_descs[transport->rx_uptr_allocated];
+	info = ACCESS_ONCE(desc->info);
+
+	/* If there is no space in the rx queue, fail */
+	if (okl4_axon_data_info_getusr(&info))
+		return -ENOSPC;
+
+	/* Don't update desc before reading the clear usr bit */
+	smp_mb();
+
+	/* Update the buffer pointer in the desc and mark it valid. */
+	transport->rx_ptrs[transport->rx_uptr_allocated] = ptr;
+	okl4_axon_data_info_setladdr(&info, (okl4_laddr_t)laddr);
+	okl4_axon_data_info_setpending(&info, true);
+	okl4_axon_data_info_setusr(&info, true);
+	mb();
+	ACCESS_ONCE(desc->info) = info;
+
+	/* Proceed to the next buffer */
+	INC_MOD(transport->rx_uptr_allocated,
+			transport->rx->queues[0].entries);
+
+	/* Return true if the next desc has no buffer yet */
+	desc = &transport->rx_descs[transport->rx_uptr_allocated];
+	return !okl4_axon_data_info_getusr(&desc->info);
+}
+
+/* TODO: multiple queue support / small message prioritisation */
+static int transport_process_msg(struct vs_transport_axon *transport)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id;
+	unsigned freed_acks;
+	u32 uptr;
+	struct okl4_axon_queue_entry *desc;
+	void **ptr;
+	okl4_axon_data_info_t info;
+
+	/* Select the descriptor to receive from */
+	uptr = ACCESS_ONCE(transport->rx->queues[0].uptr);
+	desc = &transport->rx_descs[uptr];
+	ptr = &transport->rx_ptrs[uptr];
+	info = ACCESS_ONCE(desc->info);
+
+	/* Have we emptied the whole queue? */
+	if (!okl4_axon_data_info_getusr(&info))
+		return -ENOBUFS;
+
+	/* Has the next buffer been filled yet? */
+	if (okl4_axon_data_info_getpending(&info))
+		return 0;
+
+	/* Don't read the buffer or desc before seeing a cleared pending bit */
+	rmb();
+
+	/* Is the message too small to be valid? */
+	if (desc->data_size < sizeof(vs_service_id_t))
+		return -EBADMSG;
+
+	/* Allocate and set up the mbuf */
+	mbuf = kmem_cache_alloc(mbuf_cache, GFP_ATOMIC);
+	if (!mbuf)
+		return -ENOMEM;
+
+	mbuf->owner = transport;
+	mbuf->laddr = okl4_axon_data_info_getladdr(&info);
+	mbuf->pool = NULL;
+	mbuf->base.is_recv = true;
+	mbuf->base.data = *ptr + sizeof(vs_service_id_t);
+	mbuf->base.size = desc->data_size - sizeof(vs_service_id_t);
+
+	INC_MOD(uptr, transport->rx->queues[0].entries);
+	ACCESS_ONCE(transport->rx->queues[0].uptr) = uptr;
+
+	/* Finish reading desc before clearing usr bit */
+	smp_mb();
+
+	/* Re-check the pending bit, in case we've just been reset */
+	info = ACCESS_ONCE(desc->info);
+	if (unlikely(okl4_axon_data_info_getpending(&info))) {
+		kmem_cache_free(mbuf_cache, mbuf);
+		return 0;
+	}
+
+	/* Clear usr bit; after this point the buffer is owned by the mbuf */
+	okl4_axon_data_info_setusr(&info, false);
+	ACCESS_ONCE(desc->info) = info;
+
+	/* Determine who to deliver the mbuf to */
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), &freed_acks);
+
+	if (service_id == MSG_SEND_FREE_BUFS) {
+		transport_handle_free_bufs_message(transport, mbuf);
+		return 1;
+	}
+
+	service_info = transport_get_service_id_info(transport, service_id);
+	if (!service_info) {
+		vs_dev_debug(VS_DEBUG_TRANSPORT,
+				transport->session_dev, transport->axon_dev,
+				"discarding message for missing service %d\n",
+				service_id);
+		__transport_free_mbuf(transport, mbuf, true);
+		return -EIDRM;
+	}
+
+	/*
+	 * If the remote end has freed some buffers that we sent it, then we
+	 * can decrement our send quota count by that amount.
+	 */
+	freed_acks = reduce_send_quota(transport, service_info,
+			freed_acks, service_info->ready);
+
+	/* If the service has been reset, drop the message. */
+	if (!service_info->ready) {
+		vs_dev_debug(VS_DEBUG_TRANSPORT,
+				transport->session_dev, transport->axon_dev,
+				"discarding message for reset service %d\n",
+				service_id);
+
+		__transport_free_mbuf(transport, mbuf, true);
+		transport_put_service_info(service_info);
+
+		return 1;
+	}
+
+	/*
+	 * Increment our recv quota since we are now holding a buffer. We
+	 * will decrement it when the buffer is freed in transport_free_mbuf.
+	 */
+	if (!atomic_add_unless(&service_info->recv_inflight, 1,
+				service_info->service->recv_quota)) {
+		/*
+		 * Going over the recv_quota indicates that something bad
+		 * has happened because either the other end has exceeded
+		 * its send quota or the two ends have a disagreement about
+		 * what the quota is.
+		 *
+		 * We free the buffer and reset the transport.
+		 */
+		dev_err(transport->axon_dev,
+				"Service %d is at max receive quota %d - resetting\n",
+				service_info->service->id,
+				service_info->service->recv_quota);
+
+		transport_fatal_error(transport, "rx quota exceeded");
+
+		__transport_free_mbuf(transport, mbuf, true);
+		transport_put_service_info(service_info);
+
+		return 0;
+	}
+
+	WARN_ON(atomic_read(&service_info->recv_inflight) >
+			service_info->service->recv_quota);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev,
+			"receive %zu bytes from service 0x%.2x (%d): sq=%d/%d, rq=%d/%d, freed_acks=%d, freed=%d/%d bc=%d\n",
+			mbuf->base.size, service_info->service->id, service_id,
+			atomic_read(&service_info->send_inflight),
+			service_info->service->send_quota,
+			atomic_read(&service_info->recv_inflight),
+			service_info->service->recv_quota, freed_acks,
+			atomic_read(&service_info->recv_freed),
+			service_info->recv_freed_watermark,
+			atomic_read(&transport->free_bufs_balance));
+	vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+	if (vs_session_handle_message(transport->session_dev, &mbuf->base,
+			service_id) < 0)
+		transport_free_mbuf(&transport->transport,
+				service_info->service, &mbuf->base);
+
+	transport_put_service_info(service_info);
+
+	return 1;
+}
+
+static void transport_flush_tx_queues(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	int i;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	/* Release any queued mbufs */
+	free_tx_mbufs(transport);
+
+	/*
+	 * Re-attach the TX Axon's segment, which implicitly invalidates
+	 * the queues and stops any outgoing message transfers. The queues
+	 * will be reconfigured when the transport becomes ready again.
+	 */
+	err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX reattach failed: %d\n",
+				(int)err);
+	}
+
+	/*
+	 * The TX Axon has stopped, so we can safely clear the pending
+	 * bit and free the buffer for any outgoing messages, and reset uptr
+	 * and kptr to 0.
+	 */
+	for (i = 0; i < transport->tx->queues[0].entries; i++) {
+		if (!transport->tx_pools[i])
+			continue;
+
+		okl4_axon_data_info_setpending(
+				&transport->tx_descs[i].info, false);
+		__transport_tx_pool_free(transport->tx_pools[i],
+				okl4_axon_data_info_getladdr(
+					&transport->tx_descs[i].info));
+		transport->tx_pools[i] = NULL;
+	}
+	transport->tx->queues[0].uptr = 0;
+	transport->tx->queues[0].kptr = 0;
+	transport->tx_uptr_freed = 0;
+}
+
+static void transport_flush_rx_queues(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	int i;
+
+	lockdep_assert_held(&transport->readiness_lock);
+
+	/*
+	 * Re-attach the RX Axon's segment, which implicitly invalidates
+	 * the queues and stops any incoming message transfers, though the
+	 * sending end should already have cancelled those. The queues
+	 * will be reconfigured when the transport becomes ready again.
+	 */
+	err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX reattach failed: %d\n",
+				(int)err);
+	}
+
+	/*
+	 * The RX Axon has stopped, so we can reset the pending bit on all
+	 * allocated message buffers to prepare them for reuse when the reset
+	 * completes.
+	 */
+	for (i = 0; i < transport->rx->queues[0].entries; i++) {
+		if (okl4_axon_data_info_getusr(&transport->rx_descs[i].info))
+			okl4_axon_data_info_setpending(
+					&transport->rx_descs[i].info, true);
+	}
+
+	/*
+	 * Reset kptr to the current uptr.
+	 *
+	 * We use a barrier here to ensure the pending bits are reset before
+	 * reading uptr, matching the barrier in transport_process_msg between
+	 * the uptr update and the second check of the pending bit. This means
+	 * that races with transport_process_msg() will end in one of two
+	 * ways:
+	 *
+	 * 1. transport_process_msg() updates uptr before this barrier, so the
+	 *    RX buffer is passed up to the session layer to be rejected there
+	 *    and recycled; or
+	 *
+	 * 2. the reset pending bit is seen by the second check in
+	 *    transport_process_msg(), which knows that it is being reset and
+	 *    can drop the message before it claims the buffer.
+	 */
+	smp_mb();
+	transport->rx->queues[0].kptr =
+		ACCESS_ONCE(transport->rx->queues[0].uptr);
+
+	/*
+	 * Cancel any pending freed bufs work. We can't flush it here, but
+	 * that is OK: we will do so before we become ready.
+	 */
+	cancel_delayed_work(&transport->free_bufs_work);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport)
+{
+	okl4_error_t err;
+	unsigned long flags;
+	bool reset_complete = false;
+
+	spin_lock_irqsave(&transport->readiness_lock, flags);
+
+	/*
+	 * Reset the transport, dumping any messages in transit, and tell the
+	 * remote end that it should do the same.
+	 *
+	 * We only do this if the transport is not already marked reset. Doing
+	 * otherwise would be redundant.
+	 */
+	if ((transport->readiness != VS_TRANSPORT_RESET) &&
+			transport->readiness != VS_TRANSPORT_LOCAL_RESET &&
+			transport->readiness != VS_TRANSPORT_REMOTE_READY) {
+		/*
+		 * Flush the Axons' TX queues. We can't flush the RX queues
+		 * until after the remote end has acknowledged the reset.
+		 */
+		transport_flush_tx_queues(transport);
+
+		/*
+		 * Raise a reset request VIRQ, and discard any incoming reset
+		 * or ready notifications as they are now stale. Note that we
+		 * must do this in a single syscall.
+		 */
+		err = _okl4_sys_vinterrupt_clear_and_raise(
+				transport->reset_okl4_irq,
+				transport->reset_cap, 0UL,
+				VS_TRANSPORT_VIRQ_RESET_REQ).error;
+		if (err != OKL4_OK) {
+			dev_err(transport->axon_dev, "Reset raise failed: %d\n",
+					(int)err);
+		}
+
+		/* Local reset is complete */
+		if (transport->readiness != VS_TRANSPORT_SHUTDOWN)
+			transport->readiness = VS_TRANSPORT_LOCAL_RESET;
+	} else {
+		/* Already in reset */
+		reset_complete = true;
+	}
+
+	spin_unlock_irqrestore(&transport->readiness_lock, flags);
+
+	return reset_complete;
+}
+
+static void transport_reset(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "reset\n");
+
+	if (transport_axon_reset(transport)) {
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"reset while already reset (no-op)\n");
+
+		vs_session_handle_reset(transport->session_dev);
+	}
+}
+
+static void transport_ready(struct vs_transport *_transport)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	okl4_error_t err;
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"%s: becoming ready\n", __func__);
+
+	/*
+	 * Make sure any previously scheduled freed bufs work is cancelled.
+	 * It should not be possible for this to be rescheduled later, as long
+	 * as the transport is in reset.
+	 */
+	cancel_delayed_work_sync(&transport->free_bufs_work);
+	spin_lock_irq(&transport->readiness_lock);
+
+	atomic_set(&transport->free_bufs_balance, 0);
+	transport->free_bufs_pending = false;
+
+	switch (transport->readiness) {
+	case VS_TRANSPORT_RESET:
+		transport->readiness = VS_TRANSPORT_LOCAL_READY;
+		break;
+	case VS_TRANSPORT_REMOTE_READY:
+		vs_session_handle_activate(transport->session_dev);
+		transport->readiness = VS_TRANSPORT_ACTIVE;
+		break;
+	case VS_TRANSPORT_LOCAL_RESET:
+		/*
+		 * Session layer is confused; usually because the reset at init
+		 * time, which it did not explicitly request, has not yet
+		 * completed. We just ignore it and wait for the reset. We
+		 * could avoid this by not starting the session until the
+		 * startup reset completes.
+		 */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	case VS_TRANSPORT_SHUTDOWN:
+		/* Do nothing. */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	default:
+		/* Session layer is broken */
+		WARN(1, "transport_ready() called in the wrong state: %d",
+				transport->readiness);
+		goto fail;
+	}
+
+	/* Raise a ready notification VIRQ. */
+	err = _okl4_sys_vinterrupt_raise(transport->reset_cap,
+			VS_TRANSPORT_VIRQ_READY);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "Ready raise failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	/*
+	 * Set up the Axons' queue pointers.
+	 */
+	err = _okl4_sys_axon_set_send_area(transport->tx_cap,
+			transport->tx_phys, transport->tx_size);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX set area failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_send_queue(transport->tx_cap,
+			transport->tx_phys);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX set queue failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_recv_area(transport->rx_cap,
+			transport->rx_phys, transport->rx_size);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX set area failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	err = _okl4_sys_axon_set_recv_queue(transport->rx_cap,
+			transport->rx_phys);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX set queue failed: %d\n",
+				(int)err);
+		goto fail;
+	}
+
+	spin_unlock_irq(&transport->readiness_lock);
+	return;
+
+fail:
+	spin_unlock_irq(&transport->readiness_lock);
+
+	transport_axon_reset(transport);
+}
+
+static int transport_service_add(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mv_service_info *service_info;
+
+	/*
+	 * We can't print out the core service add because the session
+	 * isn't fully registered at that time.
+	 */
+	if (service->id != 0)
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev,
+				"Add service - id = %d\n", service->id);
+
+	service_info = kzalloc(sizeof(*service_info), GFP_KERNEL);
+	if (!service_info)
+		return -ENOMEM;
+
+	kref_init(&service_info->kref);
+
+	/* Matching vs_put_service() is in transport_info_free */
+	service_info->service = vs_get_service(service);
+
+	/* Make the service_info visible */
+	rcu_assign_pointer(service->transport_priv, service_info);
+
+	__set_bit(service->id, transport->service_bitmap);
+
+	return 0;
+}
+
+static void transport_service_remove(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mv_service_info *service_info;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Remove service - id = %d\n",
+			service->id);
+
+	__clear_bit(service->id, transport->service_bitmap);
+
+	service_info = service->transport_priv;
+	rcu_assign_pointer(service->transport_priv, NULL);
+
+	if (service_info->ready) {
+		dev_err(transport->axon_dev,
+				"Removing service %d while ready\n",
+				service->id);
+		transport_fatal_error(transport, "removing ready service");
+	}
+
+	transport_put_service_info(service_info);
+}
+
+static struct vs_axon_tx_pool *
+transport_axon_init_tx_pool(struct vs_transport_axon *transport,
+		size_t msg_size, unsigned send_quota)
+{
+	struct vs_axon_tx_pool *pool;
+
+	pool = devm_kzalloc(transport->axon_dev, sizeof(*pool) +
+			(sizeof(unsigned long) * BITS_TO_LONGS(send_quota)),
+			GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->transport = transport;
+	pool->alloc_order = ilog2(msg_size + sizeof(vs_service_id_t));
+	pool->count = send_quota;
+
+	pool->base_vaddr = dmam_alloc_coherent(transport->axon_dev,
+			send_quota << pool->alloc_order, &pool->base_laddr,
+			GFP_KERNEL);
+	if (!pool->base_vaddr) {
+		dev_err(transport->axon_dev, "Couldn't allocate %lu times %zu bytes for TX\n",
+				(unsigned long)pool->count, (size_t)1 << pool->alloc_order);
+		devm_kfree(transport->axon_dev, pool);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	kref_init(&pool->kref);
+	return pool;
+}
+
+static int transport_service_start(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_notify_info *info;
+	int i, ret;
+	bool enable_rx;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Start service - id = %d\n",
+			service->id);
+
+	service_info = service->transport_priv;
+	__transport_get_service_info(service_info);
+
+	/* We shouldn't have any mbufs left from before the last reset. */
+	if (WARN_ON(atomic_read(&service_info->outstanding_frees))) {
+		transport_put_service_info(service_info);
+		return -EBUSY;
+	}
+
+	/*
+	 * The watermark is set to half of the receive quota, rounded up.
+	 * This is fairly arbitrary; rounding up ensures that we never set
+	 * it to 0, even for services with a quota of 1 (which would
+	 * otherwise trigger infinite free_bufs messages). For example, a
+	 * quota of 5 gives a watermark of 3.
+	 */
+	service_info->recv_freed_watermark = (service->recv_quota + 1) / 2;
+
+	if (WARN_ON(service->notify_recv_bits + service->notify_recv_offset >
+				transport->notify_rx_nirqs * BITS_PER_LONG)) {
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	if (WARN_ON(service->notify_send_bits + service->notify_send_offset >
+				transport->notify_tx_nirqs * BITS_PER_LONG)) {
+		transport_put_service_info(service_info);
+		return -EINVAL;
+	}
+
+	/* This is called twice for the core client only. */
+	WARN_ON(service->id != 0 && service_info->ready);
+
+	if (!service_info->ready) {
+		WARN_ON(atomic_read(&service_info->send_alloc));
+		WARN_ON(atomic_read(&service_info->recv_freed));
+		WARN_ON(atomic_read(&service_info->recv_inflight));
+	}
+
+	/* Create the TX buffer pool. */
+	WARN_ON(service->send_quota && service_info->tx_pool);
+	if (service->send_quota) {
+		service_info->tx_pool = transport_axon_init_tx_pool(transport,
+				transport->msg_size, service->send_quota);
+		if (IS_ERR(service_info->tx_pool)) {
+			ret = PTR_ERR(service_info->tx_pool);
+			service_info->tx_pool = NULL;
+			transport_put_service_info(service_info);
+			return ret;
+		}
+	}
+
+	/* Preallocate some RX buffers, if necessary. */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	i = min(transport->rx_alloc_extra,
+			service->recv_quota - service_info->rx_allocated);
+	transport->rx_alloc_extra -= i;
+	service_info->rx_allocated += i;
+	spin_unlock_irq(&transport->rx_alloc_lock);
+
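+	/*
+	 * The loop below places the newly allocated buffers on the
+	 * transport-wide rx_freelist; rx_allocated only records how many
+	 * buffers were allocated on this service's behalf.
+	 */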
+	for (; service_info->rx_allocated < service->recv_quota;
+			service_info->rx_allocated++) {
+		dma_addr_t laddr;
+		struct vs_axon_rx_freelist_entry *buf =
+			dma_pool_alloc(transport->rx_pool, GFP_KERNEL, &laddr);
+		if (WARN_ON(!buf))
+			break;
+		buf->laddr = laddr;
+
+		spin_lock_irq(&transport->rx_alloc_lock);
+		list_add(&buf->list, &transport->rx_freelist);
+		spin_unlock_irq(&transport->rx_alloc_lock);
+	}
+
+	for (i = 0; i < service->notify_recv_bits; i++) {
+		unsigned bit = i + service->notify_recv_offset;
+		info = &transport->transport.notify_info[bit];
+
+		info->service_id = service->id;
+		info->offset = service->notify_recv_offset;
+	}
+
+	atomic_set(&service_info->send_inflight, 0);
+
+	/*
+	 * If this is the core service and it wasn't ready before, we need to
+	 * enable RX for the whole transport.
+	 */
+	enable_rx = service->id == 0 && !service_info->ready;
+
+	service_info->ready = true;
+
+	/* We're now ready to receive. */
+	if (enable_rx)
+		tasklet_enable(&transport->rx_tasklet);
+
+	transport_put_service_info(service_info);
+
+	return 0;
+}
+
+static int transport_service_reset(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+	struct vs_mbuf_axon *child, *tmp;
+	int ret = 0, service_id, send_remaining, recv_remaining;
+
+	vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			transport->axon_dev, "Reset service - id = %d\n",
+			service->id);
+
+	service_info = service->transport_priv;
+	__transport_get_service_info(service_info);
+
+	/*
+	 * Clear the ready bit with the tasklet disabled. After this point,
+	 * incoming messages will be discarded by transport_process_msg()
+	 * without incrementing recv_inflight, so we won't spuriously see
+	 * nonzero recv_inflight values for messages that would be discarded
+	 * in the session layer.
+	 */
+	tasklet_disable(&transport->rx_tasklet);
+	service_info->ready = false;
+	if (service->id)
+		tasklet_enable(&transport->rx_tasklet);
+
+	/*
+	 * Cancel and free all pending outgoing messages for the service being
+	 * reset; i.e. those that have been sent by the service but are not
+	 * yet in the axon queue.
+	 *
+	 * Note that this does not clean out the axon queue; messages there
+	 * are already visible to OKL4 and may be transferred at any time,
+	 * so we treat those as already sent.
+	 */
+	spin_lock_irq(&transport->readiness_lock);
+	list_for_each_entry_safe(child, tmp, &transport->tx_queue, base.queue) {
+		service_id = transport_get_mbuf_service_id(transport,
+				mbuf_real_base(child), NULL);
+		if (service_id == service->id) {
+			list_del(&child->base.queue);
+			__transport_tx_pool_free(child->pool, child->laddr);
+		}
+	}
+	spin_unlock_irq(&transport->readiness_lock);
+
+	/*
+	 * If any buffers remain allocated, we mark them as outstanding frees.
+	 * The transport will remain disabled until this count goes to zero.
+	 */
+	send_remaining = atomic_read(&service_info->send_alloc);
+	recv_remaining = atomic_read(&service_info->recv_inflight);
+	ret = atomic_add_return(send_remaining + recv_remaining,
+			&service_info->outstanding_frees);
+	dev_dbg(transport->axon_dev, "reset service %d with %d outstanding (send %d, recv %d)\n",
+			service->id, ret, send_remaining, recv_remaining);
+
+	/*
+	 * Reduce the send alloc count to 0, accounting for races with frees,
+	 * which might have reduced either the alloc count or the outstanding
+	 * count.
+	 */
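+	/*
+	 * For example: if send_alloc was read as 3 but a concurrent free
+	 * dropped it to 2, the cmpxchg fails; we subtract the difference (1)
+	 * from outstanding_frees and retry the exchange with the value 2.
+	 */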
+	while (send_remaining > 0) {
+		unsigned new_send_remaining = atomic_cmpxchg(
+				&service_info->send_alloc, send_remaining, 0);
+		if (send_remaining == new_send_remaining) {
+			smp_mb();
+			break;
+		}
+		WARN_ON(send_remaining < new_send_remaining);
+		ret = atomic_sub_return(send_remaining - new_send_remaining,
+				&service_info->outstanding_frees);
+		send_remaining = new_send_remaining;
+		dev_dbg(transport->axon_dev, "failed to zero send quota, now %d outstanding (%d send)\n",
+				ret, send_remaining);
+	}
+
+	/* Repeat the above for the recv inflight count. */
+	while (recv_remaining > 0) {
+		unsigned new_recv_remaining = atomic_cmpxchg(
+				&service_info->recv_inflight, recv_remaining,
+				0);
+		if (recv_remaining == new_recv_remaining) {
+			smp_mb();
+			break;
+		}
+		WARN_ON(recv_remaining < new_recv_remaining);
+		ret = atomic_sub_return(recv_remaining - new_recv_remaining,
+				&service_info->outstanding_frees);
+		recv_remaining = new_recv_remaining;
+		dev_dbg(transport->axon_dev, "failed to zero recv quota, now %d outstanding (%d recv)\n",
+				ret, recv_remaining);
+	}
+
+	/* The outstanding frees count should never go negative */
+	WARN_ON(ret < 0);
+
+	/* Discard any outstanding freed buffer notifications. */
+	atomic_set(&service_info->recv_freed, 0);
+
+	/*
+	 * Wait for any previously queued free_bufs work to finish. This
+	 * guarantees that any freed buffer notifications that are already in
+	 * progress will be sent to the remote end before we return, and thus
+	 * before the reset is signalled.
+	 */
+	flush_delayed_work(&transport->free_bufs_work);
+
+	if (!ret)
+		transport_free_mbuf_pools(transport, service, service_info);
+
+	transport_put_service_info(service_info);
+
+	return ret;
+}
+
+static ssize_t transport_service_send_avail(struct vs_transport *_transport,
+		struct vs_service_device *service)
+{
+	struct vs_mv_service_info *service_info;
+	ssize_t count = 0;
+
+	service_info = service->transport_priv;
+	if (!service_info)
+		return -EINVAL;
+
+	__transport_get_service_info(service_info);
+
+	/* Use signed arithmetic so an over-quota count shows up as negative */
+	count = (ssize_t)service->send_quota -
+		atomic_read(&service_info->send_inflight);
+
+	transport_put_service_info(service_info);
+
+	return count < 0 ? 0 : count;
+}
+
+static void transport_get_notify_bits(struct vs_transport *_transport,
+		unsigned *send_notify_bits, unsigned *recv_notify_bits)
+{
+	struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+	*send_notify_bits = transport->notify_tx_nirqs * BITS_PER_LONG;
+	*recv_notify_bits = transport->notify_rx_nirqs * BITS_PER_LONG;
+}
+
+static void transport_get_quota_limits(struct vs_transport *_transport,
+		unsigned *send_quota, unsigned *recv_quota)
+{
+	/*
+	 * This driver does not need to enforce a quota limit, because message
+	 * buffers are allocated from the kernel heap rather than a fixed
+	 * buffer area. The queue length only determines the maximum size of
+	 * a message batch, and the number of preallocated RX buffers.
+	 *
+	 * Note that per-service quotas are still enforced; there is simply no
+	 * hard limit on the total of all service quotas.
+	 */
+
+	*send_quota = UINT_MAX;
+	*recv_quota = UINT_MAX;
+}
+
+static const struct vs_transport_vtable tvt = {
+	.alloc_mbuf		= transport_alloc_mbuf,
+	.free_mbuf		= transport_free_mbuf,
+	.mbuf_size		= transport_mbuf_size,
+	.max_mbuf_size		= transport_max_mbuf_size,
+	.send			= transport_send,
+	.flush			= transport_flush,
+	.notify			= transport_notify,
+	.reset			= transport_reset,
+	.ready			= transport_ready,
+	.service_add		= transport_service_add,
+	.service_remove		= transport_service_remove,
+	.service_start		= transport_service_start,
+	.service_reset		= transport_service_reset,
+	.service_send_avail	= transport_service_send_avail,
+	.get_notify_bits	= transport_get_notify_bits,
+	.get_quota_limits	= transport_get_quota_limits,
+};
+
+/* Incoming notification handling for client */
+static irqreturn_t transport_axon_notify_virq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+	struct vs_notify_info *n_info;
+	unsigned long offset, bit = 0, notification;
+	int word;
+	okl4_virq_flags_t payload = okl4_get_virq_payload(irq);
+
+	for (word = 0; word < transport->notify_rx_nirqs; word++)
+		if (irq == transport->notify_irq[word])
+			break;
+
+	if (word == transport->notify_rx_nirqs) {
+		dev_err(transport->axon_dev, "Bad IRQ %d\n", irq);
+		return IRQ_NONE;
+	}
+
+	vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+			"Got notification irq\n");
+
+#if defined(__BIG_ENDIAN)
+	/*
+	 * We rely on being able to use the Linux bitmap operations directly
+	 * on the VIRQ payload.
+	 */
+	BUILD_BUG_ON((sizeof(payload) % sizeof(unsigned long)) != 0);
+#endif
+
+	for_each_set_bit(bit, (unsigned long *)&payload, sizeof(payload) * 8) {
+		offset = bit + word * BITS_PER_LONG;
+
+		/*
+		 * We need to know which service id is associated with which
+		 * notification bit here. The transport is informed of the
+		 * notification-bit to service-id mapping during the initial
+		 * handshake protocol.
+		 */
+		n_info = &transport->transport.notify_info[offset];
+
+		notification = 1UL << (offset - n_info->offset);
+		vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				"Got notification bit %lu for service %d\n",
+				notification, n_info->service_id);
+
+		/* FIXME: Jira ticket SDK-2145 - shivanik. */
+		vs_session_handle_notify(transport->session_dev, notification,
+				n_info->service_id);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_reset_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+	bool do_reset = false;
+
+	u32 payload = okl4_get_virq_payload(irq);
+
+	spin_lock(&transport->readiness_lock);
+
+	if (payload & VS_TRANSPORT_VIRQ_RESET_REQ) {
+		okl4_error_t err;
+
+		transport->readiness = VS_TRANSPORT_RESET;
+
+		/* Flush the queues in both directions */
+		transport_flush_tx_queues(transport);
+		transport_flush_rx_queues(transport);
+
+		/*
+		 * When sending an ack, it is important to cancel any earlier
+		 * ready notification, so the recipient can safely assume that
+		 * the ack precedes any ready it sees
+		 */
+		err = _okl4_sys_vinterrupt_modify(transport->reset_cap,
+				~VS_TRANSPORT_VIRQ_READY,
+				VS_TRANSPORT_VIRQ_RESET_ACK);
+		if (err != OKL4_OK) {
+			dev_warn(transport->axon_dev,
+					"Error sending reset ack: %d\n", (int)err);
+		}
+
+		/*
+		 * Discard any pending ready event; it must have happened
+		 * before the reset request was raised, because we had not
+		 * yet sent the reset ack.
+		 */
+		payload = 0;
+		do_reset = true;
+	} else if (payload & VS_TRANSPORT_VIRQ_RESET_ACK) {
+		transport->readiness = VS_TRANSPORT_RESET;
+
+		/*
+		 * Flush the RX queues, as we know at this point that the
+		 * other end has flushed its TX queues.
+		 */
+		transport_flush_rx_queues(transport);
+
+		/*
+		 * Preserve any pending ready event; it must have been
+		 * generated after the ack (see above)
+		 */
+		payload &= VS_TRANSPORT_VIRQ_READY;
+		do_reset = true;
+	}
+
+	if (do_reset) {
+		/*
+		 * Reset the session. Note that duplicate calls to this are
+		 * expected if there are duplicate resets; they don't
+		 * necessarily match activate calls.
+		 */
+		vs_session_handle_reset(transport->session_dev);
+	}
+
+	if (payload & VS_TRANSPORT_VIRQ_READY) {
+		if (transport->readiness == VS_TRANSPORT_RESET) {
+			transport->readiness = VS_TRANSPORT_REMOTE_READY;
+		} else if (transport->readiness == VS_TRANSPORT_LOCAL_READY) {
+			vs_session_handle_activate(transport->session_dev);
+			transport->readiness = VS_TRANSPORT_ACTIVE;
+		} else {
+			/* Ready lost a race with reset; ignore it. */
+		}
+	}
+
+	spin_unlock(&transport->readiness_lock);
+
+	return IRQ_HANDLED;
+}
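+/*
+ * Summary of the reset handshake handled above: a RESET_REQ flushes both
+ * directions and is acknowledged with a RESET_ACK that clears any stale
+ * READY; a RESET_ACK means the remote end has already flushed its TX, so
+ * only the RX queues are flushed locally. A READY seen while we are
+ * LOCAL_READY activates the session; one seen while still in RESET is
+ * recorded as REMOTE_READY until the local ready path (elsewhere in this
+ * driver) catches up.
+ */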
+
+/*
+ * Axon VIRQ handling.
+ */
+static irqreturn_t transport_axon_rx_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+	okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+	if (okl4_axon_virq_flags_getfault(&flags)) {
+		dev_err_ratelimited(transport->axon_dev,
+				"fault on RX axon buffer or queue; resetting\n");
+		transport_axon_reset(transport);
+	} else if (okl4_axon_virq_flags_getready(&flags)) {
+		tasklet_schedule(&transport->rx_tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_tx_irq(int irq, void *priv)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+	okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+	if (okl4_axon_virq_flags_getfault(&flags)) {
+		dev_err_ratelimited(transport->axon_dev,
+				"fault on TX axon buffer or queue; resetting\n");
+		transport_axon_reset(transport);
+	} else if (okl4_axon_virq_flags_getready(&flags)) {
+		spin_lock(&transport->readiness_lock);
+		if (!list_empty(&transport->tx_queue))
+			tasklet_schedule(&transport->tx_tasklet);
+		spin_unlock(&transport->readiness_lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void transport_rx_tasklet(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+	int status;
+	struct _okl4_sys_axon_process_recv_return recv_result;
+
+	/* Refill the RX queue */
+	spin_lock_irq(&transport->rx_alloc_lock);
+	while (!list_empty(&transport->rx_freelist)) {
+		struct vs_axon_rx_freelist_entry *buf;
+		buf = list_first_entry(&transport->rx_freelist,
+				struct vs_axon_rx_freelist_entry, list);
+		list_del(&buf->list);
+		status = transport_rx_queue_buffer(transport, buf, buf->laddr);
+		if (status < 0)
+			list_add(&buf->list, &transport->rx_freelist);
+		if (status <= 0)
+			break;
+	}
+	spin_unlock_irq(&transport->rx_alloc_lock);
+
+	/* Start the transfer */
+	recv_result = _okl4_sys_axon_process_recv(transport->rx_cap,
+			MAX_TRANSFER_CHUNK);
+
+	if (recv_result.error == OKL4_OK) {
+		status = 1;
+	} else {
+		status = okl4_error_to_errno(recv_result.error);
+		vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+				transport->axon_dev, "rx syscall fail: %d",
+				status);
+	}
+
+	/* Process the received messages */
+	while (status > 0)
+		status = transport_process_msg(transport);
+
+	if (status == -ENOMEM) {
+		/* Give kswapd some time to reclaim pages */
+		mod_timer(&transport->rx_retry_timer, jiffies + HZ);
+	} else if (status == -ENOBUFS) {
+		/*
+		 * Reschedule ourselves if more RX buffers are available,
+		 * otherwise do nothing until a buffer is freed
+		 */
+		spin_lock_irq(&transport->rx_alloc_lock);
+		if (!list_empty(&transport->rx_freelist))
+			tasklet_schedule(&transport->rx_tasklet);
+		spin_unlock_irq(&transport->rx_alloc_lock);
+	} else if (!status && !recv_result.send_empty) {
+		/* There are more messages waiting; reschedule */
+		tasklet_schedule(&transport->rx_tasklet);
+	} else if (status < 0 && status != -ECONNRESET) {
+		/* Something else went wrong, other than a reset */
+		dev_err(transport->axon_dev, "Fatal RX error %d\n", status);
+		transport_fatal_error(transport, "rx failure");
+	} else {
+		/* Axon is empty; wait for an RX interrupt */
+	}
+}
+
+static void transport_tx_tasklet(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+	struct vs_mbuf_axon *mbuf;
+	vs_service_id_t service_id;
+	int err;
+
+	spin_lock_irq(&transport->readiness_lock);
+
+	/* Check to see if there is anything in the queue to send */
+	if (list_empty(&transport->tx_queue)) {
+		/*
+		 * Queue is empty, probably because a service reset cancelled
+		 * some pending messages. Nothing to do.
+		 */
+		spin_unlock_irq(&transport->readiness_lock);
+		return;
+	}
+
+	/*
+	 * Try to send the mbuf. If it can't be sent, the channel must be
+	 * full again, so wait until the next can-send event.
+	 */
+	mbuf = list_first_entry(&transport->tx_queue, struct vs_mbuf_axon,
+			base.queue);
+
+	service_id = transport_get_mbuf_service_id(transport,
+			mbuf_real_base(mbuf), NULL);
+
+	err = __transport_send(transport, mbuf, service_id,
+			VS_TRANSPORT_SEND_FLAGS_MORE);
+	if (err == -ENOSPC) {
+		/*
+		 * The channel is currently full. Leave the message in the
+		 * queue and try again when it has emptied.
+		 */
+		__transport_flush(transport);
+		goto out_unlock;
+	}
+	if (err) {
+		/*
+		 * We cannot properly handle a message send error here because
+		 * we have already returned success for the send to the service
+		 * driver when the message was queued. We don't want to leave
+		 * the message in the queue, since it could cause a DoS if the
+		 * error is persistent. Give up and force a transport reset.
+		 */
+		dev_err(transport->axon_dev,
+				"Failed to send queued mbuf: %d\n", err);
+		spin_unlock_irq(&transport->readiness_lock);
+		transport_fatal_error(transport, "queued send failure");
+		return;
+	}
+
+	/* Message sent, remove it from the queue and free the local copy */
+	list_del(&mbuf->base.queue);
+	transport_free_sent_mbuf(transport, mbuf);
+
+	/* Check to see if we have run out of messages to send */
+	if (list_empty(&transport->tx_queue)) {
+		/* Nothing left in the queue; flush and return */
+		__transport_flush(transport);
+	} else {
+		/* Reschedule to send the next message */
+		tasklet_schedule(&transport->tx_tasklet);
+	}
+
+out_unlock:
+	spin_unlock_irq(&transport->readiness_lock);
+}
+
+static void transport_rx_retry_timer(unsigned long data)
+{
+	struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+
+	/* Try to receive again; hopefully we have memory now */
+	tasklet_schedule(&transport->rx_tasklet);
+}
+
+/* Transport device management */
+
+static int alloc_notify_info(struct device *dev, struct vs_notify_info **info,
+		int *info_size, int virqs)
+{
+	/* Each VIRQ can handle BITS_PER_LONG notifications */
+	*info_size = sizeof(struct vs_notify_info) * (virqs * BITS_PER_LONG);
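+	/*
+	 * For example, two notification VIRQs on a 64-bit kernel provide 128
+	 * notification bits to divide between services.
+	 */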
+	*info = devm_kzalloc(dev, *info_size, GFP_KERNEL);
+	if (!(*info))
+		return -ENOMEM;
+
+	memset(*info, 0, *info_size);
+	return 0;
+}
+
+static int transport_axon_probe_virqs(struct vs_transport_axon *transport)
+{
+	struct device *device = transport->axon_dev;
+	struct device_node *axon_node = device->of_node;
+	struct device_node *vs_node = transport->of_node;
+	struct irq_data *irqd;
+	struct property *irqlines;
+	int ret, num_virq_lines;
+	struct device_node *virq_node = NULL;
+	u32 cap;
+	int i, irq_count;
+
+	if (of_irq_count(axon_node) < 2) {
+		dev_err(device, "Missing axon interrupts\n");
+		return -ENODEV;
+	}
+
+	irq_count = of_irq_count(vs_node);
+	if (irq_count < 1) {
+		dev_err(device, "Missing reset interrupt\n");
+		return -ENODEV;
+	} else if (irq_count > 1 + MAX_NOTIFICATION_LINES) {
+		dev_warn(device,
+			"Too many notification interrupts; only the first %d will be used\n",
+			MAX_NOTIFICATION_LINES);
+	}
+
+	/* Find the TX and RX axon IRQs and the reset IRQ */
+	transport->tx_irq = irq_of_parse_and_map(axon_node, 0);
+	if (!transport->tx_irq) {
+		dev_err(device, "No TX IRQ\n");
+		return -ENODEV;
+	}
+
+	transport->rx_irq = irq_of_parse_and_map(axon_node, 1);
+	if (!transport->rx_irq) {
+		dev_err(device, "No RX IRQ\n");
+		return -ENODEV;
+	}
+
+	transport->reset_irq = irq_of_parse_and_map(vs_node, 0);
+	if (!transport->reset_irq) {
+		dev_err(device, "No reset IRQ\n");
+		return -ENODEV;
+	}
+	irqd = irq_get_irq_data(transport->reset_irq);
+	if (!irqd) {
+		dev_err(device, "No reset IRQ data\n");
+		return -ENODEV;
+	}
+	transport->reset_okl4_irq = irqd_to_hwirq(irqd);
+
+	/* Find the notification IRQs */
+	transport->notify_rx_nirqs = min_t(int, irq_count - 1,
+			MAX_NOTIFICATION_LINES);
+	for (i = 0; i < transport->notify_rx_nirqs; i++) {
+		transport->notify_irq[i] = irq_of_parse_and_map(vs_node,
+				i + 1);
+		if (!transport->notify_irq[i]) {
+			dev_err(device, "Bad notify IRQ\n");
+			return -ENODEV;
+		}
+	}
+
+	/* Find all outgoing virq lines */
+	irqlines = of_find_property(vs_node, "okl,interrupt-lines", NULL);
+	if (!irqlines || irqlines->length < sizeof(u32)) {
+		dev_err(device, "No VIRQ sources found\n");
+		return -ENODEV;
+	}
+	num_virq_lines = irqlines->length / sizeof(u32);
+
+	virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines", 0);
+	if (!virq_node) {
+		dev_err(device, "No reset VIRQ line object\n");
+		return -ENODEV;
+	}
+	ret = of_property_read_u32(virq_node, "reg", &cap);
+	if (ret || cap == OKL4_KCAP_INVALID) {
+		dev_err(device, "Bad reset VIRQ line\n");
+		return -ENODEV;
+	}
+	transport->reset_cap = cap;
+
+	transport->notify_tx_nirqs = num_virq_lines - 1;
+	for (i = 0; i < transport->notify_tx_nirqs; i++) {
+		virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines",
+				i + 1);
+		if (!virq_node) {
+			dev_err(device, "No notify VIRQ line object\n");
+			return -ENODEV;
+		}
+		ret = of_property_read_u32(virq_node, "reg", &cap);
+		if (ret || cap == OKL4_KCAP_INVALID) {
+			dev_err(device, "Bad notify VIRQ line\n");
+			return -ENODEV;
+		}
+		transport->notify_cap[i] = cap;
+	}
+
+	return 0;
+}
+
+static int transport_axon_request_irqs(struct vs_transport_axon *transport)
+{
+	struct device *device = transport->axon_dev;
+	int i, ret;
+
+	ret = devm_request_irq(device, transport->reset_irq,
+			transport_axon_reset_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_irq(device, transport->tx_irq,
+			transport_axon_tx_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_request_irq(device, transport->rx_irq,
+			transport_axon_rx_irq, IRQF_TRIGGER_HIGH,
+			dev_name(transport->axon_dev), transport);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < transport->notify_rx_nirqs; i++) {
+		ret = devm_request_irq(device, transport->notify_irq[i],
+				transport_axon_notify_virq, IRQF_TRIGGER_HIGH,
+				dev_name(transport->axon_dev), transport);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int transport_axon_setup_descs(struct vs_transport_axon *transport)
+{
+	const int rx_buffer_order = ilog2(transport->msg_size +
+			sizeof(vs_service_id_t));
+	const size_t rx_queue_size = sizeof(*transport->rx) +
+		(sizeof(*transport->rx_descs) * transport->queue_size) +
+		(sizeof(*transport->rx_ptrs) * transport->queue_size);
+	const size_t tx_queue_size = sizeof(*transport->tx) +
+		(sizeof(*transport->tx_descs) * transport->queue_size);
+	const size_t queue_size = ALIGN(rx_queue_size,
+			__alignof__(*transport->tx)) + tx_queue_size;
+
+	struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+	void *queue;
+	struct device_node *seg_node;
+	u32 seg_index;
+	okl4_kcap_t seg_cap;
+	okl4_error_t err;
+	dma_addr_t dma_handle;
+	const __be32 *prop;
+	int len, ret;
+
+	/*
+	 * Allocate memory for the queue descriptors.
+	 *
+	 * We allocate one block for both rx and tx because the minimum
+	 * allocation from dmam_alloc_coherent is usually a whole page.
+	 */
+	ret = -ENOMEM;
+	queue = dmam_alloc_coherent(transport->axon_dev, queue_size,
+			&dma_handle, GFP_KERNEL);
+	if (queue == NULL) {
+		dev_err(transport->axon_dev, "Failed to allocate %zd bytes for queue descriptors\n",
+				queue_size);
+		goto fail_alloc_dma;
+	}
+	memset(queue, 0, queue_size);
+
+	/*
+	 * Find the OKL4 physical segment object to attach to the axons.
+	 *
+	 * If the device has a CMA area, and the cell's memory segments have
+	 * not been split unnecessarily, then all allocations through the DMA
+	 * API for this device will be within a single segment. So, we can
+	 * simply look up the segment that contains the queue.
+	 *
+	 * The location and size of the CMA area can be configured elsewhere.
+	 * In 3.12 and later a device-specific area can be reserved via the
+	 * In Linux 3.12 and later a device-specific area can be reserved via the
+	 * global area will be used, which has a size configurable on the
+	 * kernel command line and defaults to 16MB.
+	 */
+
+	/* Locate the physical segment */
+	ret = -ENODEV;
+	lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+			dma_handle >> OKL4_DEFAULT_PAGEBITS, -1);
+	err = okl4_mmu_lookup_index_geterror(&lookup_return.segment_index);
+	if (err == OKL4_ERROR_NOT_IN_SEGMENT) {
+		dev_err(transport->axon_dev,
+				"No segment found for DMA address %pK (%#llx)!\n",
+				queue, (unsigned long long)dma_handle);
+		goto fail_lookup_segment;
+	}
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev,
+				"Could not look up segment for DMA address %pK (%#llx): OKL4 error %d\n",
+				queue, (unsigned long long)dma_handle,
+				(int)err);
+		goto fail_lookup_segment;
+	}
+	seg_index = okl4_mmu_lookup_index_getindex(&lookup_return.segment_index);
+
+	dev_dbg(transport->axon_dev, "lookup pn %#lx got error %ld segment %ld count %lu offset %#lx\n",
+			(long)(dma_handle >> OKL4_DEFAULT_PAGEBITS),
+			(long)err, (long)seg_index,
+			(unsigned long)lookup_return.count_pn,
+			(unsigned long)lookup_return.offset_pn);
+
+	/* Locate the physical segment's OF node */
+	for_each_compatible_node(seg_node, NULL, "okl,microvisor-segment") {
+		u32 attach_index;
+		ret = of_property_read_u32(seg_node, "okl,segment-attachment",
+				&attach_index);
+		if (!ret && attach_index == seg_index)
+			break;
+	}
+	if (seg_node == NULL) {
+		ret = -ENXIO;
+		dev_err(transport->axon_dev, "No physical segment found for %pK\n",
+				queue);
+		goto fail_lookup_segment;
+	}
+
+	/* Determine the physical segment's cap */
+	prop = of_get_property(seg_node, "reg", &len);
+	ret = prop ? 0 : -EPERM;
+	if (!ret)
+		seg_cap = of_read_number(prop, of_n_addr_cells(seg_node));
+	if (!ret && seg_cap == OKL4_KCAP_INVALID)
+		ret = -ENXIO;
+	if (ret < 0) {
+		dev_err(transport->axon_dev, "missing physical-segment cap\n");
+		goto fail_lookup_segment;
+	}
+	transport->segment = seg_cap;
+	transport->segment_base =
+		(round_down(dma_handle >> OKL4_DEFAULT_PAGEBITS,
+			    lookup_return.count_pn) -
+		 lookup_return.offset_pn) << OKL4_DEFAULT_PAGEBITS;
+
+	dev_dbg(transport->axon_dev, "physical segment cap is %#lx, base %#llx\n",
+			(unsigned long)transport->segment,
+			(unsigned long long)transport->segment_base);
+
+	/* Attach the segment to the Axon endpoints */
+	err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "TX attach failed: %d\n",
+				(int)err);
+		ret = okl4_error_to_errno(err);
+		goto fail_attach;
+	}
+
+	err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+			transport->segment, transport->segment_base);
+	if (err != OKL4_OK) {
+		dev_err(transport->axon_dev, "RX attach failed: %d\n",
+				(int)err);
+		ret = okl4_error_to_errno(err);
+		goto fail_attach;
+	}
+
+	/* Array of pointers to the source TX pool for each outgoing buffer. */
+	transport->tx_pools = devm_kzalloc(transport->axon_dev,
+			sizeof(*transport->tx_pools) * transport->queue_size,
+			GFP_KERNEL);
+	if (!transport->tx_pools) {
+		ret = -ENOMEM;
+		goto fail_alloc_tx_pools;
+	}
+
+	/* Set up the rx queue descriptors. */
+	transport->rx = queue;
+	transport->rx_phys = dma_handle;
+	transport->rx_size = rx_queue_size;
+	transport->rx_descs = (void *)(transport->rx + 1);
+	transport->rx_ptrs = (void *)(transport->rx_descs + transport->queue_size);
+	okl4_axon_queue_size_setallocorder(&transport->rx->queue_sizes[0],
+			rx_buffer_order);
+	transport->rx->queues[0].queue_offset = sizeof(*transport->rx);
+	transport->rx->queues[0].entries = transport->queue_size;
+	transport->rx->queues[0].uptr = 0;
+	transport->rx->queues[0].kptr = 0;
+	transport->rx_uptr_allocated = 0;
+
+	/* Set up the tx queue descriptors. */
+	transport->tx = queue + ALIGN(rx_queue_size,
+			__alignof__(*transport->tx));
+	transport->tx_phys = dma_handle + ((void *)transport->tx - queue);
+	transport->tx_size = tx_queue_size;
+	transport->tx_descs = (void *)(transport->tx + 1);
+	transport->tx->queues[0].queue_offset = sizeof(*transport->tx);
+	transport->tx->queues[0].entries = transport->queue_size;
+	transport->tx->queues[0].uptr = 0;
+	transport->tx->queues[0].kptr = 0;
+	transport->tx_uptr_freed = 0;
+
+	/* Create a DMA pool for the RX buffers. */
+	transport->rx_pool = dmam_pool_create("vs_axon_rx_pool",
+			transport->axon_dev, 1 << rx_buffer_order,
+			max(dma_get_cache_alignment(),
+				1 << OKL4_PRESHIFT_LADDR_AXON_DATA_INFO), 0);
+	if (!transport->rx_pool) {
+		ret = -ENOMEM;
+		goto fail_alloc_tx_pools;
+	}
+
+	return 0;
+
+fail_alloc_tx_pools:
+fail_attach:
+fail_lookup_segment:
+	dmam_free_coherent(transport->axon_dev, queue_size, queue, dma_handle);
+fail_alloc_dma:
+	return ret;
+}
+
+static void transport_axon_free_descs(struct vs_transport_axon *transport)
+{
+	int i;
+
+	tasklet_disable(&transport->rx_tasklet);
+	tasklet_kill(&transport->rx_tasklet);
+
+	tasklet_disable(&transport->tx_tasklet);
+	tasklet_kill(&transport->tx_tasklet);
+
+	cancel_delayed_work_sync(&transport->free_bufs_work);
+
+	transport->tx = NULL;
+	transport->tx_descs = NULL;
+
+	for (i = 0; i < transport->rx->queues[0].entries; i++) {
+		struct okl4_axon_queue_entry *desc = &transport->rx_descs[i];
+
+		if (okl4_axon_data_info_getusr(&desc->info)) {
+			void *ptr = transport->rx_ptrs[i];
+			dma_addr_t dma = okl4_axon_data_info_getladdr(&desc->info);
+			dma_pool_free(transport->rx_pool, ptr, dma);
+		}
+	}
+
+	transport->rx = NULL;
+	transport->rx_descs = NULL;
+	transport->rx_ptrs = NULL;
+
+	/* Let devm free the queues so we don't have to keep the dma handle */
+}
+
+static int transport_axon_probe(struct platform_device *dev)
+{
+	struct vs_transport_axon *priv = NULL;
+	u32 cap[2];
+	u32 queue_size, msg_size;
+	int ret, i;
+	const char* name;
+
+	if (!dev_get_cma_area(&dev->dev) && !okl4_single_physical_segment) {
+		dev_err(&dev->dev, "Multiple physical segments, but CMA is disabled\n");
+		return -ENOSYS;
+	}
+
+	dev->dev.coherent_dma_mask = ~(u64)0;
+	dev->dev.archdata.dma_ops = &axon_dma_ops;
+
+	priv = devm_kzalloc(&dev->dev, sizeof(struct vs_transport_axon) +
+			sizeof(unsigned long), GFP_KERNEL);
+	if (priv == NULL) {
+		dev_err(&dev->dev, "failed to create transport object\n");
+		ret = -ENOMEM;
+		goto err_alloc_priv;
+	}
+	dev_set_drvdata(&dev->dev, priv);
+
+	priv->of_node = of_get_child_by_name(dev->dev.of_node,
+			"virtual-session");
+	if ((!priv->of_node) ||
+			(!of_device_is_compatible(priv->of_node,
+					"okl,virtual-session"))) {
+		dev_err(&dev->dev, "missing virtual-session node\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+
+	name = dev->dev.of_node->full_name;
+	of_property_read_string(dev->dev.of_node, "label", &name);
+
+	if (of_property_read_bool(priv->of_node, "okl,is-client")) {
+		priv->is_server = false;
+	} else if (of_property_read_bool(priv->of_node, "okl,is-server")) {
+		priv->is_server = true;
+	} else {
+		dev_err(&dev->dev, "virtual-session node is not marked as client or server\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+
+	priv->transport.vt = &tvt;
+	priv->transport.type = "microvisor";
+	priv->axon_dev = &dev->dev;
+
+	/* Read the Axon caps */
+	ret = of_property_read_u32_array(dev->dev.of_node, "reg", cap, 2);
+	if (ret < 0 || cap[0] == OKL4_KCAP_INVALID ||
+			cap[1] == OKL4_KCAP_INVALID) {
+		dev_err(&dev->dev, "missing axon endpoint caps\n");
+		ret = -ENODEV;
+		goto error_of_node;
+	}
+	priv->tx_cap = cap[0];
+	priv->rx_cap = cap[1];
+
+	/* Set transport properties; default to a 64 KiB buffer */
+	queue_size = 16;
+	(void)of_property_read_u32(priv->of_node, "okl,queue-length",
+			&queue_size);
+	priv->queue_size = max((size_t)queue_size, MIN_QUEUE_SIZE);
+
+	msg_size = PAGE_SIZE - sizeof(vs_service_id_t);
+	(void)of_property_read_u32(priv->of_node, "okl,message-size",
+			&msg_size);
+	priv->msg_size = max((size_t)msg_size, MIN_MSG_SIZE);
+
+	/*
+	 * Since the Axon API requires received message size limits to be
+	 * powers of two, we must round up the message size (including the
+	 * space reserved for the service ID).
+	 */
+	priv->msg_size = roundup_pow_of_two(priv->msg_size +
+			sizeof(vs_service_id_t)) - sizeof(vs_service_id_t);
+	if (priv->msg_size != msg_size)
+		dev_info(&dev->dev, "message size rounded up from %zd to %zd\n",
+				(size_t)msg_size, priv->msg_size);
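+	/*
+	 * For example, assuming a 4-byte vs_service_id_t: a requested message
+	 * size of 4096 becomes roundup_pow_of_two(4100) - 4 = 8188, while the
+	 * default of PAGE_SIZE - 4 already yields a power of two once the
+	 * service ID is added back, and is left unchanged.
+	 */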
+
+	INIT_LIST_HEAD(&priv->tx_queue);
+
+	/* Initialise the activation state, tasklets, and RX retry timer */
+	spin_lock_init(&priv->readiness_lock);
+	priv->readiness = VS_TRANSPORT_INIT;
+
+	tasklet_init(&priv->rx_tasklet, transport_rx_tasklet,
+		(unsigned long)priv);
+	tasklet_init(&priv->tx_tasklet, transport_tx_tasklet,
+		(unsigned long)priv);
+
+	INIT_DELAYED_WORK(&priv->free_bufs_work, transport_free_bufs_work);
+	spin_lock_init(&priv->rx_alloc_lock);
+	priv->rx_alloc_extra = 0;
+	INIT_LIST_HEAD(&priv->rx_freelist);
+
+	setup_timer(&priv->rx_retry_timer, transport_rx_retry_timer,
+			(unsigned long)priv);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+	set_timer_slack(&priv->rx_retry_timer, HZ);
+#endif
+
+	/* Keep RX disabled until the core service is ready. */
+	tasklet_disable(&priv->rx_tasklet);
+
+	ret = transport_axon_probe_virqs(priv);
+	if (ret < 0)
+		goto err_probe_virqs;
+
+	if (priv->notify_rx_nirqs) {
+		ret = alloc_notify_info(&dev->dev, &priv->transport.notify_info,
+				&priv->transport.notify_info_size,
+				priv->notify_rx_nirqs);
+		if (ret < 0) {
+			dev_err(&dev->dev, "Alloc notify_info failed\n");
+			goto err_alloc_notify;
+		}
+	} else {
+		priv->transport.notify_info = NULL;
+		priv->transport.notify_info_size = 0;
+	}
+
+	priv->free_bufs_pool = transport_axon_init_tx_pool(priv, priv->msg_size,
+			FREE_BUFS_QUOTA);
+	if (IS_ERR(priv->free_bufs_pool)) {
+		ret = PTR_ERR(priv->free_bufs_pool);
+		goto err_init_free_bufs_pool;
+	}
+
+	ret = transport_axon_setup_descs(priv);
+	if (ret < 0)
+		goto err_setup_descs;
+
+	/* Allocate RX buffers for free bufs messages */
+	for (i = 0; i < FREE_BUFS_QUOTA; i++) {
+		dma_addr_t laddr;
+		struct vs_axon_rx_freelist_entry *buf =
+			dma_pool_alloc(priv->rx_pool, GFP_KERNEL, &laddr);
+		if (!buf)
+			goto err_alloc_rx_free_bufs;
+		buf->laddr = laddr;
+
+		spin_lock_irq(&priv->rx_alloc_lock);
+		list_add_tail(&buf->list, &priv->rx_freelist);
+		spin_unlock_irq(&priv->rx_alloc_lock);
+	}
+
+	/* Set up the session device */
+	priv->session_dev = vs_session_register(&priv->transport, &dev->dev,
+			priv->is_server, name);
+	if (IS_ERR(priv->session_dev)) {
+		ret = PTR_ERR(priv->session_dev);
+		dev_err(&dev->dev, "failed to register session: %d\n", ret);
+		goto err_session_register;
+	}
+
+	/*
+	 * Start the core service. Note that it can't actually communicate
+	 * until the initial reset completes.
+	 */
+	vs_session_start(priv->session_dev);
+
+	/*
+	 * Reset the transport. This will also set the Axons' segment
+	 * attachments, and eventually the Axons' queue pointers (once the
+	 * session marks the transport ready).
+	 */
+	transport_reset(&priv->transport);
+
+	/*
+	 * We're ready to start handling IRQs at this point, so register the
+	 * handlers.
+	 */
+	ret = transport_axon_request_irqs(priv);
+	if (ret < 0)
+		goto err_irq_register;
+
+	return 0;
+
+err_irq_register:
+	vs_session_unregister(priv->session_dev);
+err_session_register:
+err_alloc_rx_free_bufs:
+	transport_axon_free_descs(priv);
+err_setup_descs:
+	transport_axon_put_tx_pool(priv->free_bufs_pool);
+err_init_free_bufs_pool:
+	if (priv->transport.notify_info)
+		devm_kfree(&dev->dev, priv->transport.notify_info);
+err_alloc_notify:
+err_probe_virqs:
+	del_timer_sync(&priv->rx_retry_timer);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+	cancel_delayed_work_sync(&priv->free_bufs_work);
+error_of_node:
+	devm_kfree(&dev->dev, priv);
+err_alloc_priv:
+	return ret;
+}
+
+static int transport_axon_remove(struct platform_device *dev)
+{
+	struct vs_transport_axon *priv = dev_get_drvdata(&dev->dev);
+	int i;
+
+	for (i = 0; i < priv->notify_rx_nirqs; i++)
+		devm_free_irq(&dev->dev, priv->notify_irq[i], priv);
+
+	devm_free_irq(&dev->dev, priv->rx_irq, priv);
+	irq_dispose_mapping(priv->rx_irq);
+	devm_free_irq(&dev->dev, priv->tx_irq, priv);
+	irq_dispose_mapping(priv->tx_irq);
+	devm_free_irq(&dev->dev, priv->reset_irq, priv);
+	irq_dispose_mapping(priv->reset_irq);
+
+	del_timer_sync(&priv->rx_retry_timer);
+	tasklet_kill(&priv->rx_tasklet);
+	tasklet_kill(&priv->tx_tasklet);
+	cancel_delayed_work_sync(&priv->free_bufs_work);
+
+	priv->readiness = VS_TRANSPORT_SHUTDOWN;
+	vs_session_unregister(priv->session_dev);
+	WARN_ON(priv->readiness != VS_TRANSPORT_SHUTDOWN);
+
+	transport_axon_free_descs(priv);
+	transport_axon_put_tx_pool(priv->free_bufs_pool);
+
+	if (priv->transport.notify_info)
+		devm_kfree(&dev->dev, priv->transport.notify_info);
+
+	free_tx_mbufs(priv);
+
+	flush_workqueue(work_queue);
+
+	while (!list_empty(&priv->rx_freelist)) {
+		struct vs_axon_rx_freelist_entry *buf;
+		buf = list_first_entry(&priv->rx_freelist,
+				struct vs_axon_rx_freelist_entry, list);
+		list_del(&buf->list);
+		dma_pool_free(priv->rx_pool, buf, buf->laddr);
+	}
+
+	devm_kfree(&dev->dev, priv);
+	return 0;
+}
+
+static const struct of_device_id transport_axon_of_match[] = {
+	{ .compatible = "okl,microvisor-axon-transport", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, transport_axon_of_match);
+
+static struct platform_driver transport_axon_driver = {
+	.probe		= transport_axon_probe,
+	.remove		= transport_axon_remove,
+	.driver = {
+		.name		= DRIVER_NAME,
+		.owner		= THIS_MODULE,
+		.bus		= &platform_bus_type,
+		.of_match_table = of_match_ptr(transport_axon_of_match),
+	},
+};
+
+static int __init vs_transport_axon_init(void)
+{
+	int ret;
+	okl4_error_t err;
+	struct device_node *cpus;
+	struct zone *zone;
+	struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+	u32 last_seen_attachment = -1;
+	bool first_attachment;
+
+	printk(KERN_INFO "Virtual Services transport driver for OKL4 Axons\n");
+
+	/* Allocate the Axon cleanup workqueue */
+	work_queue = alloc_workqueue("axon_cleanup", 0, 0);
+	if (!work_queue) {
+		ret = -ENOMEM;
+		goto fail_create_workqueue;
+	}
+
+	/* Locate the MMU capability, needed for lookups */
+	cpus = of_find_node_by_path("/cpus");
+	if (IS_ERR_OR_NULL(cpus)) {
+		ret = -EINVAL;
+		goto fail_mmu_cap;
+	}
+	ret = of_property_read_u32(cpus, "okl,vmmu-capability", &okl4_mmu_cap);
+	if (ret) {
+		goto fail_mmu_cap;
+	}
+	if (okl4_mmu_cap == OKL4_KCAP_INVALID) {
+		printk(KERN_ERR "%s: OKL4 MMU capability not found\n", __func__);
+		ret = -EPERM;
+		goto fail_mmu_cap;
+	}
+
+	/*
+	 * Determine whether there are multiple OKL4 physical memory segments
+	 * in this Cell. If so, every transport device must have a valid CMA
+	 * region, to guarantee that its buffer allocations all come from the
+	 * segment that is attached to the axon endpoints.
+	 *
+	 * We assume that each zone is contiguously mapped in stage 2 with a
+	 * constant physical-to-IPA offset, typically 0. The weaver won't
+	 * violate this assumption for Linux (or other HLOS) guests unless it
+	 * is explicitly told to.
+	 */
+	okl4_single_physical_segment = true;
+	first_attachment = true;
+	for_each_zone(zone) {
+		u32 attachment;
+
+		/* We only care about zones that the page allocator is using */
+		if (!zone->managed_pages)
+			continue;
+
+		/* Find the segment at the start of the zone */
+		lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+				zone->zone_start_pfn, -1);
+		err = okl4_mmu_lookup_index_geterror(
+				&lookup_return.segment_index);
+		if (err != OKL4_OK) {
+			printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+					__func__);
+			okl4_single_physical_segment = false;
+			break;
+		}
+		attachment = okl4_mmu_lookup_index_getindex(
+				&lookup_return.segment_index);
+
+		if (first_attachment) {
+			last_seen_attachment = attachment;
+			first_attachment = false;
+		} else if (last_seen_attachment != attachment) {
+			okl4_single_physical_segment = false;
+			break;
+		}
+
+		/* Find the segment at the end of the zone */
+		lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+				zone_end_pfn(zone) - 1, -1);
+		err = okl4_mmu_lookup_index_geterror(
+				&lookup_return.segment_index);
+		if (err != OKL4_OK) {
+			printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+					__func__);
+			okl4_single_physical_segment = false;
+			break;
+		}
+		attachment = okl4_mmu_lookup_index_getindex(
+				&lookup_return.segment_index);
+
+		/* Check that it's still the same segment */
+		if (last_seen_attachment != attachment) {
+			okl4_single_physical_segment = false;
+			break;
+		}
+	}
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s: physical segment count %s\n", __func__,
+			okl4_single_physical_segment ? "1" : ">1");
+#endif
+
+	mbuf_cache = KMEM_CACHE(vs_mbuf_axon, 0UL);
+	if (!mbuf_cache) {
+		ret = -ENOMEM;
+		goto kmem_cache_failed;
+	}
+
+	ret = platform_driver_register(&transport_axon_driver);
+	if (ret)
+		goto register_plat_driver_failed;
+
+	return ret;
+
+register_plat_driver_failed:
+	kmem_cache_destroy(mbuf_cache);
+	mbuf_cache = NULL;
+kmem_cache_failed:
+fail_mmu_cap:
+	if (work_queue)
+		destroy_workqueue(work_queue);
+fail_create_workqueue:
+	return ret;
+}
+
+static void __exit vs_transport_axon_exit(void)
+{
+	platform_driver_unregister(&transport_axon_driver);
+
+	rcu_barrier();
+
+	if (mbuf_cache)
+		kmem_cache_destroy(mbuf_cache);
+	mbuf_cache = NULL;
+
+	if (work_queue)
+		destroy_workqueue(work_queue);
+}
+
+module_init(vs_transport_axon_init);
+module_exit(vs_transport_axon_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 5676aef..f4e59c4 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -18,15 +18,16 @@
 
 static void disable_hotplug_cpu(int cpu)
 {
-	if (cpu_online(cpu)) {
-		lock_device_hotplug();
+	if (!cpu_is_hotpluggable(cpu))
+		return;
+	lock_device_hotplug();
+	if (cpu_online(cpu))
 		device_offline(get_cpu_device(cpu));
-		unlock_device_hotplug();
-	}
-	if (cpu_present(cpu))
+	if (!cpu_online(cpu) && cpu_present(cpu)) {
 		xen_arch_unregister_cpu(cpu);
-
-	set_cpu_present(cpu, false);
+		set_cpu_present(cpu, false);
+	}
+	unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1435d8c..4b0cc9d 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -139,7 +139,7 @@
 		clear_evtchn_to_irq_row(row);
 	}
 
-	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+	evtchn_to_irq[row][col] = irq;
 	return 0;
 }
 
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 9122ba2..abd49bc 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -282,17 +282,26 @@
 		/*
 		 * The Xenstore watch fires directly after registering it and
 		 * after a suspend/resume cycle. So ENOENT is no error but
-		 * might happen in those cases.
+		 * might happen in those cases. ERANGE is observed when we get
+		 * an empty value (''), this happens when we acknowledge the
+		 * request by writing '\0' below.
 		 */
-		if (err != -ENOENT)
+		if (err != -ENOENT && err != -ERANGE)
 			pr_err("Error %d reading sysrq code in control/sysrq\n",
 			       err);
 		xenbus_transaction_end(xbt, 1);
 		return;
 	}
 
-	if (sysrq_key != '\0')
-		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+	if (sysrq_key != '\0') {
+		err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+		if (err) {
+			pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+			       __func__, err);
+			xenbus_transaction_end(xbt, 1);
+			return;
+		}
+	}
 
 	err = xenbus_transaction_end(xbt, 0);
 	if (err == -EAGAIN)
@@ -344,7 +353,12 @@
 			continue;
 		snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
 			 shutdown_handlers[idx].command);
-		xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+		err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+		if (err) {
+			pr_err("%s: Error %d writing %s\n", __func__,
+				err, node);
+			return err;
+		}
 	}
 
 	return 0;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 980f328..992cb8f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1014,6 +1014,7 @@
 {
 	struct v2p_entry *entry;
 	unsigned long flags;
+	int err;
 
 	if (try) {
 		spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1029,8 +1030,11 @@
 			scsiback_del_translation_entry(info, vir);
 		}
 	} else if (!try) {
-		xenbus_printf(XBT_NIL, info->dev->nodename, state,
+		err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
 			      "%d", XenbusStateClosed);
+		if (err)
+			xenbus_dev_error(info->dev, err,
+				"%s: writing %s", __func__, state);
 	}
 }
 
@@ -1069,8 +1073,11 @@
 	snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
 	val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
 	if (IS_ERR(val)) {
-		xenbus_printf(XBT_NIL, dev->nodename, state,
+		err = xenbus_printf(XBT_NIL, dev->nodename, state,
 			      "%d", XenbusStateClosed);
+		if (err)
+			xenbus_dev_error(info->dev, err,
+				"%s: writing %s", __func__, state);
 		return;
 	}
 	strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1081,8 +1088,11 @@
 	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
 			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
 	if (XENBUS_EXIST_ERR(err)) {
-		xenbus_printf(XBT_NIL, dev->nodename, state,
+		err = xenbus_printf(XBT_NIL, dev->nodename, state,
 			      "%d", XenbusStateClosed);
+		if (err)
+			xenbus_dev_error(info->dev, err,
+				"%s: writing %s", __func__, state);
 		return;
 	}
 
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index f329eee..352abc3 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -105,7 +105,7 @@
 {
 	struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
 	struct iov_iter from;
-	int retval;
+	int retval, err;
 
 	iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
 
@@ -126,7 +126,9 @@
 			 retval);
 	else
 		p9_client_write(fid, 0, &from, &retval);
-	p9_client_clunk(fid);
+	err = p9_client_clunk(fid);
+	if (!retval && err)
+		retval = err;
 	return retval;
 }
 
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index a1fba42..42f8633 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -14,6 +14,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/magic.h>
 
 /* This is the range of ioctl() numbers we claim as ours */
 #define AUTOFS_IOC_FIRST     AUTOFS_IOC_READY
@@ -123,7 +124,8 @@
 
 static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
 {
-	return (struct autofs_sb_info *)(sb->s_fs_info);
+	return sb->s_magic != AUTOFS_SUPER_MAGIC ?
+		NULL : (struct autofs_sb_info *)(sb->s_fs_info);
 }
 
 static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 438b5bf..ce0c6ea 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -14,7 +14,6 @@
 #include <linux/pagemap.h>
 #include <linux/parser.h>
 #include <linux/bitops.h>
-#include <linux/magic.h>
 #include "autofs_i.h"
 #include <linux/module.h>
 
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 1fdf4e5..e7e25a8 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1217,9 +1217,8 @@
 		goto out_free_ph;
 	}
 
-	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
-			    ELF_MIN_ALIGN - 1);
-	bss = eppnt->p_memsz + eppnt->p_vaddr;
+	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
+	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
 	if (bss > len) {
 		error = vm_brk(len, bss - len);
 		if (error)
@@ -1707,7 +1706,7 @@
 		const struct user_regset *regset = &view->regsets[i];
 		do_thread_regset_writeback(t->task, regset);
 		if (regset->core_note_type && regset->get &&
-		    (!regset->active || regset->active(t->task, regset))) {
+		    (!regset->active || regset->active(t->task, regset) > 0)) {
 			int ret;
 			size_t size = regset->n * regset->size;
 			void *data = kmalloc(size, GFP_KERNEL);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 05169ef..b450adf 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -586,6 +586,12 @@
 	btrfs_rm_dev_replace_unblocked(fs_info);
 
 	/*
+	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
+	 * update on-disk dev stats value during commit transaction
+	 */
+	atomic_inc(&tgt_device->dev_stats_ccnt);
+
+	/*
 	 * this is again a consistent state where no dev_replace procedure
 	 * is running, the target device is part of the filesystem, the
 	 * source device is not part of the filesystem anymore and its 1st
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8dc7034..f6e1a1c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1096,8 +1096,9 @@
 
 		fs_info = BTRFS_I(mapping->host)->root->fs_info;
 		/* this is a bit racy, but that's ok */
-		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
-					     BTRFS_DIRTY_METADATA_THRESH);
+		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+					     BTRFS_DIRTY_METADATA_THRESH,
+					     fs_info->dirty_metadata_batch);
 		if (ret < 0)
 			return 0;
 	}
@@ -4107,8 +4108,9 @@
 	if (flush_delayed)
 		btrfs_balance_delayed_items(root);
 
-	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
-				     BTRFS_DIRTY_METADATA_THRESH);
+	ret = __percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+				     BTRFS_DIRTY_METADATA_THRESH,
+				     root->fs_info->dirty_metadata_batch);
 	if (ret > 0) {
 		balance_dirty_pages_ratelimited(
 				   root->fs_info->btree_inode->i_mapping);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 44a4385..6661116 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -10853,7 +10853,7 @@
 		/* Don't want to race with allocators so take the groups_sem */
 		down_write(&space_info->groups_sem);
 		spin_lock(&block_group->lock);
-		if (block_group->reserved ||
+		if (block_group->reserved || block_group->pinned ||
 		    btrfs_block_group_used(&block_group->item) ||
 		    block_group->ro ||
 		    list_is_singular(&block_group->list)) {
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 03ac3ab..a1615e8 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3830,8 +3830,8 @@
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		tag_pages_for_writeback(mapping, index, end);
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+	       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+			tag))) {
 		unsigned i;
 
 		scanned = 1;
@@ -3841,11 +3841,6 @@
 			if (!PagePrivate(page))
 				continue;
 
-			if (!wbc->range_cyclic && page->index > end) {
-				done = 1;
-				break;
-			}
-
 			spin_lock(&mapping->private_lock);
 			if (!PagePrivate(page)) {
 				spin_unlock(&mapping->private_lock);
@@ -3978,8 +3973,8 @@
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && !nr_to_write_done && (index <= end) &&
-	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+			(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
+						&index, end, tag))) {
 		unsigned i;
 
 		scanned = 1;
@@ -4004,12 +3999,6 @@
 				continue;
 			}
 
-			if (!wbc->range_cyclic && page->index > end) {
-				done = 1;
-				unlock_page(page);
-				continue;
-			}
-
 			if (wbc->sync_mode != WB_SYNC_NONE) {
 				if (PageWriteback(page))
 					flush_fn(data);
@@ -4298,6 +4287,7 @@
 	struct extent_map *em;
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
+	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4320,6 +4310,8 @@
 					    extent_map_end(em) - 1,
 					    EXTENT_LOCKED | EXTENT_WRITEBACK,
 					    0, NULL)) {
+				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+					&btrfs_inode->runtime_flags);
 				remove_extent_mapping(map, em);
 				/* once for the rb tree */
 				free_extent_map(em);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index dfd9986..9afad8c 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2237,6 +2237,21 @@
 }
 
 /*
+ * Check if the leaf is the last leaf. Which means all node pointers
+ * are at their last position.
+ */
+static bool is_last_leaf(struct btrfs_path *path)
+{
+	int i;
+
+	for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
+		if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
+			return false;
+	}
+	return true;
+}
+
+/*
  * returns < 0 on error, 0 when more leafs are to be scanned.
  * returns 1 when done.
  */
@@ -2249,6 +2264,7 @@
 	struct ulist *roots = NULL;
 	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
 	u64 num_bytes;
+	bool done;
 	int slot;
 	int ret;
 
@@ -2277,6 +2293,7 @@
 		mutex_unlock(&fs_info->qgroup_rescan_lock);
 		return ret;
 	}
+	done = is_last_leaf(path);
 
 	btrfs_item_key_to_cpu(path->nodes[0], &found,
 			      btrfs_header_nritems(path->nodes[0]) - 1);
@@ -2323,6 +2340,8 @@
 	}
 	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 
+	if (done && !ret)
+		ret = 1;
 	return ret;
 }
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 04c61bc..9140aed 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1325,18 +1325,19 @@
 	struct mapping_node *node = NULL;
 	struct reloc_control *rc = root->fs_info->reloc_ctl;
 
-	spin_lock(&rc->reloc_root_tree.lock);
-	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
-			      root->node->start);
-	if (rb_node) {
-		node = rb_entry(rb_node, struct mapping_node, rb_node);
-		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+	if (rc) {
+		spin_lock(&rc->reloc_root_tree.lock);
+		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+				      root->node->start);
+		if (rb_node) {
+			node = rb_entry(rb_node, struct mapping_node, rb_node);
+			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+		}
+		spin_unlock(&rc->reloc_root_tree.lock);
+		if (!node)
+			return;
+		BUG_ON((struct btrfs_root *)node->data != root);
 	}
-	spin_unlock(&rc->reloc_root_tree.lock);
-
-	if (!node)
-		return;
-	BUG_ON((struct btrfs_root *)node->data != root);
 
 	spin_lock(&root->fs_info->trans_lock);
 	list_del_init(&root->root_list);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 44d3492..44966fd 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2979,8 +2979,11 @@
 	mutex_unlock(&log_root_tree->log_mutex);
 
 	/*
-	 * The barrier before waitqueue_active is implied by mutex_unlock
+	 * The barrier before waitqueue_active is needed so all the updates
+	 * above are seen by the woken threads. It might not be necessary, but
+	 * proving that seems to be hard.
 	 */
+	smp_mb();
 	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
 		wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
@@ -2991,8 +2994,11 @@
 	mutex_unlock(&root->log_mutex);
 
 	/*
-	 * The barrier before waitqueue_active is implied by mutex_unlock
+	 * The barrier before waitqueue_active is needed so all the updates
+	 * above are seen by the woken threads. It might not be necessary, but
+	 * proving that seems to be hard.
 	 */
+	smp_mb();
 	if (waitqueue_active(&root->log_commit_wait[index1]))
 		wake_up(&root->log_commit_wait[index1]);
 	return ret;
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 41df8a2..2026885 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -195,7 +195,6 @@
 		pr_err("\n");
 		pr_err("Error: Unexpected object collision\n");
 		cachefiles_printk_object(object, xobject);
-		BUG();
 	}
 	atomic_inc(&xobject->usage);
 	write_unlock(&cache->active_lock);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index afbdc41..5e3bc9d 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@
 	struct cachefiles_one_read *monitor =
 		container_of(wait, struct cachefiles_one_read, monitor);
 	struct cachefiles_object *object;
+	struct fscache_retrieval *op = monitor->op;
 	struct wait_bit_key *key = _key;
 	struct page *page = wait->private;
 
@@ -51,16 +52,22 @@
 	list_del(&wait->task_list);
 
 	/* move onto the action list and queue for FS-Cache thread pool */
-	ASSERT(monitor->op);
+	ASSERT(op);
 
-	object = container_of(monitor->op->op.object,
-			      struct cachefiles_object, fscache);
+	/* We need to temporarily bump the usage count as we don't own a ref
+	 * here otherwise cachefiles_read_copier() may free the op between the
+	 * monitor being enqueued on the op->to_do list and the op getting
+	 * enqueued on the work queue.
+	 */
+	fscache_get_retrieval(op);
 
+	object = container_of(op->op.object, struct cachefiles_object, fscache);
 	spin_lock(&object->work_lock);
-	list_add_tail(&monitor->op_link, &monitor->op->to_do);
+	list_add_tail(&monitor->op_link, &op->to_do);
 	spin_unlock(&object->work_lock);
 
-	fscache_enqueue_retrieval(monitor->op);
+	fscache_enqueue_retrieval(op);
+	fscache_put_retrieval(op);
 	return 0;
 }
 
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 7b79a54..546d643 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -838,21 +838,15 @@
 		struct page **pages = NULL, **data_pages;
 		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
 		struct page *page;
-		int want;
 		u64 offset = 0, len = 0;
 
 		max_pages = max_pages_ever;
 
 get_more_pages:
 		first = -1;
-		want = min(end - index,
-			   min((pgoff_t)PAGEVEC_SIZE,
-			       max_pages - (pgoff_t)locked_pages) - 1)
-			+ 1;
-		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-						PAGECACHE_TAG_DIRTY,
-						want);
-		dout("pagevec_lookup_tag got %d\n", pvec_pages);
+		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
+						end, PAGECACHE_TAG_DIRTY);
+		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
 		if (!pvec_pages && !locked_pages)
 			break;
 		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
@@ -870,12 +864,6 @@
 				unlock_page(page);
 				break;
 			}
-			if (!wbc->range_cyclic && page->index > end) {
-				dout("end of range %p\n", page);
-				done = 1;
-				unlock_page(page);
-				break;
-			}
 			if (strip_unit_end && (page->index > strip_unit_end)) {
 				dout("end of strip unit %p\n", page);
 				unlock_page(page);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 4a6df2c..1f75433 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1077,6 +1077,7 @@
 	if (IS_ERR(realdn)) {
 		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
 		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
+		dput(dn);
 		dn = realdn; /* note realdn contains the error */
 		goto out;
 	} else if (realdn) {
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 3d03e48..e06468f 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -123,25 +123,41 @@
 	seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
 	seq_printf(m, "Features:");
 #ifdef CONFIG_CIFS_DFS_UPCALL
-	seq_printf(m, " dfs");
+	seq_printf(m, " DFS");
 #endif
 #ifdef CONFIG_CIFS_FSCACHE
-	seq_printf(m, " fscache");
+	seq_printf(m, ",FSCACHE");
+#endif
+#ifdef CONFIG_CIFS_SMB_DIRECT
+	seq_printf(m, ",SMB_DIRECT");
+#endif
+#ifdef CONFIG_CIFS_STATS2
+	seq_printf(m, ",STATS2");
+#elif defined(CONFIG_CIFS_STATS)
+	seq_printf(m, ",STATS");
+#endif
+#ifdef CONFIG_CIFS_DEBUG2
+	seq_printf(m, ",DEBUG2");
+#elif defined(CONFIG_CIFS_DEBUG)
+	seq_printf(m, ",DEBUG");
+#endif
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+	seq_printf(m, ",ALLOW_INSECURE_LEGACY");
 #endif
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-	seq_printf(m, " lanman");
+	seq_printf(m, ",WEAK_PW_HASH");
 #endif
 #ifdef CONFIG_CIFS_POSIX
-	seq_printf(m, " posix");
+	seq_printf(m, ",CIFS_POSIX");
 #endif
 #ifdef CONFIG_CIFS_UPCALL
-	seq_printf(m, " spnego");
+	seq_printf(m, ",UPCALL(SPNEGO)");
 #endif
 #ifdef CONFIG_CIFS_XATTR
-	seq_printf(m, " xattr");
+	seq_printf(m, ",XATTR");
 #endif
 #ifdef CONFIG_CIFS_ACL
-	seq_printf(m, " acl");
+	seq_printf(m, ",ACL");
 #endif
 	seq_putc(m, '\n');
 	seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
@@ -268,6 +284,10 @@
 		atomic_set(&totBufAllocCount, 0);
 		atomic_set(&totSmBufAllocCount, 0);
 #endif /* CONFIG_CIFS_STATS2 */
+		spin_lock(&GlobalMid_Lock);
+		GlobalMaxActiveXid = 0;
+		GlobalCurrentXid = 0;
+		spin_unlock(&GlobalMid_Lock);
 		spin_lock(&cifs_tcp_ses_lock);
 		list_for_each(tmp1, &cifs_tcp_ses_list) {
 			server = list_entry(tmp1, struct TCP_Server_Info,
@@ -280,6 +300,10 @@
 							  struct cifs_tcon,
 							  tcon_list);
 					atomic_set(&tcon->num_smbs_sent, 0);
+					spin_lock(&tcon->stat_lock);
+					tcon->bytes_read = 0;
+					tcon->bytes_written = 0;
+					spin_unlock(&tcon->stat_lock);
 					if (server->ops->clear_stats)
 						server->ops->clear_stats(tcon);
 				}
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index a0b3e7d..211ac47 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -101,9 +101,6 @@
 	case SFM_LESSTHAN:
 		*target = '<';
 		break;
-	case SFM_SLASH:
-		*target = '\\';
-		break;
 	case SFM_SPACE:
 		*target = ' ';
 		break;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 8407b07..741b83c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -577,10 +577,15 @@
 	}
 
 	count = 0;
+	/*
+	 * We know that all the name entries in the protocols array
+	 * are short (< 16 bytes anyway) and are NUL terminated.
+	 */
 	for (i = 0; i < CIFS_NUM_PROT; i++) {
-		strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-		count += strlen(protocols[i].name) + 1;
-		/* null at end of source and target buffers anyway */
+		size_t len = strlen(protocols[i].name) + 1;
+
+		memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+		count += len;
 	}
 	inc_rfc1001_len(pSMB, count);
 	pSMB->ByteCount = cpu_to_le16(count);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 24c19eb..a012f70 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1116,6 +1116,8 @@
 	if (!server->ops->set_file_info)
 		return -ENOSYS;
 
+	info_buf.Pad = 0;
+
 	if (attrs->ia_valid & ATTR_ATIME) {
 		set_time = true;
 		info_buf.LastAccessTime =
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index d031af8..38d26cb 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -419,7 +419,7 @@
 	struct cifs_io_parms io_parms;
 	int buf_type = CIFS_NO_BUFFER;
 	__le16 *utf16_path;
-	__u8 oplock = SMB2_OPLOCK_LEVEL_II;
+	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
 	struct smb2_file_all_info *pfile_info = NULL;
 
 	oparms.tcon = tcon;
@@ -481,7 +481,7 @@
 	struct cifs_io_parms io_parms;
 	int create_options = CREATE_NOT_DIR;
 	__le16 *utf16_path;
-	__u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
 	struct kvec iov[2];
 
 	if (backup_cred(cifs_sb))
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 323d8e3..50559a8 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -406,9 +406,17 @@
 			(struct smb_com_transaction_change_notify_rsp *)buf;
 		struct file_notify_information *pnotify;
 		__u32 data_offset = 0;
+		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
 		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
 			data_offset = le32_to_cpu(pSMBr->DataOffset);
 
+			if (data_offset >
+			    len - sizeof(struct file_notify_information)) {
+				cifs_dbg(FYI, "invalid data_offset %u\n",
+					 data_offset);
+				return true;
+			}
 			pnotify = (struct file_notify_information *)
 				((char *)&pSMBr->hdr.Protocol + data_offset);
 			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index a27fc87..ef24b45 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -376,8 +376,15 @@
 
 		new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
 				pfData->FileNameLength;
-	} else
-		new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+	} else {
+		u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+
+		if (old_entry + next_offset < old_entry) {
+			cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+			return NULL;
+		}
+		new_entry = old_entry + next_offset;
+	}
 	cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
 	/* validate that new_entry is not past end of SMB */
 	if (new_entry >= end_of_smb) {
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index c3db2a8..bb20807 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -398,6 +398,12 @@
 		goto setup_ntlmv2_ret;
 	}
 	*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+	if (!*pbuffer) {
+		rc = -ENOMEM;
+		cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+		*buflen = 0;
+		goto setup_ntlmv2_ret;
+	}
 	sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
 
 	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1238cd3..0267d8c 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -267,7 +267,7 @@
 	int rc;
 
 	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
-	    (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
+	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
 	    (buf->Attributes == 0))
 		return 0; /* would be a no op, no sense sending this */
 
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 967dfe6..e96a74d 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -209,6 +209,13 @@
 			return 0;
 
 		/*
+		 * Some Windows servers (win2016) will also pad the final
+		 * PDU in a compound to 8 bytes.
+		 */
+		if (((clc_len + 7) & ~7) == len)
+			return 0;
+
+		/*
 		 * MacOS server pads after SMB2.1 write response with 3 bytes
 		 * of junk. Other servers match RFC1001 len to actual
 		 * SMB2/SMB3 frame length (header + smb2 response specific data)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 812e488..08c1c86 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -894,6 +894,13 @@
 
 }
 
+/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
+#define GMT_TOKEN_SIZE 50
+
+/*
+ * Input buffer contains (empty) struct smb_snapshot array with size filled in
+ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
+ */
 static int
 smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
 		   struct cifsFileInfo *cfile, void __user *ioc_buf)
@@ -922,14 +929,27 @@
 			kfree(retbuf);
 			return rc;
 		}
-		if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
-			rc = -ERANGE;
-			kfree(retbuf);
-			return rc;
-		}
 
-		if (ret_data_len > snapshot_in.snapshot_array_size)
-			ret_data_len = snapshot_in.snapshot_array_size;
+		/*
+		 * Check for the minimum size, i.e. not large enough to fit even
+		 * one GMT token (snapshot).  On the first ioctl some users may
+		 * pass in a smaller size (or zero) simply to get the size of the
+		 * array so the user space caller can allocate sufficient memory
+		 * and retry the ioctl with an array size sufficient to hold all
+		 * of the snapshot GMT tokens on the second try.
+		 */
+		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
+			ret_data_len = sizeof(struct smb_snapshot_array);
+
+		/*
+		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
+		 * the snapshot array (of 50-byte GMT tokens), each
+		 * representing an available previous version of the data
+		 */
+		if (ret_data_len > (snapshot_in.snapshot_array_size +
+					sizeof(struct smb_snapshot_array)))
+			ret_data_len = snapshot_in.snapshot_array_size +
+					sizeof(struct smb_snapshot_array);
 
 		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
 			rc = -EFAULT;
@@ -969,7 +989,7 @@
 	}
 
 	srch_inf->entries_in_buffer = 0;
-	srch_inf->index_of_last_entry = 0;
+	srch_inf->index_of_last_entry = 2;
 
 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 				  fid->volatile_fid, 0, srch_inf);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 4ded64b..50251a8 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -320,7 +320,7 @@
 	smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
 
 	if (tcon != NULL) {
-#ifdef CONFIG_CIFS_STATS2
+#ifdef CONFIG_CIFS_STATS
 		uint16_t com_code = le16_to_cpu(smb2_command);
 		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
 #endif
@@ -2565,33 +2565,38 @@
 	int len;
 	unsigned int entrycount = 0;
 	unsigned int next_offset = 0;
-	FILE_DIRECTORY_INFO *entryptr;
+	char *entryptr;
+	FILE_DIRECTORY_INFO *dir_info;
 
 	if (bufstart == NULL)
 		return 0;
 
-	entryptr = (FILE_DIRECTORY_INFO *)bufstart;
+	entryptr = bufstart;
 
 	while (1) {
-		entryptr = (FILE_DIRECTORY_INFO *)
-					((char *)entryptr + next_offset);
-
-		if ((char *)entryptr + size > end_of_buf) {
+		if (entryptr + next_offset < entryptr ||
+		    entryptr + next_offset > end_of_buf ||
+		    entryptr + next_offset + size > end_of_buf) {
 			cifs_dbg(VFS, "malformed search entry would overflow\n");
 			break;
 		}
 
-		len = le32_to_cpu(entryptr->FileNameLength);
-		if ((char *)entryptr + len + size > end_of_buf) {
+		entryptr = entryptr + next_offset;
+		dir_info = (FILE_DIRECTORY_INFO *)entryptr;
+
+		len = le32_to_cpu(dir_info->FileNameLength);
+		if (entryptr + len < entryptr ||
+		    entryptr + len > end_of_buf ||
+		    entryptr + len + size > end_of_buf) {
 			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
 				 end_of_buf);
 			break;
 		}
 
-		*lastentry = (char *)entryptr;
+		*lastentry = entryptr;
 		entrycount++;
 
-		next_offset = le32_to_cpu(entryptr->NextEntryOffset);
+		next_offset = le32_to_cpu(dir_info->NextEntryOffset);
 		if (!next_offset)
 			break;
 	}
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 56fb261..d2a1a79 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1777,6 +1777,16 @@
 	struct dentry *dentry = group->cg_item.ci_dentry;
 	struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
 
+	mutex_lock(&subsys->su_mutex);
+	if (!group->cg_item.ci_parent->ci_group) {
+		/*
+		 * The parent has already been unlinked and detached
+		 * due to a rmdir.
+		 */
+		goto unlink_group;
+	}
+	mutex_unlock(&subsys->su_mutex);
+
 	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
 	spin_lock(&configfs_dirent_lock);
 	configfs_detach_prep(dentry, NULL);
@@ -1791,6 +1801,7 @@
 	dput(dentry);
 
 	mutex_lock(&subsys->su_mutex);
+unlink_group:
 	unlink_group(group);
 	mutex_unlock(&subsys->su_mutex);
 }
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 0758d32..0f46cf5 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -162,12 +162,8 @@
 	}
 
 	req = skcipher_request_alloc(tfm, gfp_flags);
-	if (!req) {
-		printk_ratelimited(KERN_ERR
-				"%s: crypto_request_alloc() failed\n",
-				__func__);
+	if (!req)
 		return -ENOMEM;
-	}
 
 	skcipher_request_set_callback(
 		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -184,9 +180,10 @@
 		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 	skcipher_request_free(req);
 	if (res) {
-		printk_ratelimited(KERN_ERR
-			"%s: crypto_skcipher_encrypt() returned %d\n",
-			__func__, res);
+		fscrypt_err(inode->i_sb,
+			    "%scryption failed for inode %lu, block %llu: %d",
+			    (rw == FS_DECRYPT ? "de" : "en"),
+			    inode->i_ino, lblk_num, res);
 		return res;
 	}
 	return 0;
@@ -332,7 +329,6 @@
 		return 0;
 	}
 
-	/* this should eventually be an flag in d_flags */
 	spin_lock(&dentry->d_lock);
 	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
 	spin_unlock(&dentry->d_lock);
@@ -359,7 +355,6 @@
 const struct dentry_operations fscrypt_d_ops = {
 	.d_revalidate = fscrypt_d_revalidate,
 };
-EXPORT_SYMBOL(fscrypt_d_ops);
 
 void fscrypt_restore_control_page(struct page *page)
 {
@@ -428,13 +423,43 @@
 	return res;
 }
 
+void fscrypt_msg(struct super_block *sb, const char *level,
+		 const char *fmt, ...)
+{
+	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+	struct va_format vaf;
+	va_list args;
+
+	if (!__ratelimit(&rs))
+		return;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	if (sb)
+		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
+	else
+		printk("%sfscrypt: %pV\n", level, &vaf);
+	va_end(args);
+}
+
 /**
  * fscrypt_init() - Set up for fs encryption.
  */
 static int __init fscrypt_init(void)
 {
+	/*
+	 * Use an unbound workqueue to allow bios to be decrypted in parallel
+	 * even when they happen to complete on the same CPU.  This sacrifices
+	 * locality, but it's worthwhile since decryption is CPU-intensive.
+	 *
+	 * Also use a high-priority workqueue to prioritize decryption work,
+	 * which blocks reads from completing, over regular application tasks.
+	 */
 	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
-							WQ_HIGHPRI, 0);
+						 WQ_UNBOUND | WQ_HIGHPRI,
+						 num_online_cpus());
 	if (!fscrypt_read_workqueue)
 		goto fail;
 
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index b18fa32..1bdb9f2 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -58,11 +58,8 @@
 
 	/* Set up the encryption request */
 	req = skcipher_request_alloc(tfm, GFP_NOFS);
-	if (!req) {
-		printk_ratelimited(KERN_ERR
-			"%s: skcipher_request_alloc() failed\n", __func__);
+	if (!req)
 		return -ENOMEM;
-	}
 	skcipher_request_set_callback(req,
 			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 			crypto_req_done, &wait);
@@ -73,8 +70,9 @@
 	res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 	skcipher_request_free(req);
 	if (res < 0) {
-		printk_ratelimited(KERN_ERR
-				"%s: Error (error code %d)\n", __func__, res);
+		fscrypt_err(inode->i_sb,
+			    "Filename encryption failed for inode %lu: %d",
+			    inode->i_ino, res);
 		return res;
 	}
 
@@ -95,23 +93,14 @@
 	struct skcipher_request *req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
 	struct scatterlist src_sg, dst_sg;
-	struct fscrypt_info *ci = inode->i_crypt_info;
-	struct crypto_skcipher *tfm = ci->ci_ctfm;
+	struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm;
 	int res = 0;
 	char iv[FS_CRYPTO_BLOCK_SIZE];
-	unsigned lim;
-
-	lim = inode->i_sb->s_cop->max_namelen(inode);
-	if (iname->len <= 0 || iname->len > lim)
-		return -EIO;
 
 	/* Allocate request */
 	req = skcipher_request_alloc(tfm, GFP_NOFS);
-	if (!req) {
-		printk_ratelimited(KERN_ERR
-			"%s: crypto_request_alloc() failed\n",  __func__);
+	if (!req)
 		return -ENOMEM;
-	}
 	skcipher_request_set_callback(req,
 		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 		crypto_req_done, &wait);
@@ -126,8 +115,9 @@
 	res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
 	skcipher_request_free(req);
 	if (res < 0) {
-		printk_ratelimited(KERN_ERR
-				"%s: Error (error code %d)\n", __func__, res);
+		fscrypt_err(inode->i_sb,
+			    "Filename decryption failed for inode %lu: %d",
+			    inode->i_ino, res);
 		return res;
 	}
 
@@ -340,12 +330,12 @@
 		return 0;
 	}
 	ret = fscrypt_get_encryption_info(dir);
-	if (ret && ret != -EOPNOTSUPP)
+	if (ret)
 		return ret;
 
 	if (dir->i_crypt_info) {
 		if (!fscrypt_fname_encrypted_size(dir, iname->len,
-						  dir->i_sb->s_cop->max_namelen(dir),
+						  dir->i_sb->s_cop->max_namelen,
 						  &fname->crypto_buf.len))
 			return -ENAMETOOLONG;
 		fname->crypto_buf.name = kmalloc(fname->crypto_buf.len,
diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c
index 62dae83..cd84469 100644
--- a/fs/crypto/fscrypt_ice.c
+++ b/fs/crypto/fscrypt_ice.c
@@ -12,6 +12,8 @@
 
 #include "fscrypt_ice.h"
 
+extern int fscrypt_get_mode_key_size(int mode);
+
 int fscrypt_using_hardware_encryption(const struct inode *inode)
 {
 	struct fscrypt_info *ci = inode->i_crypt_info;
@@ -21,6 +23,30 @@
 }
 EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
 
+size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (inode)
+		ci = inode->i_crypt_info;
+	if (!ci)
+		return 0;
+
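+	/* The ICE raw key material is split in half: the first half is the
+	 * encryption key and the second half (see
+	 * fscrypt_get_ice_encryption_salt()) is the salt, so each is half of
+	 * the mode's key size.
+	 */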
+	return fscrypt_get_mode_key_size(ci->ci_data_mode) / 2;
+}
+
+size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode)
+{
+	struct fscrypt_info *ci = NULL;
+
+	if (inode)
+		ci = inode->i_crypt_info;
+	if (!ci)
+		return 0;
+
+	return fscrypt_get_mode_key_size(ci->ci_data_mode) / 2;
+}
+
 /*
  * Retrieves encryption key from the inode
  */
@@ -44,6 +70,7 @@
 char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
 {
 	struct fscrypt_info *ci = NULL;
+	int size = 0;
 
 	if (!inode)
 		return NULL;
@@ -52,7 +79,11 @@
 	if (!ci)
 		return NULL;
 
-	return &(ci->ci_raw_key[fscrypt_get_ice_encryption_key_size(inode)]);
+	size = fscrypt_get_ice_encryption_key_size(inode);
+	if (!size)
+		return NULL;
+
+	return &(ci->ci_raw_key[size]);
 }
 
 /*
@@ -126,16 +157,29 @@
 }
 EXPORT_SYMBOL(fscrypt_set_ice_dun);
 
+void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip)
+{
+#ifdef CONFIG_DM_DEFAULT_KEY
+	bio->bi_crypt_skip = bi_crypt_skip;
+#endif
+}
+EXPORT_SYMBOL(fscrypt_set_ice_skip);
+
 /*
  * This function will be used for filesystem when deciding to merge bios.
  * Basic assumption is, if inline_encryption is set, single bio has to
  * guarantee consecutive LBAs as well as ino|pg->index.
  */
-bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
+bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted,
+						int bi_crypt_skip)
 {
 	if (!bio)
 		return true;
 
+#ifdef CONFIG_DM_DEFAULT_KEY
+	if (bi_crypt_skip != bio->bi_crypt_skip)
+		return false;
+#endif
 	/* if both of them are not encrypted, no further check is needed */
 	if (!bio_dun(bio) && !bio_encrypted)
 		return true;
diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h
index d448eae..3115cd0 100644
--- a/fs/crypto/fscrypt_ice.h
+++ b/fs/crypto/fscrypt_ice.h
@@ -40,17 +40,10 @@
 bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
 					const struct inode *inode2);
 
-static inline size_t fscrypt_get_ice_encryption_key_size(
-					const struct inode *inode)
-{
-	return FS_AES_256_XTS_KEY_SIZE / 2;
-}
+size_t fscrypt_get_ice_encryption_key_size(const struct inode *inode);
 
-static inline size_t fscrypt_get_ice_encryption_salt_size(
-					const struct inode *inode)
-{
-	return FS_AES_256_XTS_KEY_SIZE / 2;
-}
+size_t fscrypt_get_ice_encryption_salt_size(const struct inode *inode);
+
 #else
 static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
 {
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 6563f88..f2251a8 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -20,15 +20,7 @@
 
 /* Encryption parameters */
 #define FS_IV_SIZE			16
-#define FS_AES_128_ECB_KEY_SIZE		16
-#define FS_AES_128_CBC_KEY_SIZE		16
-#define FS_AES_128_CTS_KEY_SIZE		16
-#define FS_AES_256_GCM_KEY_SIZE		32
-#define FS_AES_256_CBC_KEY_SIZE		32
-#define FS_AES_256_CTS_KEY_SIZE		32
-#define FS_AES_256_XTS_KEY_SIZE		64
-
-#define FS_KEY_DERIVATION_NONCE_SIZE		16
+#define FS_KEY_DERIVATION_NONCE_SIZE	16
 
 /**
  * Encryption context for inode
@@ -129,6 +121,15 @@
 				  gfp_t gfp_flags);
 extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
 					      gfp_t gfp_flags);
+extern const struct dentry_operations fscrypt_d_ops;
+
+extern void __printf(3, 4) __cold
+fscrypt_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+
+#define fscrypt_warn(sb, fmt, ...)		\
+	fscrypt_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define fscrypt_err(sb, fmt, ...)		\
+	fscrypt_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
 
 /* fname.c */
 extern int fname_encrypt(struct inode *inode, const struct qstr *iname,
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index bec0649..926e5df 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -39,8 +39,9 @@
 	dir = dget_parent(file_dentry(filp));
 	if (IS_ENCRYPTED(d_inode(dir)) &&
 	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
-		pr_warn_ratelimited("fscrypt: inconsistent encryption contexts: %lu/%lu",
-				    d_inode(dir)->i_ino, inode->i_ino);
+		fscrypt_warn(inode->i_sb,
+			     "inconsistent encryption contexts: %lu/%lu",
+			     d_inode(dir)->i_ino, inode->i_ino);
 		err = -EPERM;
 	}
 	dput(dir);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 1866733..6219c91 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -19,17 +19,16 @@
 
 static struct crypto_shash *essiv_hash_tfm;
 
-/**
- * derive_key_aes() - Derive a key using AES-128-ECB
- * @deriving_key: Encryption key used for derivation.
- * @source_key:   Source key to which to apply derivation.
- * @derived_raw_key:  Derived raw key.
+/*
+ * Key derivation function.  This generates the derived key by encrypting the
+ * master key with AES-128-ECB using the inode's nonce as the AES key.
  *
- * Return: Zero on success; non-zero otherwise.
+ * The master key must be at least as long as the derived key.  If the master
+ * key is longer, then only the first 'derived_keysize' bytes are used.
  */
-static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
-				const struct fscrypt_key *source_key,
-				u8 derived_raw_key[FS_MAX_KEY_SIZE])
+static int derive_key_aes(const u8 *master_key,
+			  const struct fscrypt_context *ctx,
+			  u8 *derived_key, unsigned int derived_keysize)
 {
 	int res = 0;
 	struct skcipher_request *req = NULL;
@@ -51,14 +50,13 @@
 	skcipher_request_set_callback(req,
 			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 			crypto_req_done, &wait);
-	res = crypto_skcipher_setkey(tfm, deriving_key,
-					FS_AES_128_ECB_KEY_SIZE);
+	res = crypto_skcipher_setkey(tfm, ctx->nonce, sizeof(ctx->nonce));
 	if (res < 0)
 		goto out;
 
-	sg_init_one(&src_sg, source_key->raw, source_key->size);
-	sg_init_one(&dst_sg, derived_raw_key, source_key->size);
-	skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
+	sg_init_one(&src_sg, master_key, derived_keysize);
+	sg_init_one(&dst_sg, derived_key, derived_keysize);
+	skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize,
 				   NULL);
 	res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 out:
@@ -67,125 +65,156 @@
 	return res;
 }
 
-static int validate_user_key(struct fscrypt_info *crypt_info,
-			struct fscrypt_context *ctx,
-			const char *prefix, int min_keysize)
+/*
+ * Search the current task's subscribed keyrings for a "logon" key with
+ * description prefix:descriptor, and if found acquire a read lock on it and
+ * return a pointer to its validated payload in *payload_ret.
+ */
+static struct key *
+find_and_lock_process_key(const char *prefix,
+			  const u8 descriptor[FS_KEY_DESCRIPTOR_SIZE],
+			  unsigned int min_keysize,
+			  const struct fscrypt_key **payload_ret)
 {
 	char *description;
-	struct key *keyring_key;
-	struct fscrypt_key *master_key;
+	struct key *key;
 	const struct user_key_payload *ukp;
-	int res;
+	const struct fscrypt_key *payload;
 
 	description = kasprintf(GFP_NOFS, "%s%*phN", prefix,
-				FS_KEY_DESCRIPTOR_SIZE,
-				ctx->master_key_descriptor);
+				FS_KEY_DESCRIPTOR_SIZE, descriptor);
 	if (!description)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
-	keyring_key = request_key(&key_type_logon, description, NULL);
+	key = request_key(&key_type_logon, description, NULL);
 	kfree(description);
-	if (IS_ERR(keyring_key))
-		return PTR_ERR(keyring_key);
-	down_read(&keyring_key->sem);
+	if (IS_ERR(key))
+		return key;
 
-	if (keyring_key->type != &key_type_logon) {
-		printk_once(KERN_WARNING
-				"%s: key type must be logon\n", __func__);
-		res = -ENOKEY;
-		goto out;
-	}
-	ukp = user_key_payload_locked(keyring_key);
-	if (!ukp) {
-		/* key was revoked before we acquired its semaphore */
-		res = -EKEYREVOKED;
-		goto out;
-	}
-	if (ukp->datalen != sizeof(struct fscrypt_key)) {
-		res = -EINVAL;
-		goto out;
-	}
-	master_key = (struct fscrypt_key *)ukp->data;
-	BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
+	down_read(&key->sem);
+	ukp = user_key_payload_locked(key);
 
-	if (master_key->size < min_keysize || master_key->size > FS_MAX_KEY_SIZE
-	    || master_key->size % AES_BLOCK_SIZE != 0) {
-		printk_once(KERN_WARNING
-				"%s: key size incorrect: %d\n",
-				__func__, master_key->size);
-		res = -ENOKEY;
-		goto out;
-	}
-	res = derive_key_aes(ctx->nonce, master_key, crypt_info->ci_raw_key);
-	/* If we don't need to derive, we still want to do everything
-	 * up until now to validate the key. It's cleaner to fail now
-	 * than to fail in block I/O.
-	if (!is_private_data_mode(crypt_info)) {
-		res = derive_key_aes(ctx->nonce, master_key,
-				crypt_info->ci_raw_key);
-	} else {
-		 * Inline encryption: no key derivation required because IVs are
-		 * assigned based on iv_sector.
+	if (!ukp) /* was the key revoked before we acquired its semaphore? */
+		goto invalid;
 
-		BUILD_BUG_ON(sizeof(crypt_info->ci_raw_key) !=
-				sizeof(master_key->raw));
-		memcpy(crypt_info->ci_raw_key,
-			master_key->raw, sizeof(crypt_info->ci_raw_key));
-		res = 0;
+	payload = (const struct fscrypt_key *)ukp->data;
+
+	if (ukp->datalen != sizeof(struct fscrypt_key) ||
+	    payload->size < 1 || payload->size > FS_MAX_KEY_SIZE) {
+		fscrypt_warn(NULL,
+			     "key with description '%s' has invalid payload",
+			     key->description);
+		goto invalid;
 	}
-	 */
-out:
-	up_read(&keyring_key->sem);
-	key_put(keyring_key);
-	return res;
+
+	if (payload->size < min_keysize) {
+		fscrypt_warn(NULL,
+			     "key with description '%s' is too short (got %u bytes, need %u+ bytes)",
+			     key->description, payload->size, min_keysize);
+		goto invalid;
+	}
+
+	*payload_ret = payload;
+	return key;
+
+invalid:
+	up_read(&key->sem);
+	key_put(key);
+	return ERR_PTR(-ENOKEY);
 }
 
-static const struct {
+/* Find the master key, then derive the inode's actual encryption key */
+static int find_and_derive_key(const struct inode *inode,
+			       const struct fscrypt_context *ctx,
+			       u8 *derived_key, unsigned int derived_keysize)
+{
+	struct key *key;
+	const struct fscrypt_key *payload;
+	int err;
+
+	key = find_and_lock_process_key(FS_KEY_DESC_PREFIX,
+					ctx->master_key_descriptor,
+					derived_keysize, &payload);
+	if (key == ERR_PTR(-ENOKEY) && inode->i_sb->s_cop->key_prefix) {
+		key = find_and_lock_process_key(inode->i_sb->s_cop->key_prefix,
+						ctx->master_key_descriptor,
+						derived_keysize, &payload);
+	}
+	if (IS_ERR(key))
+		return PTR_ERR(key);
+	err = derive_key_aes(payload->raw, ctx, derived_key, derived_keysize);
+	/* If we don't need to derive, we still want to do everything
+	 * up until now to validate the key. It's cleaner to fail now
+	 * than to fail in block I/O.
+	 */
+	up_read(&key->sem);
+	key_put(key);
+	return err;
+}
+
+static struct fscrypt_mode {
+	const char *friendly_name;
 	const char *cipher_str;
 	int keysize;
+	bool logged_impl_name;
 } available_modes[] = {
-	[FS_ENCRYPTION_MODE_AES_256_XTS] = { "xts(aes)",
-					     FS_AES_256_XTS_KEY_SIZE },
-	[FS_ENCRYPTION_MODE_AES_256_CTS] = { "cts(cbc(aes))",
-					     FS_AES_256_CTS_KEY_SIZE },
-	[FS_ENCRYPTION_MODE_AES_128_CBC] = { "cbc(aes)",
-					     FS_AES_128_CBC_KEY_SIZE },
-	[FS_ENCRYPTION_MODE_AES_128_CTS] = { "cts(cbc(aes))",
-					     FS_AES_128_CTS_KEY_SIZE },
-	[FS_ENCRYPTION_MODE_SPECK128_256_XTS] = { "xts(speck128)",	64 },
-	[FS_ENCRYPTION_MODE_SPECK128_256_CTS] = { "cts(cbc(speck128))",	32 },
-	[FS_ENCRYPTION_MODE_PRIVATE]	 = { "bugon",
-					     FS_AES_256_XTS_KEY_SIZE },
+	[FS_ENCRYPTION_MODE_AES_256_XTS] = {
+		.friendly_name = "AES-256-XTS",
+		.cipher_str = "xts(aes)",
+		.keysize = 64,
+	},
+	[FS_ENCRYPTION_MODE_AES_256_CTS] = {
+		.friendly_name = "AES-256-CTS-CBC",
+		.cipher_str = "cts(cbc(aes))",
+		.keysize = 32,
+	},
+	[FS_ENCRYPTION_MODE_AES_128_CBC] = {
+		.friendly_name = "AES-128-CBC",
+		.cipher_str = "cbc(aes)",
+		.keysize = 16,
+	},
+	[FS_ENCRYPTION_MODE_AES_128_CTS] = {
+		.friendly_name = "AES-128-CTS-CBC",
+		.cipher_str = "cts(cbc(aes))",
+		.keysize = 16,
+	},
+	[FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
+		.friendly_name = "Speck128/256-XTS",
+		.cipher_str = "xts(speck128)",
+		.keysize = 64,
+	},
+	[FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
+		.friendly_name = "Speck128/256-CTS-CBC",
+		.cipher_str = "cts(cbc(speck128))",
+		.keysize = 32,
+	},
+	[FS_ENCRYPTION_MODE_PRIVATE] = {
+		.friendly_name = "ICE",
+		.cipher_str = "bugon",
+		.keysize = 64,
+	},
 };
 
-static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
-		const char **cipher_str_ret, int *keysize_ret, int *fname)
+static struct fscrypt_mode *
+select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
 {
-	u32 mode;
-
 	if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
-		pr_warn_ratelimited("fscrypt: inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)\n",
-				    inode->i_ino,
-				    ci->ci_data_mode, ci->ci_filename_mode);
-		return -EINVAL;
+		fscrypt_warn(inode->i_sb,
+			     "inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)",
+			     inode->i_ino, ci->ci_data_mode,
+			     ci->ci_filename_mode);
+		return ERR_PTR(-EINVAL);
 	}
 
-	if (S_ISREG(inode->i_mode)) {
-		ci->ci_mode = CI_DATA_MODE;
-		mode = ci->ci_data_mode;
-	} else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
-		ci->ci_mode = CI_FNAME_MODE;
-		mode = ci->ci_filename_mode;
-		*fname = 1;
-	} else {
-		WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
-			  inode->i_ino, (inode->i_mode & S_IFMT));
-		return -EINVAL;
-	}
+	if (S_ISREG(inode->i_mode))
+		return &available_modes[ci->ci_data_mode];
 
-	*cipher_str_ret = available_modes[mode].cipher_str;
-	*keysize_ret = available_modes[mode].keysize;
-	return 0;
+	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+		return &available_modes[ci->ci_filename_mode];
+
+	WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
+		  inode->i_ino, (inode->i_mode & S_IFMT));
+	return ERR_PTR(-EINVAL);
 }
 
 static void put_crypt_info(struct fscrypt_info *ci)
@@ -209,8 +238,9 @@
 
 		tfm = crypto_alloc_shash("sha256", 0, 0);
 		if (IS_ERR(tfm)) {
-			pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld\n",
-					    PTR_ERR(tfm));
+			fscrypt_warn(NULL,
+				     "error allocating SHA-256 transform: %ld",
+				     PTR_ERR(tfm));
 			return PTR_ERR(tfm);
 		}
 		prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
@@ -271,15 +301,20 @@
 	FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
 }
 
+int fscrypt_get_mode_key_size(int mode)
+{
+	return available_modes[mode].keysize;
+}
+EXPORT_SYMBOL(fscrypt_get_mode_key_size);
+
 int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
 	struct fscrypt_context ctx;
 	struct crypto_skcipher *ctfm;
-	const char *cipher_str;
-	int keysize;
+	struct fscrypt_mode *mode;
+	u8 *raw_key = NULL;
 	int res;
-	int fname = 0;
 
 	if (inode->i_crypt_info)
 		return 0;
@@ -322,70 +357,71 @@
 	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
 				sizeof(crypt_info->ci_master_key));
 
-	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize,
-				&fname);
-	if (res)
+	mode = select_encryption_mode(crypt_info, inode);
+	if (IS_ERR(mode)) {
+		res = PTR_ERR(mode);
 		goto out;
+	}
 
 	/*
 	 * This cannot be a stack buffer because it is passed to the scatterlist
 	 * crypto API as part of key derivation.
 	 */
 	res = -ENOMEM;
-
-	res = validate_user_key(crypt_info, &ctx, FS_KEY_DESC_PREFIX,
-				keysize);
-	if (res && inode->i_sb->s_cop->key_prefix) {
-		int res2 = validate_user_key(crypt_info, &ctx,
-					     inode->i_sb->s_cop->key_prefix,
-					     keysize);
-		if (res2) {
-			if (res2 == -ENOKEY)
-				res = -ENOKEY;
-			goto out;
-		}
-		res = 0;
-	} else if (res) {
+	raw_key = kmalloc(mode->keysize, GFP_NOFS);
+	if (!raw_key)
 		goto out;
-	}
+
+	res = find_and_derive_key(inode, &ctx, raw_key, mode->keysize);
+	if (res)
+		goto out;
 
 	if (is_private_data_mode(crypt_info)) {
 		if (!fscrypt_is_ice_capable(inode->i_sb)) {
 			pr_warn("%s: ICE support not available\n",
-					__func__);
+				__func__);
 			res = -EINVAL;
 			goto out;
 		}
 		/* Let's encrypt/decrypt by ICE */
+		memcpy(crypt_info->ci_raw_key, raw_key, mode->keysize);
 		goto do_ice;
 	}
 
-
-	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
-	if (!ctfm || IS_ERR(ctfm)) {
-		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
-		pr_debug("%s: error %d (inode %lu) allocating crypto tfm\n",
-			 __func__, res, inode->i_ino);
+	ctfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0);
+	if (IS_ERR(ctfm)) {
+		res = PTR_ERR(ctfm);
+		fscrypt_warn(inode->i_sb,
+			     "error allocating '%s' transform for inode %lu: %d",
+			     mode->cipher_str, inode->i_ino, res);
 		goto out;
 	}
+	if (unlikely(!mode->logged_impl_name)) {
+		/*
+		 * fscrypt performance can vary greatly depending on which
+		 * crypto algorithm implementation is used.  Help people debug
+		 * performance problems by logging the ->cra_driver_name the
+		 * first time a mode is used.  Note that multiple threads can
+		 * race here, but it doesn't really matter.
+		 */
+		mode->logged_impl_name = true;
+		pr_info("fscrypt: %s using implementation \"%s\"\n",
+			mode->friendly_name,
+			crypto_skcipher_alg(ctfm)->base.cra_driver_name);
+	}
 	crypt_info->ci_ctfm = ctfm;
-	crypto_skcipher_clear_flags(ctfm, ~0);
 	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
-	/*
-	 * if the provided key is longer than keysize, we use the first
-	 * keysize bytes of the derived key only
-	 */
-	res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key, keysize);
+	res = crypto_skcipher_setkey(ctfm, raw_key, mode->keysize);
 	if (res)
 		goto out;
 
 	if (S_ISREG(inode->i_mode) &&
 	    crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
-		res = init_essiv_generator(crypt_info, crypt_info->ci_raw_key,
-						keysize);
+		res = init_essiv_generator(crypt_info, raw_key, mode->keysize);
 		if (res) {
-			pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n",
-				 __func__, res, inode->i_ino);
+			fscrypt_warn(inode->i_sb,
+				     "error initializing ESSIV generator for inode %lu: %d",
+				     inode->i_ino, res);
 			goto out;
 		}
 	}
diff --git a/fs/dcache.c b/fs/dcache.c
index 3c8c1a1..10b31f3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -286,7 +286,8 @@
 		spin_unlock(&dentry->d_lock);
 		name->name = p->name;
 	} else {
-		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
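+		/* Copy only the name bytes plus the NUL terminator rather than
+		 * the full DNAME_INLINE_LEN buffer.
+		 */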
+		memcpy(name->inline_name, dentry->d_iname,
+		       dentry->d_name.len + 1);
 		spin_unlock(&dentry->d_lock);
 		name->name = name->inline_name;
 	}
@@ -352,14 +353,11 @@
 	__releases(dentry->d_inode->i_lock)
 {
 	struct inode *inode = dentry->d_inode;
-	bool hashed = !d_unhashed(dentry);
 
-	if (hashed)
-		raw_write_seqcount_begin(&dentry->d_seq);
+	raw_write_seqcount_begin(&dentry->d_seq);
 	__d_clear_type_and_inode(dentry);
 	hlist_del_init(&dentry->d_u.d_alias);
-	if (hashed)
-		raw_write_seqcount_end(&dentry->d_seq);
+	raw_write_seqcount_end(&dentry->d_seq);
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&inode->i_lock);
 	if (!inode->i_nlink)
@@ -1914,10 +1912,12 @@
 
 	if (root_inode) {
 		res = __d_alloc(root_inode->i_sb, NULL);
-		if (res)
+		if (res) {
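+			/* The root dentry can be reached under RCU, so make
+			 * sure it is only freed after an RCU grace period.
+			 */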
+			res->d_flags |= DCACHE_RCUACCESS;
 			d_instantiate(res, root_inode);
-		else
+		} else {
 			iput(root_inode);
+		}
 	}
 	return res;
 }
diff --git a/fs/exec.c b/fs/exec.c
index d27f5e9..70110a6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1228,15 +1228,14 @@
 	return -EAGAIN;
 }
 
-char *get_task_comm(char *buf, struct task_struct *tsk)
+char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 {
-	/* buf must be at least sizeof(tsk->comm) in size */
 	task_lock(tsk);
-	strncpy(buf, tsk->comm, sizeof(tsk->comm));
+	strncpy(buf, tsk->comm, buf_size);
 	task_unlock(tsk);
 	return buf;
 }
-EXPORT_SYMBOL_GPL(get_task_comm);
+EXPORT_SYMBOL_GPL(__get_task_comm);
 
 /*
  * These functions flushes out all traces of the currently running executable
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ad13f07..2455fe1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -378,6 +378,8 @@
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
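+	/* If the bitmap was already verified (e.g. by another task that took
+	 * the group lock first), skip the checksum re-verification.
+	 */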
+	if (buffer_verified(bh))
+		goto verified;
 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
 			desc, bh))) {
 		ext4_unlock_group(sb, block_group);
@@ -400,6 +402,7 @@
 		return -EFSCORRUPTED;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e8b3650..e16bc4c 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -74,7 +74,7 @@
 	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
 	else if (unlikely(((char *) de - buf) + rlen > size))
-		error_msg = "directory entry across range";
+		error_msg = "directory entry overrun";
 	else if (unlikely(le32_to_cpu(de->inode) >
 			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
@@ -83,18 +83,16 @@
 
 	if (filp)
 		ext4_error_file(filp, function, line, bh->b_blocknr,
-				"bad entry in directory: %s - offset=%u(%u), "
-				"inode=%u, rec_len=%d, name_len=%d",
-				error_msg, (unsigned) (offset % size),
-				offset, le32_to_cpu(de->inode),
-				rlen, de->name_len);
+				"bad entry in directory: %s - offset=%u, "
+				"inode=%u, rec_len=%d, name_len=%d, size=%d",
+				error_msg, offset, le32_to_cpu(de->inode),
+				rlen, de->name_len, size);
 	else
 		ext4_error_inode(dir, function, line, bh->b_blocknr,
-				"bad entry in directory: %s - offset=%u(%u), "
-				"inode=%u, rec_len=%d, name_len=%d",
-				error_msg, (unsigned) (offset % size),
-				offset, le32_to_cpu(de->inode),
-				rlen, de->name_len);
+				"bad entry in directory: %s - offset=%u, "
+				"inode=%u, rec_len=%d, name_len=%d, size=%d",
+				 error_msg, offset, le32_to_cpu(de->inode),
+				 rlen, de->name_len, size);
 
 	return 1;
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 6941365..004c088 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -88,6 +88,8 @@
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
+	if (buffer_verified(bh))
+		goto verified;
 	blk = ext4_inode_bitmap(sb, desc);
 	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
 					   EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -105,6 +107,7 @@
 		return -EFSBADCRC;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 6fde321..f901b643 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -693,6 +693,10 @@
 		goto convert;
 	}
 
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out;
+
 	flags |= AOP_FLAG_NOFS;
 
 	page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -721,7 +725,7 @@
 out_up_read:
 	up_read(&EXT4_I(inode)->xattr_sem);
 out:
-	if (handle)
+	if (handle && (ret != 1))
 		ext4_journal_stop(handle);
 	brelse(iloc.bh);
 	return ret;
@@ -763,6 +767,7 @@
 
 	ext4_write_unlock_xattr(inode, &no_expand);
 	brelse(iloc.bh);
+	mark_inode_dirty(inode);
 out:
 	return copied;
 }
@@ -909,7 +914,6 @@
 		goto out;
 	}
 
-
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page) {
 		ret = -ENOMEM;
@@ -927,6 +931,9 @@
 		if (ret < 0)
 			goto out_release_page;
 	}
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out_release_page;
 
 	up_read(&EXT4_I(inode)->xattr_sem);
 	*pagep = page;
@@ -947,7 +954,6 @@
 				  unsigned len, unsigned copied,
 				  struct page *page)
 {
-	int i_size_changed = 0;
 	int ret;
 
 	ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -965,10 +971,8 @@
 	 * But it's important to update i_size while still holding page lock:
 	 * page writeout could otherwise come in and zero beyond i_size.
 	 */
-	if (pos+copied > inode->i_size) {
+	if (pos+copied > inode->i_size)
 		i_size_write(inode, pos+copied);
-		i_size_changed = 1;
-	}
 	unlock_page(page);
 	put_page(page);
 
@@ -978,8 +982,7 @@
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
-		mark_inode_dirty(inode);
+	mark_inode_dirty(inode);
 
 	return copied;
 }
@@ -1765,6 +1768,7 @@
 {
 	int err, inline_size;
 	struct ext4_iloc iloc;
+	size_t inline_len;
 	void *inline_pos;
 	unsigned int offset;
 	struct ext4_dir_entry_2 *de;
@@ -1792,8 +1796,9 @@
 		goto out;
 	}
 
+	inline_len = ext4_get_inline_size(dir);
 	offset = EXT4_INLINE_DOTDOT_SIZE;
-	while (offset < dir->i_size) {
+	while (offset < inline_len) {
 		de = ext4_get_inline_entry(dir, &iloc, offset,
 					   &inline_pos, &inline_size);
 		if (ext4_check_dir_entry(dir, NULL, de,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index af9b5f6..c38245d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1332,10 +1332,11 @@
 	loff_t old_size = inode->i_size;
 	int ret = 0, ret2;
 	int i_size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_write_end(inode, pos, len, copied);
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1363,7 +1364,7 @@
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
+	if (i_size_changed || inline_data)
 		ext4_mark_inode_dirty(handle, inode);
 
 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1437,6 +1438,7 @@
 	int partial = 0;
 	unsigned from, to;
 	int size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
@@ -1445,7 +1447,7 @@
 
 	BUG_ON(!ext4_handle_valid(handle));
 
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1476,7 +1478,7 @@
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
 
-	if (size_changed) {
+	if (size_changed || inline_data) {
 		ret2 = ext4_mark_inode_dirty(handle, inode);
 		if (!ret)
 			ret = ret2;
@@ -1974,11 +1976,7 @@
 	}
 
 	if (inline_data) {
-		BUFFER_TRACE(inode_bh, "get write access");
-		ret = ext4_journal_get_write_access(handle, inode_bh);
-
-		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+		ret = ext4_mark_inode_dirty(handle, inode);
 	} else {
 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
 					     do_journal_get_write_access);
@@ -2571,8 +2569,8 @@
 	mpd->map.m_len = 0;
 	mpd->next_page = index;
 	while (index <= end) {
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+				tag);
 		if (nr_pages == 0)
 			goto out;
 
@@ -2580,16 +2578,6 @@
 			struct page *page = pvec.pages[i];
 
 			/*
-			 * At this point, the page may be truncated or
-			 * invalidated (changing page->mapping to NULL), or
-			 * even swizzled back from swapper_space to tmpfs file
-			 * mapping. However, page->index will not change
-			 * because we have a reference on the page.
-			 */
-			if (page->index > end)
-				goto out;
-
-			/*
 			 * Accumulated enough dirty pages? This doesn't apply
 			 * to WB_SYNC_ALL mode. For integrity sync we have to
 			 * keep going because someone may be concurrently
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 0fcc336..3d6f73e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -26,6 +26,7 @@
 #include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 #include <linux/backing-dev.h>
 #include <trace/events/ext4.h>
 
@@ -2144,7 +2145,8 @@
 		 * This should tell if fe_len is exactly power of 2
 		 */
 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
-			ac->ac_2order = i - 1;
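+			/* array_index_nospec() clamps the index so it cannot
+			 * be used speculatively out of bounds.
+			 */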
+			ac->ac_2order = array_index_nospec(i - 1,
+							   sb->s_blocksize_bits + 2);
 	}
 
 	/* if stream allocation is enabled, use global goal */
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index d89754e..c2e830a 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -48,7 +48,6 @@
 	 */
 	sb_start_write(sb);
 	ext4_mmp_csum_set(sb, mmp);
-	mark_buffer_dirty(bh);
 	lock_buffer(bh);
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index d536e0a..9bad755 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1385,6 +1385,7 @@
 			goto cleanup_and_exit;
 		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
 			       "falling back\n"));
+		ret = NULL;
 	}
 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
 	if (!nblocks) {
@@ -1545,24 +1546,14 @@
 	struct inode *inode;
 	struct ext4_dir_entry_2 *de;
 	struct buffer_head *bh;
+	int err;
 
-	if (ext4_encrypted_inode(dir)) {
-		int res = fscrypt_get_encryption_info(dir);
+	err = fscrypt_prepare_lookup(dir, dentry, flags);
+	if (err)
+		return ERR_PTR(err);
 
-		/*
-		 * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
-		 * created while the directory was encrypted and we
-		 * have access to the key.
-		 */
-		if (fscrypt_has_encryption_key(dir))
-			fscrypt_set_encrypted_dentry(dentry);
-		fscrypt_set_d_op(dentry);
-		if (res && res != -ENOKEY)
-			return ERR_PTR(res);
-	}
-
-       if (dentry->d_name.len > EXT4_NAME_LEN)
-	       return ERR_PTR(-ENAMETOOLONG);
+	if (dentry->d_name.len > EXT4_NAME_LEN)
+		return ERR_PTR(-ENAMETOOLONG);
 
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
 	if (IS_ERR(bh))
@@ -3443,6 +3434,12 @@
 	int credits;
 	u8 old_file_type;
 
+	if (new.inode && new.inode->i_nlink == 0) {
+		EXT4_ERROR_INODE(new.inode,
+				 "target of rename is already freed");
+		return -EFSCORRUPTED;
+	}
+
 	if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
 	    (!projid_eq(EXT4_I(new_dir)->i_projid,
 			EXT4_I(old_dentry->d_inode)->i_projid)))
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index eb720d9..1da301e 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -18,6 +18,7 @@
 
 int ext4_resize_begin(struct super_block *sb)
 {
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	int ret = 0;
 
 	if (!capable(CAP_SYS_RESOURCE))
@@ -28,7 +29,7 @@
          * because the user tools have no way of handling this.  Probably a
          * bad time to do it anyways.
          */
-	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
 		ext4_warning(sb, "won't resize using backup superblock at %llu",
 			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1954,6 +1955,26 @@
 		}
 	}
 
+	/*
+	 * Make sure the last group has enough space so that it's
+	 * guaranteed to have enough space for all metadata blocks
+	 * that it might need to hold.  (We might not need to store
+	 * the inode table blocks in the last block group, but there
+	 * will be cases where this might be needed.)
+	 */
+	if ((ext4_group_first_block_no(sb, n_group) +
+	     ext4_group_overhead_blocks(sb, n_group) + 2 +
+	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+		n_blocks_count = ext4_group_first_block_no(sb, n_group);
+		n_group--;
+		n_blocks_count_retry = 0;
+		if (resize_inode) {
+			iput(resize_inode);
+			resize_inode = NULL;
+		}
+		goto retry;
+	}
+
 	/* extend the last group */
 	if (n_group == o_group)
 		add = n_blocks_count - o_blocks_count;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ab72207..031e43d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1174,19 +1174,13 @@
 	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
 }
 
-static unsigned ext4_max_namelen(struct inode *inode)
-{
-	return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
-		EXT4_NAME_LEN;
-}
-
 static const struct fscrypt_operations ext4_cryptops = {
 	.key_prefix		= "ext4:",
 	.get_context		= ext4_get_context,
 	.set_context		= ext4_set_context,
 	.dummy_context		= ext4_dummy_context,
 	.empty_dir		= ext4_empty_dir,
-	.max_namelen		= ext4_max_namelen,
+	.max_namelen		= EXT4_NAME_LEN,
 };
 #endif
 
@@ -2025,6 +2019,8 @@
 		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
 	if (test_opt(sb, DATA_ERR_ABORT))
 		SEQ_OPTS_PUTS("data_err=abort");
+	if (DUMMY_ENCRYPTION_ENABLED(sbi))
+		SEQ_OPTS_PUTS("test_dummy_encryption");
 
 	ext4_show_quota_options(seq, sb);
 	return 0;
@@ -2241,7 +2237,7 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
-	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
@@ -3945,13 +3941,13 @@
 			goto failed_mount2;
 		}
 	}
+	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
 	}
 
-	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
@@ -4199,11 +4195,13 @@
 	block = ext4_count_free_clusters(sb);
 	ext4_free_blocks_count_set(sbi->s_es, 
 				   EXT4_C2B(sbi, block));
+	ext4_superblock_csum_set(sb);
 	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
 				  GFP_KERNEL);
 	if (!err) {
 		unsigned long freei = ext4_count_free_inodes(sb);
 		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+		ext4_superblock_csum_set(sb);
 		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
 					  GFP_KERNEL);
 	}
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 5dc655e..54942d6 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -277,8 +277,12 @@
 	case attr_pointer_ui:
 		if (!ptr)
 			return 0;
-		return snprintf(buf, PAGE_SIZE, "%u\n",
-				*((unsigned int *) ptr));
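+		/* Attributes that point into the on-disk superblock are
+		 * stored little-endian.
+		 */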
+		if (a->attr_ptr == ptr_ext4_super_block_offset)
+			return snprintf(buf, PAGE_SIZE, "%u\n",
+					le32_to_cpup(ptr));
+		else
+			return snprintf(buf, PAGE_SIZE, "%u\n",
+					*((unsigned int *) ptr));
 	case attr_pointer_atomic:
 		if (!ptr)
 			return 0;
@@ -311,7 +315,10 @@
 		ret = kstrtoul(skip_spaces(buf), 0, &t);
 		if (ret)
 			return ret;
-		*((unsigned int *) ptr) = t;
+		if (a->attr_ptr == ptr_ext4_super_block_offset)
+			*((__le32 *) ptr) = cpu_to_le32(t);
+		else
+			*((unsigned int *) ptr) = t;
 		return len;
 	case attr_inode_readahead:
 		return inode_readahead_blks_store(a, sbi, buf, len);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 3fadfab..c10180d 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -184,6 +184,8 @@
 		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
 		if ((void *)next >= end)
 			return -EFSCORRUPTED;
+		if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
+			return -EFSCORRUPTED;
 		e = next;
 	}
 
@@ -207,12 +209,12 @@
 {
 	int error;
 
-	if (buffer_verified(bh))
-		return 0;
-
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EFSCORRUPTED;
+	if (buffer_verified(bh))
+		return 0;
+
 	if (!ext4_xattr_block_csum_verify(inode, bh))
 		return -EFSBADCRC;
 	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
@@ -643,14 +645,20 @@
 }
 
 static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+		     struct inode *inode)
 {
-	struct ext4_xattr_entry *last;
+	struct ext4_xattr_entry *last, *next;
 	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
 
 	/* Compute min_offs and last. */
 	last = s->first;
-	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+	for (; !IS_LAST_ENTRY(last); last = next) {
+		next = EXT4_XATTR_NEXT(last);
+		if ((void *)next >= s->end) {
+			EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+			return -EIO;
+		}
 		if (last->e_value_size) {
 			size_t offs = le16_to_cpu(last->e_value_offs);
 			if (offs < min_offs)
@@ -832,7 +840,7 @@
 			mb_cache_entry_delete_block(ext4_mb_cache, hash,
 						    bs->bh->b_blocknr);
 			ea_bdebug(bs->bh, "modifying in-place");
-			error = ext4_xattr_set_entry(i, s);
+			error = ext4_xattr_set_entry(i, s, inode);
 			if (!error) {
 				if (!IS_LAST_ENTRY(s->first))
 					ext4_xattr_rehash(header(s->base),
@@ -879,7 +887,7 @@
 		s->end = s->base + sb->s_blocksize;
 	}
 
-	error = ext4_xattr_set_entry(i, s);
+	error = ext4_xattr_set_entry(i, s, inode);
 	if (error == -EFSCORRUPTED)
 		goto bad_block;
 	if (error)
@@ -1077,7 +1085,7 @@
 
 	if (EXT4_I(inode)->i_extra_isize == 0)
 		return -ENOSPC;
-	error = ext4_xattr_set_entry(i, s);
+	error = ext4_xattr_set_entry(i, s, inode);
 	if (error) {
 		if (error == -ENOSPC &&
 		    ext4_has_inline_data(inode)) {
@@ -1089,7 +1097,7 @@
 			error = ext4_xattr_ibody_find(inode, i, is);
 			if (error)
 				return error;
-			error = ext4_xattr_set_entry(i, s);
+			error = ext4_xattr_set_entry(i, s, inode);
 		}
 		if (error)
 			return error;
@@ -1115,7 +1123,7 @@
 
 	if (EXT4_I(inode)->i_extra_isize == 0)
 		return -ENOSPC;
-	error = ext4_xattr_set_entry(i, s);
+	error = ext4_xattr_set_entry(i, s, inode);
 	if (error)
 		return error;
 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
@@ -1424,6 +1432,11 @@
 		last = IFIRST(header);
 		/* Find the entry best suited to be pushed into EA block */
 		for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+			/* never move system.data out of the inode */
+			if ((last->e_name_len == 4) &&
+			    (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+			    !memcmp(last->e_name, "data", 4))
+				continue;
 			total_size =
 			EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
 					EXT4_XATTR_LEN(last->e_name_len);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 91b2d00..6b24eb4 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -24,10 +24,11 @@
 #include <trace/events/f2fs.h>
 
 static struct kmem_cache *ino_entry_slab;
-struct kmem_cache *inode_entry_slab;
+struct kmem_cache *f2fs_inode_entry_slab;
 
 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
 {
+	f2fs_build_fault_attr(sbi, 0, 0);
 	set_ckpt_flags(sbi, CP_ERROR_FLAG);
 	if (!end_io)
 		f2fs_flush_merged_writes(sbi);
@@ -36,7 +37,7 @@
 /*
  * We guarantee no failure on the returned page.
  */
-struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 {
 	struct address_space *mapping = META_MAPPING(sbi);
 	struct page *page = NULL;
@@ -70,6 +71,7 @@
 		.encrypted_page = NULL,
 		.is_meta = is_meta,
 	};
+	int err;
 
 	if (unlikely(!is_meta))
 		fio.op_flags &= ~REQ_META;
@@ -84,9 +86,10 @@
 
 	fio.page = page;
 
-	if (f2fs_submit_page_bio(&fio)) {
+	err = f2fs_submit_page_bio(&fio);
+	if (err) {
 		f2fs_put_page(page, 1);
-		goto repeat;
+		return ERR_PTR(err);
 	}
 
 	lock_page(page);
@@ -95,29 +98,46 @@
 		goto repeat;
 	}
 
-	/*
-	 * if there is any IO error when accessing device, make our filesystem
-	 * readonly and make sure do not write checkpoint with non-uptodate
-	 * meta page.
-	 */
-	if (unlikely(!PageUptodate(page)))
-		f2fs_stop_checkpoint(sbi, false);
+	if (unlikely(!PageUptodate(page))) {
+		f2fs_put_page(page, 1);
+		return ERR_PTR(-EIO);
+	}
 out:
 	return page;
 }
 
-struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 {
 	return __get_meta_page(sbi, index, true);
 }
 
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+	struct page *page;
+	int count = 0;
+
+retry:
+	page = __get_meta_page(sbi, index, true);
+	if (IS_ERR(page)) {
+		if (PTR_ERR(page) == -EIO &&
+				++count <= DEFAULT_RETRY_IO_COUNT)
+			goto retry;
+
+		f2fs_stop_checkpoint(sbi, false);
+		f2fs_bug_on(sbi, 1);
+	}
+
+	return page;
+}
+
 /* for POR only */
-struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
+struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
 {
 	return __get_meta_page(sbi, index, false);
 }
 
-bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+					block_t blkaddr, int type)
 {
 	switch (type) {
 	case META_NAT:
@@ -137,8 +157,20 @@
 			return false;
 		break;
 	case META_POR:
+	case DATA_GENERIC:
 		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
-			blkaddr < MAIN_BLKADDR(sbi)))
+			blkaddr < MAIN_BLKADDR(sbi))) {
+			if (type == DATA_GENERIC) {
+				f2fs_msg(sbi->sb, KERN_WARNING,
+					"access invalid blkaddr:%u", blkaddr);
+				WARN_ON(1);
+			}
+			return false;
+		}
+		break;
+	case META_GENERIC:
+		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
+			blkaddr >= MAIN_BLKADDR(sbi)))
 			return false;
 		break;
 	default:
@@ -151,7 +183,7 @@
 /*
  * Readahead CP/NAT/SIT/SSA pages
  */
-int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 							int type, bool sync)
 {
 	struct page *page;
@@ -173,7 +205,7 @@
 	blk_start_plug(&plug);
 	for (; nrpages-- > 0; blkno++) {
 
-		if (!is_valid_blkaddr(sbi, blkno, type))
+		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
 			goto out;
 
 		switch (type) {
@@ -217,7 +249,7 @@
 	return blkno - start;
 }
 
-void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
+void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
 {
 	struct page *page;
 	bool readahead = false;
@@ -228,7 +260,7 @@
 	f2fs_put_page(page, 0);
 
 	if (readahead)
-		ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
+		f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
 }
 
 static int __f2fs_write_meta_page(struct page *page,
@@ -239,17 +271,14 @@
 
 	trace_f2fs_writepage(page, META);
 
-	if (unlikely(f2fs_cp_error(sbi))) {
-		dec_page_count(sbi, F2FS_DIRTY_META);
-		unlock_page(page);
-		return 0;
-	}
+	if (unlikely(f2fs_cp_error(sbi)))
+		goto redirty_out;
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
 		goto redirty_out;
 
-	write_meta_page(sbi, page, io_type);
+	f2fs_do_write_meta_page(sbi, page, io_type);
 	dec_page_count(sbi, F2FS_DIRTY_META);
 
 	if (wbc->for_reclaim)
@@ -294,7 +323,7 @@
 
 	trace_f2fs_writepages(mapping->host, wbc, META);
 	diff = nr_pages_to_write(sbi, META, wbc);
-	written = sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
+	written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
 	mutex_unlock(&sbi->cp_mutex);
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
 	return 0;
@@ -305,13 +334,14 @@
 	return 0;
 }
 
-long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
+long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 				long nr_to_write, enum iostat_type io_type)
 {
 	struct address_space *mapping = META_MAPPING(sbi);
-	pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
+	pgoff_t index = 0, prev = ULONG_MAX;
 	struct pagevec pvec;
 	long nwritten = 0;
+	int nr_pages;
 	struct writeback_control wbc = {
 		.for_reclaim = 0,
 	};
@@ -321,13 +351,9 @@
 
 	blk_start_plug(&plug);
 
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (unlikely(nr_pages == 0))
-			break;
+	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+				PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
@@ -458,20 +484,20 @@
 	spin_unlock(&im->ino_lock);
 }
 
-void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
 	/* add new dirty ino entry into list */
 	__add_ino_entry(sbi, ino, 0, type);
 }
 
-void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
 	/* remove dirty ino entry from list */
 	__remove_ino_entry(sbi, ino, type);
 }
 
 /* mode should be APPEND_INO or UPDATE_INO */
-bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
+bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
 {
 	struct inode_management *im = &sbi->im[mode];
 	struct ino_entry *e;
@@ -482,7 +508,7 @@
 	return e ? true : false;
 }
 
-void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
+void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
 {
 	struct ino_entry *e, *tmp;
 	int i;
@@ -501,13 +527,13 @@
 	}
 }
 
-void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
 					unsigned int devidx, int type)
 {
 	__add_ino_entry(sbi, ino, devidx, type);
 }
 
-bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
 					unsigned int devidx, int type)
 {
 	struct inode_management *im = &sbi->im[type];
@@ -522,20 +548,19 @@
 	return is_dirty;
 }
 
-int acquire_orphan_inode(struct f2fs_sb_info *sbi)
+int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
 {
 	struct inode_management *im = &sbi->im[ORPHAN_INO];
 	int err = 0;
 
 	spin_lock(&im->ino_lock);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_ORPHAN)) {
 		spin_unlock(&im->ino_lock);
 		f2fs_show_injection_info(FAULT_ORPHAN);
 		return -ENOSPC;
 	}
-#endif
+
 	if (unlikely(im->ino_num >= sbi->max_orphans))
 		err = -ENOSPC;
 	else
@@ -545,7 +570,7 @@
 	return err;
 }
 
-void release_orphan_inode(struct f2fs_sb_info *sbi)
+void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
 {
 	struct inode_management *im = &sbi->im[ORPHAN_INO];
 
@@ -555,14 +580,14 @@
 	spin_unlock(&im->ino_lock);
 }
 
-void add_orphan_inode(struct inode *inode)
+void f2fs_add_orphan_inode(struct inode *inode)
 {
 	/* add new orphan ino entry into list */
 	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
-	update_inode_page(inode);
+	f2fs_update_inode_page(inode);
 }
 
-void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
 	/* remove orphan entry from orphan list */
 	__remove_ino_entry(sbi, ino, ORPHAN_INO);
@@ -572,12 +597,7 @@
 {
 	struct inode *inode;
 	struct node_info ni;
-	int err = acquire_orphan_inode(sbi);
-
-	if (err)
-		goto err_out;
-
-	__add_ino_entry(sbi, ino, 0, ORPHAN_INO);
+	int err;
 
 	inode = f2fs_iget_retry(sbi->sb, ino);
 	if (IS_ERR(inode)) {
@@ -590,23 +610,25 @@
 	}
 
 	err = dquot_initialize(inode);
-	if (err)
+	if (err) {
+		iput(inode);
 		goto err_out;
+	}
 
-	dquot_initialize(inode);
 	clear_nlink(inode);
 
 	/* truncate all the data during iput */
 	iput(inode);
 
-	get_node_info(sbi, ino, &ni);
+	err = f2fs_get_node_info(sbi, ino, &ni);
+	if (err)
+		goto err_out;
 
 	/* ENOMEM was fully retried in f2fs_evict_inode. */
 	if (ni.blk_addr != NULL_ADDR) {
 		err = -EIO;
 		goto err_out;
 	}
-	__remove_ino_entry(sbi, ino, ORPHAN_INO);
 	return 0;
 
 err_out:
@@ -617,7 +639,7 @@
 	return err;
 }
 
-int recover_orphan_inodes(struct f2fs_sb_info *sbi)
+int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
 {
 	block_t start_blk, orphan_blocks, i, j;
 	unsigned int s_flags = sbi->sb->s_flags;
@@ -638,19 +660,28 @@
 	/* Needed for iput() to work correctly and not trash data */
 	sbi->sb->s_flags |= MS_ACTIVE;
 
-	/* Turn on quotas so that they are updated correctly */
+	/*
+	 * Turn on quotas which were not enabled for read-only mounts if
+	 * filesystem has quota feature, so that they are updated correctly.
+	 */
 	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
 #endif
 
 	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
 	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
 
-	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
+	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
 
 	for (i = 0; i < orphan_blocks; i++) {
-		struct page *page = get_meta_page(sbi, start_blk + i);
+		struct page *page;
 		struct f2fs_orphan_block *orphan_blk;
 
+		page = f2fs_get_meta_page(sbi, start_blk + i);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			goto out;
+		}
+
 		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
 		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
 			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
@@ -698,7 +729,7 @@
 	/* loop for each orphan inode entry and write them in Journal block */
 	list_for_each_entry(orphan, head, list) {
 		if (!page) {
-			page = grab_meta_page(sbi, start_blk++);
+			page = f2fs_grab_meta_page(sbi, start_blk++);
 			orphan_blk =
 				(struct f2fs_orphan_block *)page_address(page);
 			memset(orphan_blk, 0, sizeof(*orphan_blk));
@@ -740,11 +771,15 @@
 	size_t crc_offset = 0;
 	__u32 crc = 0;
 
-	*cp_page = get_meta_page(sbi, cp_addr);
+	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
+	if (IS_ERR(*cp_page))
+		return PTR_ERR(*cp_page);
+
 	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
 
 	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
 	if (crc_offset > (blk_size - sizeof(__le32))) {
+		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"invalid crc_offset: %zu", crc_offset);
 		return -EINVAL;
@@ -752,6 +787,7 @@
 
 	crc = cur_cp_crc(*cp_block);
 	if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
 		return -EINVAL;
 	}
@@ -771,14 +807,22 @@
 	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
 					&cp_page_1, version);
 	if (err)
-		goto invalid_cp1;
+		return NULL;
+
+	if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
+					sbi->blocks_per_seg) {
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"invalid cp_pack_total_block_count:%u",
+			le32_to_cpu(cp_block->cp_pack_total_block_count));
+		goto invalid_cp;
+	}
 	pre_version = *version;
 
 	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
 	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
 					&cp_page_2, version);
 	if (err)
-		goto invalid_cp2;
+		goto invalid_cp;
 	cur_version = *version;
 
 	if (cur_version == pre_version) {
@@ -786,14 +830,13 @@
 		f2fs_put_page(cp_page_2, 1);
 		return cp_page_1;
 	}
-invalid_cp2:
 	f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
 	f2fs_put_page(cp_page_1, 1);
 	return NULL;
 }
 
-int get_valid_checkpoint(struct f2fs_sb_info *sbi)
+int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_checkpoint *cp_block;
 	struct f2fs_super_block *fsb = sbi->raw_super;
@@ -805,7 +848,8 @@
 	block_t cp_blk_no;
 	int i;
 
-	sbi->ckpt = f2fs_kzalloc(sbi, cp_blks * blk_size, GFP_KERNEL);
+	sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
+				 GFP_KERNEL);
 	if (!sbi->ckpt)
 		return -ENOMEM;
 	/*
@@ -836,15 +880,15 @@
 	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
 	memcpy(sbi->ckpt, cp_block, blk_size);
 
-	/* Sanity checking of checkpoint */
-	if (sanity_check_ckpt(sbi))
-		goto free_fail_no_cp;
-
 	if (cur_page == cp1)
 		sbi->cur_cp_pack = 1;
 	else
 		sbi->cur_cp_pack = 2;
 
+	/* Sanity checking of checkpoint */
+	if (f2fs_sanity_check_ckpt(sbi))
+		goto free_fail_no_cp;
+
 	if (cp_blks <= 1)
 		goto done;
 
@@ -856,7 +900,9 @@
 		void *sit_bitmap_ptr;
 		unsigned char *ckpt = (unsigned char *)sbi->ckpt;
 
-		cur_page = get_meta_page(sbi, cp_blk_no + i);
+		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
+		if (IS_ERR(cur_page))
+			goto free_fail_no_cp;
 		sit_bitmap_ptr = page_address(cur_page);
 		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
 		f2fs_put_page(cur_page, 1);
@@ -901,7 +947,7 @@
 	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
 }
 
-void update_dirty_page(struct inode *inode, struct page *page)
+void f2fs_update_dirty_page(struct inode *inode, struct page *page)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
@@ -920,7 +966,7 @@
 	f2fs_trace_pid(page);
 }
 
-void remove_dirty_inode(struct inode *inode)
+void f2fs_remove_dirty_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
@@ -937,7 +983,7 @@
 	spin_unlock(&sbi->inode_lock[type]);
 }
 
-int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
+int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
 {
 	struct list_head *head;
 	struct inode *inode;
@@ -978,12 +1024,10 @@
 
 		iput(inode);
 		/* We need to give cpu to other writers. */
-		if (ino == cur_ino) {
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+		if (ino == cur_ino)
 			cond_resched();
-		} else {
+		else
 			ino = cur_ino;
-		}
 	} else {
 		/*
 		 * We should submit bio, since it exists several
@@ -1020,7 +1064,7 @@
 
 			/* it's on eviction */
 			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
-				update_inode_page(inode);
+				f2fs_update_inode_page(inode);
 			iput(inode);
 		}
 	}
@@ -1060,7 +1104,7 @@
 	/* write all the dirty dentry pages */
 	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
 		f2fs_unlock_all(sbi);
-		err = sync_dirty_inodes(sbi, DIR_INODE);
+		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
 		if (err)
 			goto out;
 		cond_resched();
@@ -1088,7 +1132,9 @@
 
 	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
 		up_write(&sbi->node_write);
-		err = sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
+		atomic_inc(&sbi->wb_sync_req[NODE]);
+		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
+		atomic_dec(&sbi->wb_sync_req[NODE]);
 		if (err) {
 			up_write(&sbi->node_change);
 			f2fs_unlock_all(sbi);
@@ -1115,7 +1161,7 @@
 	f2fs_unlock_all(sbi);
 }
 
-static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
 {
 	DEFINE_WAIT(wait);
 
@@ -1125,6 +1171,9 @@
 		if (!get_pages(sbi, F2FS_WB_CP_DATA))
 			break;
 
+		if (unlikely(f2fs_cp_error(sbi)))
+			break;
+
 		io_schedule_timeout(5*HZ);
 	}
 	finish_wait(&sbi->cp_wait, &wait);
@@ -1182,10 +1231,10 @@
 
 	/*
 	 * pagevec_lookup_tag and lock_page again will take
-	 * some extra time. Therefore, update_meta_pages and
-	 * sync_meta_pages are combined in this function.
+	 * some extra time. Therefore, f2fs_update_meta_pages and
+	 * f2fs_sync_meta_pages are combined in this function.
 	 */
-	struct page *page = grab_meta_page(sbi, blk_addr);
+	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 	int err;
 
 	memcpy(page_address(page), src, PAGE_SIZE);
@@ -1198,8 +1247,12 @@
 
 	/* writeout cp pack 2 page */
 	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
-	f2fs_bug_on(sbi, err);
+	if (unlikely(err && f2fs_cp_error(sbi))) {
+		f2fs_put_page(page, 1);
+		return;
+	}
 
+	f2fs_bug_on(sbi, err);
 	f2fs_put_page(page, 0);
 
 	/* submit checkpoint (with barrier if NOBARRIER is not set) */
@@ -1223,16 +1276,16 @@
 
 	/* Flush all the NAT/SIT pages */
 	while (get_pages(sbi, F2FS_DIRTY_META)) {
-		sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
 		if (unlikely(f2fs_cp_error(sbi)))
-			return -EIO;
+			break;
 	}
 
 	/*
 	 * modify checkpoint
 	 * version number is already updated
 	 */
-	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
+	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
 	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
 		ckpt->cur_node_segno[i] =
@@ -1252,7 +1305,7 @@
 	}
 
 	/* 2 cp  + n data seg summary + orphan inode blocks */
-	data_sum_blocks = npages_for_summary_flush(sbi, false);
+	data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
 	spin_lock_irqsave(&sbi->cp_lock, flags);
 	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
 		__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
@@ -1297,22 +1350,23 @@
 
 		blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
 		for (i = 0; i < nm_i->nat_bits_blocks; i++)
-			update_meta_page(sbi, nm_i->nat_bits +
+			f2fs_update_meta_page(sbi, nm_i->nat_bits +
 					(i << F2FS_BLKSIZE_BITS), blk + i);
 
 		/* Flush all the NAT BITS pages */
 		while (get_pages(sbi, F2FS_DIRTY_META)) {
-			sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
+							FS_CP_META_IO);
 			if (unlikely(f2fs_cp_error(sbi)))
-				return -EIO;
+				break;
 		}
 	}
 
 	/* write out checkpoint buffer at block 0 */
-	update_meta_page(sbi, ckpt, start_blk++);
+	f2fs_update_meta_page(sbi, ckpt, start_blk++);
 
 	for (i = 1; i < 1 + cp_payload_blks; i++)
-		update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
+		f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
 							start_blk++);
 
 	if (orphan_num) {
@@ -1320,7 +1374,7 @@
 		start_blk += orphan_blocks;
 	}
 
-	write_data_summaries(sbi, start_blk);
+	f2fs_write_data_summaries(sbi, start_blk);
 	start_blk += data_sum_blocks;
 
 	/* Record write statistics in the hot node summary */
@@ -1331,7 +1385,7 @@
 	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
 
 	if (__remain_node_summaries(cpc->reason)) {
-		write_node_summaries(sbi, start_blk);
+		f2fs_write_node_summaries(sbi, start_blk);
 		start_blk += NR_CURSEG_NODE_TYPE;
 	}
 
@@ -1340,13 +1394,10 @@
 	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
 
 	/* Here, we have one bio having CP pack except cp pack 2 page */
-	sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
 
 	/* wait for previous submitted meta pages writeback */
-	wait_on_all_pages_writeback(sbi);
-
-	if (unlikely(f2fs_cp_error(sbi)))
-		return -EIO;
+	f2fs_wait_on_all_pages_writeback(sbi);
 
 	/* flush all device cache */
 	err = f2fs_flush_device_cache(sbi);
@@ -1355,12 +1406,19 @@
 
 	/* barrier and flush checkpoint cp pack 2 page if it can */
 	commit_checkpoint(sbi, ckpt, start_blk);
-	wait_on_all_pages_writeback(sbi);
+	f2fs_wait_on_all_pages_writeback(sbi);
 
-	release_ino_entry(sbi, false);
+	/*
+	 * invalidate intermediate page cache borrowed from the meta inode,
+	 * which is used for migration of encrypted inodes' blocks.
+	 */
+	if (f2fs_sb_has_encrypt(sbi->sb))
+		invalidate_mapping_pages(META_MAPPING(sbi),
+				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
 
-	if (unlikely(f2fs_cp_error(sbi)))
-		return -EIO;
+	f2fs_release_ino_entry(sbi, false);
+
+	f2fs_reset_fsync_node_info(sbi);
 
 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 	clear_sbi_flag(sbi, SBI_NEED_CP);
@@ -1376,13 +1434,13 @@
 
 	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
 
-	return 0;
+	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
 }
 
 /*
  * We guarantee that this checkpoint procedure will not fail.
  */
-int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	unsigned long long ckpt_ver;
@@ -1415,7 +1473,7 @@
 
 	/* this is the case of multiple fstrims without any changes */
 	if (cpc->reason & CP_DISCARD) {
-		if (!exist_trim_candidates(sbi, cpc)) {
+		if (!f2fs_exist_trim_candidates(sbi, cpc)) {
 			unblock_operations(sbi);
 			goto out;
 		}
@@ -1423,8 +1481,8 @@
 		if (NM_I(sbi)->dirty_nat_cnt == 0 &&
 				SIT_I(sbi)->dirty_sentries == 0 &&
 				prefree_segments(sbi) == 0) {
-			flush_sit_entries(sbi, cpc);
-			clear_prefree_segments(sbi, cpc);
+			f2fs_flush_sit_entries(sbi, cpc);
+			f2fs_clear_prefree_segments(sbi, cpc);
 			unblock_operations(sbi);
 			goto out;
 		}
@@ -1439,15 +1497,15 @@
 	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
 
 	/* write cached NAT/SIT entries to NAT/SIT area */
-	flush_nat_entries(sbi, cpc);
-	flush_sit_entries(sbi, cpc);
+	f2fs_flush_nat_entries(sbi, cpc);
+	f2fs_flush_sit_entries(sbi, cpc);
 
 	/* unlock all the fs_lock[] in do_checkpoint() */
 	err = do_checkpoint(sbi, cpc);
 	if (err)
-		release_discard_addrs(sbi);
+		f2fs_release_discard_addrs(sbi);
 	else
-		clear_prefree_segments(sbi, cpc);
+		f2fs_clear_prefree_segments(sbi, cpc);
 
 	unblock_operations(sbi);
 	stat_inc_cp_count(sbi->stat_info);
@@ -1464,7 +1522,7 @@
 	return err;
 }
 
-void init_ino_entry_info(struct f2fs_sb_info *sbi)
+void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
 {
 	int i;
 
@@ -1482,23 +1540,23 @@
 				F2FS_ORPHANS_PER_BLOCK;
 }
 
-int __init create_checkpoint_caches(void)
+int __init f2fs_create_checkpoint_caches(void)
 {
 	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
 			sizeof(struct ino_entry));
 	if (!ino_entry_slab)
 		return -ENOMEM;
-	inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
+	f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
 			sizeof(struct inode_entry));
-	if (!inode_entry_slab) {
+	if (!f2fs_inode_entry_slab) {
 		kmem_cache_destroy(ino_entry_slab);
 		return -ENOMEM;
 	}
 	return 0;
 }
 
-void destroy_checkpoint_caches(void)
+void f2fs_destroy_checkpoint_caches(void)
 {
 	kmem_cache_destroy(ino_entry_slab);
-	kmem_cache_destroy(inode_entry_slab);
+	kmem_cache_destroy(f2fs_inode_entry_slab);
 }
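The checkpoint.c hunks above make get_meta_page() callers handle read errors, and the new f2fs_get_meta_page_nofail() retries only -EIO, and only up to DEFAULT_RETRY_IO_COUNT times, before escalating. A minimal user-space sketch of that bounded-retry shape follows; read_block(), read_block_nofail() and MAX_RETRY_IO are hypothetical stand-ins for __get_meta_page() and DEFAULT_RETRY_IO_COUNT, not f2fs code.

/* sketch only: user-space stand-ins for the bounded-retry pattern */
#include <errno.h>
#include <stdio.h>

#define MAX_RETRY_IO 8		/* plays the role of DEFAULT_RETRY_IO_COUNT */

/* pretend the first two reads hit a transient I/O error */
static int read_block(unsigned long index, char *buf, size_t len)
{
	static int transient_failures = 2;

	if (transient_failures-- > 0)
		return -EIO;
	snprintf(buf, len, "blk-%lu", index);
	return 0;
}

/* retry only -EIO, and only a bounded number of times */
static int read_block_nofail(unsigned long index, char *buf, size_t len)
{
	int count = 0;
	int err;

retry:
	err = read_block(index, buf, len);
	if (err == -EIO && ++count <= MAX_RETRY_IO)
		goto retry;
	return err;	/* anything left over is escalated by the caller */
}

int main(void)
{
	char buf[16];

	if (!read_block_nofail(7, buf, sizeof(buf)))
		printf("%s\n", buf);	/* prints "blk-7" */
	return 0;
}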
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 30c5ec0..6570c75 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -48,6 +48,8 @@
 	if (inode->i_ino == F2FS_META_INO(sbi) ||
 			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
 			S_ISDIR(inode->i_mode) ||
+			(S_ISREG(inode->i_mode) &&
+			is_inode_flag_set(inode, FI_ATOMIC_FILE)) ||
 			is_cold_data(page))
 		return true;
 	return false;
@@ -124,12 +126,10 @@
 
 static void f2fs_read_end_io(struct bio *bio)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
 		f2fs_show_injection_info(FAULT_IO);
 		bio->bi_error = -EIO;
 	}
-#endif
 
 	if (f2fs_bio_post_read_required(bio)) {
 		struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -175,6 +175,8 @@
 					page->index != nid_of_node(page));
 
 		dec_page_count(sbi, type);
+		if (f2fs_in_warm_node_list(sbi, page))
+			f2fs_del_fsync_node_entry(sbi, page);
 		clear_cold_data(page);
 		end_page_writeback(page);
 	}
@@ -244,7 +246,7 @@
 	} else {
 		bio->bi_end_io = f2fs_write_end_io;
 		bio->bi_private = sbi;
-		bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
+		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
 	}
 	if (wbc)
 		wbc_init_bio(wbc, bio);
@@ -261,7 +263,7 @@
 		if (type != DATA && type != NODE)
 			goto submit_io;
 
-		if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+		if (test_opt(sbi, LFS) && current->plug)
 			blk_finish_plug(current->plug);
 
 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
@@ -439,7 +441,10 @@
 			fio->encrypted_page : fio->page;
 	struct inode *inode = fio->page->mapping->host;
 
-	verify_block_addr(fio, fio->new_blkaddr);
+	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+		return -EFAULT;
+
 	trace_f2fs_submit_page_bio(page, fio);
 	f2fs_trace_ios(fio, 0);
 
@@ -448,7 +453,8 @@
 				1, is_read_io(fio->op), fio->type, fio->temp);
 
 	if (f2fs_may_encrypt_bio(inode, fio))
-	fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
+		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
+	fscrypt_set_ice_skip(bio, fio->encrypted_page ? 1 : 0);
 
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
@@ -463,7 +469,7 @@
 	return 0;
 }
 
-int f2fs_submit_page_write(struct f2fs_io_info *fio)
+void f2fs_submit_page_write(struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
@@ -471,8 +477,8 @@
 	struct page *bio_page;
 	struct inode *inode;
 	bool bio_encrypted;
+	int bi_crypt_skip;
 	u64 dun;
-	int err = 0;
 
 	f2fs_bug_on(sbi, is_read_io(fio->op));
 
@@ -482,7 +488,7 @@
 		spin_lock(&io->io_lock);
 		if (list_empty(&io->io_list)) {
 			spin_unlock(&io->io_lock);
-			goto out_fail;
+			goto out;
 		}
 		fio = list_first_entry(&io->io_list,
 						struct f2fs_io_info, list);
@@ -490,13 +496,14 @@
 		spin_unlock(&io->io_lock);
 	}
 
-	if (fio->old_blkaddr != NEW_ADDR)
+	if (__is_valid_data_blkaddr(fio->old_blkaddr))
 		verify_block_addr(fio, fio->old_blkaddr);
 	verify_block_addr(fio, fio->new_blkaddr);
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
 	inode = fio->page->mapping->host;
 	dun = PG_DUN(inode, fio->page);
+	bi_crypt_skip = fio->encrypted_page ? 1 : 0;
 	bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
 
 	/* set submitted = true as a return value */
@@ -510,23 +517,23 @@
 		__submit_merged_bio(io);
 
 	/* ICE support */
-	if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
+	if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted, bi_crypt_skip))
 		__submit_merged_bio(io);
 
 alloc_new:
 	if (io->bio == NULL) {
 		if ((fio->type == DATA || fio->type == NODE) &&
 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
-			err = -EAGAIN;
 			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
-			goto out_fail;
+			fio->retry = true;
+			goto skip;
 		}
 		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
 						BIO_MAX_PAGES, false,
 						fio->type, fio->temp);
 		if (bio_encrypted)
 			fscrypt_set_ice_dun(inode, io->bio, dun);
-
+		fscrypt_set_ice_skip(io->bio, bi_crypt_skip);
 		io->fio = *fio;
 	}
 
@@ -542,28 +549,30 @@
 	f2fs_trace_ios(fio, 0);
 
 	trace_f2fs_submit_page_write(fio->page, fio);
-
+skip:
 	if (fio->in_list)
 		goto next;
-out_fail:
+out:
 	up_write(&io->io_rwsem);
-	return err;
 }
 
 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
-							 unsigned nr_pages)
+					unsigned nr_pages, unsigned op_flag)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct bio *bio;
 	struct bio_post_read_ctx *ctx;
 	unsigned int post_read_steps = 0;
 
+	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+		return ERR_PTR(-EFAULT);
+
 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 	f2fs_target_device(sbi, blkaddr, bio);
 	bio->bi_end_io = f2fs_read_end_io;
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
+	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
 
         if (f2fs_encrypted_file(inode) &&
             !fscrypt_using_hardware_encryption(inode))
@@ -589,7 +598,7 @@
 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 							block_t blkaddr)
 {
-	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -625,7 +634,7 @@
  *  ->node_page
  *    update block addresses in the node page
  */
-void set_data_blkaddr(struct dnode_of_data *dn)
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
 {
 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
 	__set_data_blkaddr(dn);
@@ -636,12 +645,12 @@
 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
 {
 	dn->data_blkaddr = blkaddr;
-	set_data_blkaddr(dn);
+	f2fs_set_data_blkaddr(dn);
 	f2fs_update_extent_cache(dn);
 }
 
 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
-int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
+int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	int err;
@@ -675,12 +684,12 @@
 }
 
 /* Should keep dn->ofs_in_node unchanged */
-int reserve_new_block(struct dnode_of_data *dn)
+int f2fs_reserve_new_block(struct dnode_of_data *dn)
 {
 	unsigned int ofs_in_node = dn->ofs_in_node;
 	int ret;
 
-	ret = reserve_new_blocks(dn, 1);
+	ret = f2fs_reserve_new_blocks(dn, 1);
 	dn->ofs_in_node = ofs_in_node;
 	return ret;
 }
@@ -690,12 +699,12 @@
 	bool need_put = dn->inode_page ? false : true;
 	int err;
 
-	err = get_dnode_of_data(dn, index, ALLOC_NODE);
+	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
 	if (err)
 		return err;
 
 	if (dn->data_blkaddr == NULL_ADDR)
-		err = reserve_new_block(dn);
+		err = f2fs_reserve_new_block(dn);
 	if (err || need_put)
 		f2fs_put_dnode(dn);
 	return err;
@@ -714,7 +723,7 @@
 	return f2fs_reserve_block(dn, index);
 }
 
-struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
 						int op_flags, bool for_write)
 {
 	struct address_space *mapping = inode->i_mapping;
@@ -733,7 +742,7 @@
 	}
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 	if (err)
 		goto put_err;
 	f2fs_put_dnode(&dn);
@@ -752,7 +761,8 @@
 	 * A new dentry page is allocated but not able to be written, since its
 	 * new inode page couldn't be allocated due to -ENOSPC.
 	 * In such the case, its blkaddr can be remained as NEW_ADDR.
-	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
+	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
+	 * f2fs_init_inode_metadata.
 	 */
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_SIZE);
@@ -772,7 +782,7 @@
 	return ERR_PTR(err);
 }
 
-struct page *find_data_page(struct inode *inode, pgoff_t index)
+struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
@@ -782,7 +792,7 @@
 		return page;
 	f2fs_put_page(page, 0);
 
-	page = get_read_data_page(inode, index, 0, false);
+	page = f2fs_get_read_data_page(inode, index, 0, false);
 	if (IS_ERR(page))
 		return page;
 
@@ -802,13 +812,13 @@
  * Because the callers (functions in dir.c and GC) should be able to know
  * whether this page exists or not.
  */
-struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
 							bool for_write)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 repeat:
-	page = get_read_data_page(inode, index, 0, for_write);
+	page = f2fs_get_read_data_page(inode, index, 0, for_write);
 	if (IS_ERR(page))
 		return page;
 
@@ -834,7 +844,7 @@
  * Note that ipage is set only by make_empty_dir, and if any error occurs,
  * ipage should be released by this function.
  */
-struct page *get_new_data_page(struct inode *inode,
+struct page *f2fs_get_new_data_page(struct inode *inode,
 		struct page *ipage, pgoff_t index, bool new_i_size)
 {
 	struct address_space *mapping = inode->i_mapping;
@@ -873,7 +883,7 @@
 
 		/* if ipage exists, blkaddr should be NEW_ADDR */
 		f2fs_bug_on(F2FS_I_SB(inode), ipage);
-		page = get_lock_data_page(inode, index, true);
+		page = f2fs_get_lock_data_page(inode, index, true);
 		if (IS_ERR(page))
 			return page;
 	}
@@ -889,6 +899,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_summary sum;
 	struct node_info ni;
+	block_t old_blkaddr;
 	pgoff_t fofs;
 	blkcnt_t count = 1;
 	int err;
@@ -896,6 +907,10 @@
 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
 		return -EPERM;
 
+	err = f2fs_get_node_info(sbi, dn->nid, &ni);
+	if (err)
+		return err;
+
 	dn->data_blkaddr = datablock_addr(dn->inode,
 				dn->node_page, dn->ofs_in_node);
 	if (dn->data_blkaddr == NEW_ADDR)
@@ -905,15 +920,17 @@
 		return err;
 
 alloc:
-	get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
-	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+	old_blkaddr = dn->data_blkaddr;
+	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
 					&sum, seg_type, NULL, false);
-	set_data_blkaddr(dn);
+	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+		invalidate_mapping_pages(META_MAPPING(sbi),
+					old_blkaddr, old_blkaddr);
+	f2fs_set_data_blkaddr(dn);
 
 	/* update i_size */
-	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
 							dn->ofs_in_node;
 	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
 		f2fs_i_size_write(dn->inode,
@@ -951,7 +968,7 @@
 	map.m_seg_type = NO_CHECK_TYPE;
 
 	if (direct_io) {
-		map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
+		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
 		flag = f2fs_force_buffered_io(inode, WRITE) ?
 					F2FS_GET_BLOCK_PRE_AIO :
 					F2FS_GET_BLOCK_PRE_DIO;
@@ -1041,7 +1058,7 @@
 
 	/* When reading holes, we need its node page */
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, pgofs, mode);
+	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
 	if (err) {
 		if (flag == F2FS_GET_BLOCK_BMAP)
 			map->m_pblk = 0;
@@ -1049,10 +1066,10 @@
 			err = 0;
 			if (map->m_next_pgofs)
 				*map->m_next_pgofs =
-					get_next_page_offset(&dn, pgofs);
+					f2fs_get_next_page_offset(&dn, pgofs);
 			if (map->m_next_extent)
 				*map->m_next_extent =
-					get_next_page_offset(&dn, pgofs);
+					f2fs_get_next_page_offset(&dn, pgofs);
 		}
 		goto unlock_out;
 	}
@@ -1065,7 +1082,13 @@
 next_block:
 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
+	if (__is_valid_data_blkaddr(blkaddr) &&
+		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+		err = -EFAULT;
+		goto sync_out;
+	}
+
+	if (!is_valid_data_blkaddr(sbi, blkaddr)) {
 		if (create) {
 			if (unlikely(f2fs_cp_error(sbi))) {
 				err = -EIO;
@@ -1138,7 +1161,7 @@
 			(pgofs == end || dn.ofs_in_node == end_offset)) {
 
 		dn.ofs_in_node = ofs_in_node;
-		err = reserve_new_blocks(&dn, prealloc);
+		err = f2fs_reserve_new_blocks(&dn, prealloc);
 		if (err)
 			goto sync_out;
 
@@ -1257,7 +1280,7 @@
 {
 	return __get_data_block(inode, iblock, bh_result, create,
 						F2FS_GET_BLOCK_DEFAULT, NULL,
-						rw_hint_to_seg_type(
+						f2fs_rw_hint_to_seg_type(
 							inode->i_write_hint));
 }
 
@@ -1302,7 +1325,11 @@
 		if (!page)
 			return -ENOMEM;
 
-		get_node_info(sbi, inode->i_ino, &ni);
+		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+		if (err) {
+			f2fs_put_page(page, 1);
+			return err;
+		}
 
 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
 		offset = offsetof(struct f2fs_inode, i_addr) +
@@ -1329,7 +1356,11 @@
 		if (!page)
 			return -ENOMEM;
 
-		get_node_info(sbi, xnid, &ni);
+		err = f2fs_get_node_info(sbi, xnid, &ni);
+		if (err) {
+			f2fs_put_page(page, 1);
+			return err;
+		}
 
 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
 		len = inode->i_sb->s_blocksize;
@@ -1441,10 +1472,15 @@
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
  */
 static int f2fs_mpage_readpages(struct address_space *mapping,
 			struct list_head *pages, struct page *page,
-			unsigned nr_pages)
+			unsigned nr_pages, bool is_readahead)
 {
 	struct bio *bio = NULL;
 	sector_t last_block_in_bio = 0;
@@ -1517,6 +1553,10 @@
 				SetPageUptodate(page);
 				goto confused;
 			}
+
+			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+								DATA_GENERIC))
+				goto set_error_page;
 		} else {
 			zero_user_segment(page, 0, PAGE_SIZE);
 			if (!PageUptodate(page))
@@ -1538,13 +1578,14 @@
 
 		dun = PG_DUN(inode, page);
 		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
-		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
+		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) {
 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
 		}
 
 		if (bio == NULL) {
-			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
+			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+					is_readahead ? REQ_RAHEAD : 0);
 			if (IS_ERR(bio)) {
 				bio = NULL;
 				goto set_error_page;
@@ -1589,7 +1630,7 @@
 	if (f2fs_has_inline_data(inode))
 		ret = f2fs_read_inline_data(inode, page);
 	if (ret == -EAGAIN)
-		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
+		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
 	return ret;
 }
 
@@ -1606,12 +1647,13 @@
 	if (f2fs_has_inline_data(inode))
 		return 0;
 
-	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
+	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
 }
 
 static int encrypt_one_page(struct f2fs_io_info *fio)
 {
 	struct inode *inode = fio->page->mapping->host;
+	struct page *mpage;
 	gfp_t gfp_flags = GFP_NOFS;
 
 	if (!f2fs_encrypted_file(inode))
@@ -1626,17 +1668,25 @@
 
 	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
 			PAGE_SIZE, 0, fio->page->index, gfp_flags);
-	if (!IS_ERR(fio->encrypted_page))
-		return 0;
-
-	/* flush pending IOs and wait for a while in the ENOMEM case */
-	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
-		f2fs_flush_merged_writes(fio->sbi);
-		congestion_wait(BLK_RW_ASYNC, HZ/50);
-		gfp_flags |= __GFP_NOFAIL;
-		goto retry_encrypt;
+	if (IS_ERR(fio->encrypted_page)) {
+		/* flush pending IOs and wait for a while in the ENOMEM case */
+		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+			f2fs_flush_merged_writes(fio->sbi);
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			gfp_flags |= __GFP_NOFAIL;
+			goto retry_encrypt;
+		}
+		return PTR_ERR(fio->encrypted_page);
 	}
-	return PTR_ERR(fio->encrypted_page);
+
+	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
+	if (mpage) {
+		if (PageUptodate(mpage))
+			memcpy(page_address(mpage),
+				page_address(fio->encrypted_page), PAGE_SIZE);
+		f2fs_put_page(mpage, 1);
+	}
+	return 0;
 }
 
 static inline bool check_inplace_update_policy(struct inode *inode,
@@ -1647,12 +1697,12 @@
 
 	if (policy & (0x1 << F2FS_IPU_FORCE))
 		return true;
-	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
+	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
 		return true;
 	if (policy & (0x1 << F2FS_IPU_UTIL) &&
 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
 		return true;
-	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
+	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
 		return true;
 
@@ -1673,7 +1723,7 @@
 	return false;
 }
 
-bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
+bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
 {
 	if (f2fs_is_pinned_file(inode))
 		return true;
@@ -1685,7 +1735,7 @@
 	return check_inplace_update_policy(inode, fio);
 }
 
-bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
+bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
@@ -1708,27 +1758,19 @@
 {
 	struct inode *inode = fio->page->mapping->host;
 
-	if (should_update_outplace(inode, fio))
+	if (f2fs_should_update_outplace(inode, fio))
 		return false;
 
-	return should_update_inplace(inode, fio);
+	return f2fs_should_update_inplace(inode, fio);
 }
 
-static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
-{
-	if (fio->old_blkaddr == NEW_ADDR)
-		return false;
-	if (fio->old_blkaddr == NULL_ADDR)
-		return false;
-	return true;
-}
-
-int do_write_data_page(struct f2fs_io_info *fio)
+int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 {
 	struct page *page = fio->page;
 	struct inode *inode = page->mapping->host;
 	struct dnode_of_data dn;
 	struct extent_info ei = {0,0,0};
+	struct node_info ni;
 	bool ipu_force = false;
 	int err = 0;
 
@@ -1737,18 +1779,20 @@
 			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
 
-		if (valid_ipu_blkaddr(fio)) {
-			ipu_force = true;
-			fio->need_lock = LOCK_DONE;
-			goto got_it;
-		}
+		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+							DATA_GENERIC))
+			return -EFAULT;
+
+		ipu_force = true;
+		fio->need_lock = LOCK_DONE;
+		goto got_it;
 	}
 
 	/* Deadlock between page->lock and f2fs_lock_op */
 	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
 		return -EAGAIN;
 
-	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
 	if (err)
 		goto out;
 
@@ -1760,11 +1804,18 @@
 		goto out_writepage;
 	}
 got_it:
+	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
+		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+							DATA_GENERIC)) {
+		err = -EFAULT;
+		goto out_writepage;
+	}
 	/*
 	 * If current allocation needs SSR,
 	 * it is better to do in-place writes for the updated data.
 	 */
-	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
+	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
+					need_inplace_update(fio))) {
 		err = encrypt_one_page(fio);
 		if (err)
 			goto out_writepage;
@@ -1774,7 +1825,7 @@
 		f2fs_put_dnode(&dn);
 		if (fio->need_lock == LOCK_REQ)
 			f2fs_unlock_op(fio->sbi);
-		err = rewrite_data_page(fio);
+		err = f2fs_inplace_write_data(fio);
 		trace_f2fs_do_write_data_page(fio->page, IPU);
 		set_inode_flag(inode, FI_UPDATE_WRITE);
 		return err;
@@ -1788,6 +1839,12 @@
 		fio->need_lock = LOCK_REQ;
 	}
 
+	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+	if (err)
+		goto out_writepage;
+
+	fio->version = ni.version;
+
 	err = encrypt_one_page(fio);
 	if (err)
 		goto out_writepage;
@@ -1796,7 +1853,7 @@
 	ClearPageError(page);
 
 	/* LFS mode write path */
-	write_data_page(&dn, fio);
+	f2fs_outplace_write_data(&dn, fio);
 	trace_f2fs_do_write_data_page(page, OPU);
 	set_inode_flag(inode, FI_APPEND_WRITE);
 	if (page->index == 0)
@@ -1842,6 +1899,12 @@
 	/* we should bypass data pages to proceed the kworker jobs */
 	if (unlikely(f2fs_cp_error(sbi))) {
 		mapping_set_error(page->mapping, -EIO);
+		/*
+		 * don't drop any dirty dentry pages for keeping the latest
+		 * directory structure.
+		 */
+		if (S_ISDIR(inode->i_mode))
+			goto redirty_out;
 		goto out;
 	}
 
@@ -1866,13 +1929,13 @@
 	/* we should not write 0'th page having journal header */
 	if (f2fs_is_volatile_file(inode) && (!page->index ||
 			(!wbc->for_reclaim &&
-			available_free_memory(sbi, BASE_CHECK))))
+			f2fs_available_free_memory(sbi, BASE_CHECK))))
 		goto redirty_out;
 
 	/* Dentry blocks are controlled by checkpoint */
 	if (S_ISDIR(inode->i_mode)) {
 		fio.need_lock = LOCK_DONE;
-		err = do_write_data_page(&fio);
+		err = f2fs_do_write_data_page(&fio);
 		goto done;
 	}
 
@@ -1891,10 +1954,10 @@
 	}
 
 	if (err == -EAGAIN) {
-		err = do_write_data_page(&fio);
+		err = f2fs_do_write_data_page(&fio);
 		if (err == -EAGAIN) {
 			fio.need_lock = LOCK_REQ;
-			err = do_write_data_page(&fio);
+			err = f2fs_do_write_data_page(&fio);
 		}
 	}
 
@@ -1919,7 +1982,7 @@
 	if (wbc->for_reclaim) {
 		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
 		clear_inode_flag(inode, FI_HOT_DATA);
-		remove_dirty_inode(inode);
+		f2fs_remove_dirty_inode(inode);
 		submitted = NULL;
 	}
 
@@ -1969,6 +2032,7 @@
 	int ret = 0;
 	int done = 0;
 	struct pagevec pvec;
+	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
 	int nr_pages;
 	pgoff_t uninitialized_var(writeback_index);
 	pgoff_t index;
@@ -2013,8 +2077,8 @@
 	while (!done && (index <= end)) {
 		int i;
 
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+				tag);
 		if (nr_pages == 0)
 			break;
 
@@ -2022,7 +2086,9 @@
 			struct page *page = pvec.pages[i];
 			bool submitted = false;
 
-			if (page->index > end) {
+			/* give a priority to WB_SYNC threads */
+			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
+					wbc->sync_mode == WB_SYNC_NONE) {
 				done = 1;
 				break;
 			}
@@ -2081,9 +2147,7 @@
 				last_idx = page->index;
 			}
 
-			/* give a priority to WB_SYNC threads */
-			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
-					--wbc->nr_to_write <= 0) &&
+			if (--wbc->nr_to_write <= 0 &&
 					wbc->sync_mode == WB_SYNC_NONE) {
 				done = 1;
 				break;
@@ -2121,7 +2185,7 @@
 	return false;
 }
 
-int __f2fs_write_data_pages(struct address_space *mapping,
+static int __f2fs_write_data_pages(struct address_space *mapping,
 						struct writeback_control *wbc,
 						enum iostat_type io_type)
 {
@@ -2145,7 +2209,7 @@
 
 	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
-			available_free_memory(sbi, DIRTY_DENTS))
+			f2fs_available_free_memory(sbi, DIRTY_DENTS))
 		goto skip_write;
 
 	/* skip writing during file defragment */
@@ -2156,8 +2220,8 @@
 
 	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		atomic_inc(&sbi->wb_sync_req);
-	else if (atomic_read(&sbi->wb_sync_req))
+		atomic_inc(&sbi->wb_sync_req[DATA]);
+	else if (atomic_read(&sbi->wb_sync_req[DATA]))
 		goto skip_write;
 
 	if (__should_serialize_io(inode, wbc)) {
@@ -2173,13 +2237,13 @@
 		mutex_unlock(&sbi->writepages);
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		atomic_dec(&sbi->wb_sync_req);
+		atomic_dec(&sbi->wb_sync_req[DATA]);
 	/*
 	 * if some pages were truncated, we cannot guarantee its mapping->host
 	 * to detect pending bios.
 	 */
 
-	remove_dirty_inode(inode);
+	f2fs_remove_dirty_inode(inode);
 	return ret;
 
 skip_write:
@@ -2204,10 +2268,14 @@
 	loff_t i_size = i_size_read(inode);
 
 	if (to > i_size) {
+		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		down_write(&F2FS_I(inode)->i_mmap_sem);
+
 		truncate_pagecache(inode, i_size);
-		truncate_blocks(inode, i_size, true);
+		f2fs_truncate_blocks(inode, i_size, true);
+
 		up_write(&F2FS_I(inode)->i_mmap_sem);
+		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	}
 }
 
@@ -2238,7 +2306,7 @@
 	}
 restart:
 	/* check inline_data */
-	ipage = get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto unlock_out;
@@ -2248,7 +2316,7 @@
 
 	if (f2fs_has_inline_data(inode)) {
 		if (pos + len <= MAX_INLINE_DATA(inode)) {
-			read_inline_data(page, ipage);
+			f2fs_do_read_inline_data(page, ipage);
 			set_inode_flag(inode, FI_DATA_EXIST);
 			if (inode->i_nlink)
 				set_inline_node(ipage);
@@ -2266,7 +2334,7 @@
 			dn.data_blkaddr = ei.blk + index - ei.fofs;
 		} else {
 			/* hole case */
-			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 			if (err || dn.data_blkaddr == NULL_ADDR) {
 				f2fs_put_dnode(&dn);
 				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
@@ -2312,8 +2380,9 @@
 	}
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
-	if (f2fs_is_atomic_file(inode) &&
-			!available_free_memory(sbi, INMEM_PAGES)) {
+	if ((f2fs_is_atomic_file(inode) &&
+			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
+			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
 		err = -ENOMEM;
 		drop_atomic = true;
 		goto fail;
@@ -2397,7 +2466,7 @@
 	f2fs_put_page(page, 1);
 	f2fs_write_failed(mapping, pos + len);
 	if (drop_atomic)
-		drop_inmem_pages_all(sbi);
+		f2fs_drop_inmem_pages_all(sbi, false);
 	return err;
 }
 
@@ -2438,14 +2507,20 @@
 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
 			   loff_t offset)
 {
-	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
+	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
+	unsigned blkbits = i_blkbits;
+	unsigned blocksize_mask = (1 << blkbits) - 1;
+	unsigned long align = offset | iov_iter_alignment(iter);
+	struct block_device *bdev = inode->i_sb->s_bdev;
 
-	if (offset & blocksize_mask)
-		return -EINVAL;
-
-	if (iov_iter_alignment(iter) & blocksize_mask)
-		return -EINVAL;
-
+	if (align & blocksize_mask) {
+		if (bdev)
+			blkbits = blksize_bits(bdev_logical_block_size(bdev));
+		blocksize_mask = (1 << blkbits) - 1;
+		if (align & blocksize_mask)
+			return -EINVAL;
+		return 1;
+	}
 	return 0;
 }
 
@@ -2463,7 +2538,7 @@
 
 	err = check_direct_IO(inode, iter, offset);
 	if (err)
-		return err;
+		return err < 0 ? err : 0;
 
 	if (f2fs_force_buffered_io(inode, rw))
 		return 0;
@@ -2495,17 +2570,17 @@
 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
 
-	if (!down_read_trylock(&F2FS_I(inode)->dio_rwsem[rw])) {
+	if (!down_read_trylock(&F2FS_I(inode)->i_gc_rwsem[rw])) {
 		if (iocb->ki_flags & IOCB_NOWAIT) {
 			iocb->ki_hint = hint;
 			err = -EAGAIN;
 			goto out;
 		}
-		down_read(&F2FS_I(inode)->dio_rwsem[rw]);
+		down_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
 	}
 
 	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
-	up_read(&F2FS_I(inode)->dio_rwsem[rw]);
+	up_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
 
 	if (rw == WRITE) {
 		if (whint_mode == WHINT_MODE_OFF)
@@ -2548,13 +2623,13 @@
 			dec_page_count(sbi, F2FS_DIRTY_NODES);
 		} else {
 			inode_dec_dirty_pages(inode);
-			remove_dirty_inode(inode);
+			f2fs_remove_dirty_inode(inode);
 		}
 	}
 
 	/* This is atomic written page, keep Private */
 	if (IS_ATOMIC_WRITTEN_PAGE(page))
-		return drop_inmem_page(inode, page);
+		return f2fs_drop_inmem_page(inode, page);
 
 	set_page_private(page, 0);
 	ClearPagePrivate(page);
@@ -2585,9 +2660,13 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 
+	/* don't retain the PG_checked flag which was set during GC */
+	if (is_cold_data(page))
+		clear_cold_data(page);
+
 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
-			register_inmem_page(inode, page);
+			f2fs_register_inmem_page(inode, page);
 			return 1;
 		}
 		/*
@@ -2599,7 +2678,7 @@
 
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
-		update_dirty_page(inode, page);
+		f2fs_update_dirty_page(inode, page);
 		return 1;
 	}
 	return 0;
@@ -2692,6 +2771,17 @@
 #endif
 };
 
+void f2fs_clear_radix_tree_dirty_tag(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->tree_lock, flags);
+	radix_tree_tag_clear(&mapping->page_tree, page_index(page),
+					PAGECACHE_TAG_DIRTY);
+	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+}
+
 int __init f2fs_init_post_read_processing(void)
 {
 	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
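Several data.c hunks above replace ad-hoc NEW_ADDR/NULL_ADDR tests with f2fs_is_valid_blkaddr(..., DATA_GENERIC/META_GENERIC) range checks against the on-disk layout: data blocks must fall in [MAIN_BLKADDR, MAX_BLKADDR) and meta blocks in [SEG0_BLKADDR, MAIN_BLKADDR). The standalone sketch below shows only that interval check; struct layout, its fields and blkaddr_valid() are hypothetical simplifications, not f2fs code.

/* sketch only: interval check behind DATA_GENERIC/META_GENERIC validation */
#include <stdbool.h>
#include <stdio.h>

struct layout {
	unsigned int seg0_blkaddr;	/* first block of segment 0 */
	unsigned int main_blkaddr;	/* first block of the main area */
	unsigned int max_blkaddr;	/* one past the last valid block */
};

enum blk_type { DATA_GENERIC, META_GENERIC };

static bool blkaddr_valid(const struct layout *l, unsigned int blkaddr,
			  enum blk_type type)
{
	switch (type) {
	case DATA_GENERIC:
		return blkaddr >= l->main_blkaddr && blkaddr < l->max_blkaddr;
	case META_GENERIC:
		return blkaddr >= l->seg0_blkaddr && blkaddr < l->main_blkaddr;
	}
	return false;
}

int main(void)
{
	struct layout l = { .seg0_blkaddr = 512, .main_blkaddr = 4096,
			    .max_blkaddr = 1048576 };

	/* prints "1 0": 5000 is a plausible data block, 1000 is not */
	printf("%d %d\n", blkaddr_valid(&l, 5000, DATA_GENERIC),
	       blkaddr_valid(&l, 1000, DATA_GENERIC));
	return 0;
}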
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index a66107b..214a968 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -104,6 +104,8 @@
 	si->avail_nids = NM_I(sbi)->available_nids;
 	si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
 	si->bg_gc = sbi->bg_gc;
+	si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
+	si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
 	si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
 		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
 		/ 2;
@@ -213,7 +215,8 @@
 	si->base_mem += sizeof(struct f2fs_nm_info);
 	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
 	si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
-	si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+	si->base_mem += NM_I(sbi)->nat_blocks *
+				f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
 	si->base_mem += NM_I(sbi)->nat_blocks / 8;
 	si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
 
@@ -342,6 +345,10 @@
 				si->bg_data_blks);
 		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
 				si->bg_node_blks);
+		seq_printf(s, "Skipped : atomic write %llu (%llu)\n",
+				si->skipped_atomic_files[BG_GC] +
+				si->skipped_atomic_files[FG_GC],
+				si->skipped_atomic_files[BG_GC]);
 		seq_puts(s, "\nExtent Cache:\n");
 		seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
 				si->hit_largest, si->hit_cached,
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index f9a1e18..56cc274 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -60,12 +60,12 @@
 	[S_IFLNK >> S_SHIFT]	= F2FS_FT_SYMLINK,
 };
 
-void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
+static void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
 {
 	de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 }
 
-unsigned char get_de_type(struct f2fs_dir_entry *de)
+unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de)
 {
 	if (de->file_type < F2FS_FT_MAX)
 		return f2fs_filetype_table[de->file_type];
@@ -97,14 +97,14 @@
 	dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
 
 	make_dentry_ptr_block(NULL, &d, dentry_blk);
-	de = find_target_dentry(fname, namehash, max_slots, &d);
+	de = f2fs_find_target_dentry(fname, namehash, max_slots, &d);
 	if (de)
 		*res_page = dentry_page;
 
 	return de;
 }
 
-struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
+struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
 			f2fs_hash_t namehash, int *max_slots,
 			struct f2fs_dentry_ptr *d)
 {
@@ -171,7 +171,7 @@
 
 	for (; bidx < end_block; bidx++) {
 		/* no need to allocate new dentry pages to all the indices */
-		dentry_page = find_data_page(dir, bidx);
+		dentry_page = f2fs_find_data_page(dir, bidx);
 		if (IS_ERR(dentry_page)) {
 			if (PTR_ERR(dentry_page) == -ENOENT) {
 				room = true;
@@ -210,7 +210,7 @@
 
 	if (f2fs_has_inline_dentry(dir)) {
 		*res_page = NULL;
-		de = find_in_inline_dir(dir, fname, res_page);
+		de = f2fs_find_in_inline_dir(dir, fname, res_page);
 		goto out;
 	}
 
@@ -319,7 +319,7 @@
 	set_page_dirty(ipage);
 }
 
-void do_make_empty_dir(struct inode *inode, struct inode *parent,
+void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
 					struct f2fs_dentry_ptr *d)
 {
 	struct qstr dot = QSTR_INIT(".", 1);
@@ -340,23 +340,23 @@
 	struct f2fs_dentry_ptr d;
 
 	if (f2fs_has_inline_dentry(inode))
-		return make_empty_inline_dir(inode, parent, page);
+		return f2fs_make_empty_inline_dir(inode, parent, page);
 
-	dentry_page = get_new_data_page(inode, page, 0, true);
+	dentry_page = f2fs_get_new_data_page(inode, page, 0, true);
 	if (IS_ERR(dentry_page))
 		return PTR_ERR(dentry_page);
 
 	dentry_blk = page_address(dentry_page);
 
 	make_dentry_ptr_block(NULL, &d, dentry_blk);
-	do_make_empty_dir(inode, parent, &d);
+	f2fs_do_make_empty_dir(inode, parent, &d);
 
 	set_page_dirty(dentry_page);
 	f2fs_put_page(dentry_page, 1);
 	return 0;
 }
 
-struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
+struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
 			const struct qstr *new_name, const struct qstr *orig_name,
 			struct page *dpage)
 {
@@ -365,7 +365,7 @@
 	int err;
 
 	if (is_inode_flag_set(inode, FI_NEW_INODE)) {
-		page = new_inode_page(inode);
+		page = f2fs_new_inode_page(inode);
 		if (IS_ERR(page))
 			return page;
 
@@ -395,7 +395,7 @@
 				goto put_error;
 		}
 	} else {
-		page = get_node_page(F2FS_I_SB(dir), inode->i_ino);
+		page = f2fs_get_node_page(F2FS_I_SB(dir), inode->i_ino);
 		if (IS_ERR(page))
 			return page;
 	}
@@ -418,19 +418,19 @@
 		 * we should remove this inode from orphan list.
 		 */
 		if (inode->i_nlink == 0)
-			remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
+			f2fs_remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
 		f2fs_i_links_write(inode, true);
 	}
 	return page;
 
 put_error:
 	clear_nlink(inode);
-	update_inode(inode, page);
+	f2fs_update_inode(inode, page);
 	f2fs_put_page(page, 1);
 	return ERR_PTR(err);
 }
 
-void update_parent_metadata(struct inode *dir, struct inode *inode,
+void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
 						unsigned int current_depth)
 {
 	if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
@@ -448,7 +448,7 @@
 		clear_inode_flag(inode, FI_INC_LINK);
 }
 
-int room_for_filename(const void *bitmap, int slots, int max_slots)
+int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots)
 {
 	int bit_start = 0;
 	int zero_start, zero_end;
@@ -517,12 +517,11 @@
 	}
 
 start:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
 		f2fs_show_injection_info(FAULT_DIR_DEPTH);
 		return -ENOSPC;
 	}
-#endif
+
 	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
 		return -ENOSPC;
 
@@ -537,12 +536,12 @@
 				(le32_to_cpu(dentry_hash) % nbucket));
 
 	for (block = bidx; block <= (bidx + nblock - 1); block++) {
-		dentry_page = get_new_data_page(dir, NULL, block, true);
+		dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
 		if (IS_ERR(dentry_page))
 			return PTR_ERR(dentry_page);
 
 		dentry_blk = page_address(dentry_page);
-		bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
+		bit_pos = f2fs_room_for_filename(&dentry_blk->dentry_bitmap,
 						slots, NR_DENTRY_IN_BLOCK);
 		if (bit_pos < NR_DENTRY_IN_BLOCK)
 			goto add_dentry;
@@ -558,7 +557,7 @@
 
 	if (inode) {
 		down_write(&F2FS_I(inode)->i_sem);
-		page = init_inode_metadata(inode, dir, new_name,
+		page = f2fs_init_inode_metadata(inode, dir, new_name,
 						orig_name, NULL);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
@@ -576,7 +575,7 @@
 		f2fs_put_page(page, 1);
 	}
 
-	update_parent_metadata(dir, inode, current_depth);
+	f2fs_update_parent_metadata(dir, inode, current_depth);
 fail:
 	if (inode)
 		up_write(&F2FS_I(inode)->i_sem);
@@ -586,7 +585,7 @@
 	return err;
 }
 
-int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
 				struct inode *inode, nid_t ino, umode_t mode)
 {
 	struct qstr new_name;
@@ -610,7 +609,7 @@
  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
  */
-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
 				struct inode *inode, nid_t ino, umode_t mode)
 {
 	struct fscrypt_name fname;
@@ -639,7 +638,7 @@
 	} else if (IS_ERR(page)) {
 		err = PTR_ERR(page);
 	} else {
-		err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
+		err = f2fs_add_dentry(dir, &fname, inode, ino, mode);
 	}
 	fscrypt_free_filename(&fname);
 	return err;
@@ -651,7 +650,7 @@
 	int err = 0;
 
 	down_write(&F2FS_I(inode)->i_sem);
-	page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
+	page = f2fs_init_inode_metadata(inode, dir, NULL, NULL, NULL);
 	if (IS_ERR(page)) {
 		err = PTR_ERR(page);
 		goto fail;
@@ -683,9 +682,9 @@
 	up_write(&F2FS_I(inode)->i_sem);
 
 	if (inode->i_nlink == 0)
-		add_orphan_inode(inode);
+		f2fs_add_orphan_inode(inode);
 	else
-		release_orphan_inode(sbi);
+		f2fs_release_orphan_inode(sbi);
 }
 
 /*
@@ -698,14 +697,12 @@
 	struct	f2fs_dentry_block *dentry_blk;
 	unsigned int bit_pos;
 	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
-	struct address_space *mapping = page_mapping(page);
-	unsigned long flags;
 	int i;
 
 	f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
 
 	if (F2FS_OPTION(F2FS_I_SB(dir)).fsync_mode == FSYNC_MODE_STRICT)
-		add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
+		f2fs_add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
 
 	if (f2fs_has_inline_dentry(dir))
 		return f2fs_delete_inline_entry(dentry, page, dir, inode);
@@ -731,17 +728,13 @@
 		f2fs_drop_nlink(dir, inode);
 
 	if (bit_pos == NR_DENTRY_IN_BLOCK &&
-			!truncate_hole(dir, page->index, page->index + 1)) {
-		spin_lock_irqsave(&mapping->tree_lock, flags);
-		radix_tree_tag_clear(&mapping->page_tree, page_index(page),
-				     PAGECACHE_TAG_DIRTY);
-		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-
+		!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
+		f2fs_clear_radix_tree_dirty_tag(page);
 		clear_page_dirty_for_io(page);
 		ClearPagePrivate(page);
 		ClearPageUptodate(page);
 		inode_dec_dirty_pages(dir);
-		remove_dirty_inode(dir);
+		f2fs_remove_dirty_inode(dir);
 	}
 	f2fs_put_page(page, 1);
 }
@@ -758,7 +751,7 @@
 		return f2fs_empty_inline_dir(dir);
 
 	for (bidx = 0; bidx < nblock; bidx++) {
-		dentry_page = get_lock_data_page(dir, bidx, false);
+		dentry_page = f2fs_get_lock_data_page(dir, bidx, false);
 		if (IS_ERR(dentry_page)) {
 			if (PTR_ERR(dentry_page) == -ENOENT)
 				continue;
@@ -806,7 +799,7 @@
 			continue;
 		}
 
-		d_type = get_de_type(de);
+		d_type = f2fs_get_de_type(de);
 
 		de_name.name = d->filename[bit_pos];
 		de_name.len = le16_to_cpu(de->name_len);
@@ -830,7 +823,7 @@
 			return 1;
 
 		if (sbi->readdir_ra == 1)
-			ra_node_page(sbi, le32_to_cpu(de->ino));
+			f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
 
 		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
 		ctx->pos = start_pos + bit_pos;
@@ -880,7 +873,7 @@
 			page_cache_sync_readahead(inode->i_mapping, ra, file, n,
 				min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
 
-		dentry_page = get_lock_data_page(inode, n, false);
+		dentry_page = f2fs_get_lock_data_page(inode, n, false);
 		if (IS_ERR(dentry_page)) {
 			err = PTR_ERR(dentry_page);
 			if (err == -ENOENT) {
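
Most of the dir.c changes above are mechanical f2fs_-prefix renames, but the add-dentry path leans on one helper whose contract is easy to miss: f2fs_room_for_filename() scans the block's dentry bitmap for a run of free slots long enough for the name (long names occupy several slots) and returns the start of that run, or max_slots when the block has no room, which is what the "bit_pos < NR_DENTRY_IN_BLOCK" check above tests. A rough user-space sketch of that scan, using a byte-per-slot map instead of the kernel's packed-bitmap search helpers:

#include <stdbool.h>

/*
 * Find the first run of `slots` consecutive free entries, or return
 * `max_slots` if no such run exists.  `used` is a simplified
 * byte-per-slot occupancy map; the real code walks a packed
 * little-endian bitmap with the kernel's bit-search helpers.
 */
static int room_for_filename_sketch(const bool *used, int slots, int max_slots)
{
	int i, run = 0;

	for (i = 0; i < max_slots; i++) {
		if (used[i]) {
			run = 0;
			continue;
		}
		if (++run == slots)
			return i - slots + 1;	/* start of the free run */
	}
	return max_slots;			/* no room in this block */
}
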
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index d5a861b..231b77e 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -49,7 +49,7 @@
 	return NULL;
 }
 
-struct rb_entry *__lookup_rb_tree(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
 				struct rb_entry *cached_re, unsigned int ofs)
 {
 	struct rb_entry *re;
@@ -61,7 +61,7 @@
 	return re;
 }
 
-struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
 				struct rb_root *root, struct rb_node **parent,
 				unsigned int ofs)
 {
@@ -92,7 +92,7 @@
  * in order to simplify the insertion after.
  * tree must stay unchanged between lookup and insertion.
  */
-struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
 				struct rb_entry *cached_re,
 				unsigned int ofs,
 				struct rb_entry **prev_entry,
@@ -159,7 +159,7 @@
 	return re;
 }
 
-bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
 						struct rb_root *root)
 {
 #ifdef CONFIG_F2FS_CHECK_FS
@@ -390,7 +390,7 @@
 		goto out;
 	}
 
-	en = (struct extent_node *)__lookup_rb_tree(&et->root,
+	en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
 				(struct rb_entry *)et->cached_en, pgofs);
 	if (!en)
 		goto out;
@@ -470,7 +470,7 @@
 		goto do_insert;
 	}
 
-	p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
+	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
 do_insert:
 	en = __attach_extent_node(sbi, et, ei, parent, p);
 	if (!en)
@@ -520,7 +520,7 @@
 	__drop_largest_extent(inode, fofs, len);
 
 	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
-	en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
+	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
 					(struct rb_entry *)et->cached_en, fofs,
 					(struct rb_entry **)&prev_en,
 					(struct rb_entry **)&next_en,
@@ -773,7 +773,7 @@
 	else
 		blkaddr = dn->data_blkaddr;
 
-	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
 								dn->ofs_in_node;
 	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
 }
@@ -788,7 +788,7 @@
 	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
 }
 
-void init_extent_cache_info(struct f2fs_sb_info *sbi)
+void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
 {
 	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
 	mutex_init(&sbi->extent_tree_lock);
@@ -800,7 +800,7 @@
 	atomic_set(&sbi->total_ext_node, 0);
 }
 
-int __init create_extent_cache(void)
+int __init f2fs_create_extent_cache(void)
 {
 	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
 			sizeof(struct extent_tree));
@@ -815,7 +815,7 @@
 	return 0;
 }
 
-void destroy_extent_cache(void)
+void f2fs_destroy_extent_cache(void)
 {
 	kmem_cache_destroy(extent_node_slab);
 	kmem_cache_destroy(extent_tree_slab);
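
The renamed rb-tree helpers above all serve one question: which cached extent, if any, covers a given file offset. An extent records a start offset, a length and the first backing block of a physically contiguous run, so the containment test and the block lookup reduce to simple arithmetic; roughly (plain structs as stand-ins for the kernel's extent_info/rb_entry types):

#include <stdbool.h>

struct extent_sketch {
	unsigned int fofs;	/* first file offset covered */
	unsigned int blk;	/* block address backing fofs */
	unsigned int len;	/* number of contiguous blocks */
};

/* Does this extent cover file offset `ofs`? */
static bool extent_covers(const struct extent_sketch *ei, unsigned int ofs)
{
	return ofs >= ei->fofs && ofs < ei->fofs + ei->len;
}

/* If so, the backing block follows by offset arithmetic, with no extra node-page read. */
static unsigned int extent_block_for(const struct extent_sketch *ei, unsigned int ofs)
{
	return ei->blk + (ofs - ei->fofs);
}
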
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 4c4b51a..5bcbdce 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -26,6 +26,7 @@
 #include <linux/blkdev.h>
 #include <linux/quotaops.h>
 #include <crypto/hash.h>
+#include <linux/overflow.h>
 
 #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
@@ -42,7 +43,6 @@
 	} while (0)
 #endif
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 enum {
 	FAULT_KMALLOC,
 	FAULT_KVMALLOC,
@@ -57,16 +57,20 @@
 	FAULT_TRUNCATE,
 	FAULT_IO,
 	FAULT_CHECKPOINT,
+	FAULT_DISCARD,
 	FAULT_MAX,
 };
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)
+
 struct f2fs_fault_info {
 	atomic_t inject_ops;
 	unsigned int inject_rate;
 	unsigned int inject_type;
 };
 
-extern char *fault_name[FAULT_MAX];
+extern char *f2fs_fault_name[FAULT_MAX];
 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
 #endif
 
@@ -179,8 +183,8 @@
 
 #define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
-#define DEF_MAX_DISCARD_LEN		512	/* Max. 2MB per discard */
 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
+#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
 #define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
 #define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
 #define DEF_CP_INTERVAL			60	/* 60 secs */
@@ -194,7 +198,7 @@
 };
 
 /*
- * For CP/NAT/SIT/SSA readahead
+ * indicate meta/data type
  */
 enum {
 	META_CP,
@@ -202,6 +206,8 @@
 	META_SIT,
 	META_SSA,
 	META_POR,
+	DATA_GENERIC,
+	META_GENERIC,
 };
 
 /* for the list of ino */
@@ -226,6 +232,12 @@
 	struct inode *inode;	/* vfs inode pointer */
 };
 
+struct fsync_node_entry {
+	struct list_head list;	/* list head */
+	struct page *page;	/* warm node page pointer */
+	unsigned int seq_id;	/* sequence id */
+};
+
 /* for the bitmap indicate blocks to be discarded */
 struct discard_entry {
 	struct list_head list;	/* list head */
@@ -242,9 +254,10 @@
 					(MAX_PLIST_NUM - 1) : (blk_num - 1))
 
 enum {
-	D_PREP,
-	D_SUBMIT,
-	D_DONE,
+	D_PREP,			/* initial */
+	D_PARTIAL,		/* partially submitted */
+	D_SUBMIT,		/* all submitted */
+	D_DONE,			/* finished */
 };
 
 struct discard_info {
@@ -269,7 +282,10 @@
 	struct block_device *bdev;	/* bdev */
 	unsigned short ref;		/* reference count */
 	unsigned char state;		/* state */
+	unsigned char issuing;		/* issuing discard */
 	int error;			/* bio error */
+	spinlock_t lock;		/* for state/bio_ref updating */
+	unsigned short bio_ref;		/* bio reference count */
 };
 
 enum {
@@ -283,11 +299,13 @@
 struct discard_policy {
 	int type;			/* type of discard */
 	unsigned int min_interval;	/* used for candidates exist */
+	unsigned int mid_interval;	/* used for device busy */
 	unsigned int max_interval;	/* used for candidates not exist */
 	unsigned int max_requests;	/* # of discards issued per round */
 	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
 	bool io_aware;			/* issue discard in idle time */
 	bool sync;			/* submit discard with REQ_SYNC flag */
+	bool ordered;			/* issue discard by lba order */
 	unsigned int granularity;	/* discard granularity */
 };
 
@@ -304,10 +322,12 @@
 	unsigned int max_discards;		/* max. discards to be issued */
 	unsigned int discard_granularity;	/* discard granularity */
 	unsigned int undiscard_blks;		/* # of undiscard blocks */
+	unsigned int next_pos;			/* next discard position */
 	atomic_t issued_discard;		/* # of issued discard */
 	atomic_t issing_discard;		/* # of issing discard */
 	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
 	struct rb_root root;			/* root of discard rb-tree */
+	bool rbtree_check;			/* config for consistence check */
 };
 
 /* for the list of fsync inodes, used only during recovery */
@@ -504,13 +524,12 @@
 					 */
 };
 
+#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */
+
 #define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
 
 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
 
-/* vector size for gang look-up from extent cache that consists of radix tree */
-#define EXT_TREE_VEC_SIZE	64
-
 /* for in-memory extent cache entry */
 #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
 
@@ -596,6 +615,8 @@
 #define FADVISE_HOT_BIT		0x20
 #define FADVISE_VERITY_BIT	0x40	/* reserved */
 
+#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)
+
 #define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
 #define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
 #define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
@@ -615,15 +636,20 @@
 
 #define DEF_DIR_LEVEL		0
 
+enum {
+	GC_FAILURE_PIN,
+	GC_FAILURE_ATOMIC,
+	MAX_GC_FAILURE
+};
+
 struct f2fs_inode_info {
 	struct inode vfs_inode;		/* serve a vfs inode */
 	unsigned long i_flags;		/* keep an inode flags for ioctl */
 	unsigned char i_advise;		/* use to give file attribute hints */
 	unsigned char i_dir_level;	/* use for dentry level for large dir */
-	union {
-		unsigned int i_current_depth;	/* only for directory depth */
-		unsigned short i_gc_failures;	/* only for regular file */
-	};
+	unsigned int i_current_depth;	/* only for directory depth */
+	/* for gc failure statistic */
+	unsigned int i_gc_failures[MAX_GC_FAILURE];
 	unsigned int i_pino;		/* parent inode number */
 	umode_t i_acl_mode;		/* keep file acl mode temporarily */
 
@@ -651,7 +677,9 @@
 	struct task_struct *inmem_task;	/* store inmemory task */
 	struct mutex inmem_lock;	/* lock for inmemory pages */
 	struct extent_tree *extent_tree;	/* cached extent_tree entry */
-	struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
+
+	/* avoid racing between foreground op and gc */
+	struct rw_semaphore i_gc_rwsem[2];
 	struct rw_semaphore i_mmap_sem;
 	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
 
@@ -687,22 +715,22 @@
 }
 
 static inline bool __is_discard_mergeable(struct discard_info *back,
-						struct discard_info *front)
+			struct discard_info *front, unsigned int max_len)
 {
 	return (back->lstart + back->len == front->lstart) &&
-		(back->len + front->len < DEF_MAX_DISCARD_LEN);
+		(back->len + front->len <= max_len);
 }
 
 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
-						struct discard_info *back)
+			struct discard_info *back, unsigned int max_len)
 {
-	return __is_discard_mergeable(back, cur);
+	return __is_discard_mergeable(back, cur, max_len);
 }
 
 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
-						struct discard_info *front)
+			struct discard_info *front, unsigned int max_len)
 {
-	return __is_discard_mergeable(cur, front);
+	return __is_discard_mergeable(cur, front, max_len);
 }
 
 static inline bool __is_extent_mergeable(struct extent_info *back,
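
The merge predicates above swap the removed DEF_MAX_DISCARD_LEN constant for an explicit max_len argument, so the cap on a merged discard can follow the active discard policy rather than a single compile-time limit. The test itself stays a two-condition check, roughly as follows (plain types standing in for struct discard_info):

#include <stdbool.h>

struct discard_range_sketch {
	unsigned long long lstart;	/* logical start block */
	unsigned long long len;		/* length in blocks */
};

/* `back` can absorb `front` iff the ranges touch and the merged size fits the cap. */
static bool discard_mergeable_sketch(const struct discard_range_sketch *back,
				     const struct discard_range_sketch *front,
				     unsigned long long max_len)
{
	return back->lstart + back->len == front->lstart &&
	       back->len + front->len <= max_len;
}
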
@@ -757,6 +785,7 @@
 	struct radix_tree_root nat_set_root;/* root of the nat set cache */
 	struct rw_semaphore nat_tree_lock;	/* protect nat_tree_lock */
 	struct list_head nat_entries;	/* cached nat entry list (clean) */
+	spinlock_t nat_list_lock;	/* protect clean nat entry list */
 	unsigned int nat_cnt;		/* the # of cached nat entries */
 	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
 	unsigned int nat_blocks;	/* # of nat blocks */
@@ -1002,8 +1031,10 @@
 	int need_lock;		/* indicate we need to lock cp_rwsem */
 	bool in_list;		/* indicate fio is in io_list */
 	bool is_meta;		/* indicate borrow meta inode mapping or not */
+	bool retry;		/* need to reallocate block address */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
+	unsigned char version;		/* version of the node */
 };
 
 #define is_read_io(rw) ((rw) == READ)
@@ -1055,6 +1086,7 @@
 	SBI_POR_DOING,				/* recovery is doing or not */
 	SBI_NEED_SB_WRITE,			/* need to recover superblock */
 	SBI_NEED_CP,				/* need to checkpoint */
+	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
 };
 
 enum {
@@ -1064,6 +1096,13 @@
 };
 
 enum {
+	GC_NORMAL,
+	GC_IDLE_CB,
+	GC_IDLE_GREEDY,
+	GC_URGENT,
+};
+
+enum {
 	WHINT_MODE_OFF,		/* not pass down write hints */
 	WHINT_MODE_USER,	/* try to pass down hints given by users */
 	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
@@ -1112,6 +1151,8 @@
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
 	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
 						/* bio ordering for NODE/DATA */
+	/* keep migration IO order for LFS mode */
+	struct rw_semaphore io_order_lock;
 	mempool_t *write_io_dummy;		/* Dummy pages */
 
 	/* for checkpoint */
@@ -1129,6 +1170,11 @@
 
 	struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
 
+	spinlock_t fsync_node_lock;		/* for node entry lock */
+	struct list_head fsync_node_list;	/* node list head */
+	unsigned int fsync_seg_id;		/* sequence id */
+	unsigned int fsync_node_num;		/* number of node entries */
+
 	/* for orphan inode, use 0'th array */
 	unsigned int max_orphans;		/* max orphan inodes */
 
@@ -1182,7 +1228,7 @@
 	struct percpu_counter alloc_valid_block_count;
 
 	/* writeback control */
-	atomic_t wb_sync_req;			/* count # of WB_SYNC threads */
+	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
 
 	/* valid inode count */
 	struct percpu_counter total_valid_inode_count;
@@ -1193,9 +1239,10 @@
 	struct mutex gc_mutex;			/* mutex for GC */
 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 	unsigned int cur_victim_sec;		/* current victim section num */
-
-	/* threshold for converting bg victims for fg */
-	u64 fggc_threshold;
+	unsigned int gc_mode;			/* current GC state */
+	/* for skip statistic */
+	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
+	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
 
 	/* threshold for gc trials on pinned files */
 	u64 gc_pin_file_threshold;
@@ -1260,7 +1307,7 @@
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 #define f2fs_show_injection_info(type)				\
 	printk("%sF2FS-fs : inject %s in %s of %pF\n",		\
-		KERN_INFO, fault_name[type],			\
+		KERN_INFO, f2fs_fault_name[type],		\
 		__func__, __builtin_return_address(0))
 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 {
@@ -1279,6 +1326,12 @@
 	}
 	return false;
 }
+#else
+#define f2fs_show_injection_info(type) do { } while (0)
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+	return false;
+}
 #endif
 
 /* For write statistics. Suppose sector size is 512 bytes,
@@ -1307,7 +1360,7 @@
 	struct request_list *rl = &q->root_rl;
 
 	if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
-		return 0;
+		return false;
 
 	return f2fs_time_over(sbi, REQ_TIME);
 }
@@ -1586,18 +1639,6 @@
 }
 
 /*
- * Check whether the given nid is within node id range.
- */
-static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
-{
-	if (unlikely(nid < F2FS_ROOT_INO(sbi)))
-		return -EINVAL;
-	if (unlikely(nid >= NM_I(sbi)->max_nid))
-		return -EINVAL;
-	return 0;
-}
-
-/*
  * Check whether the inode has blocks or not
  */
 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
@@ -1643,13 +1684,12 @@
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_BLOCK)) {
 		f2fs_show_injection_info(FAULT_BLOCK);
 		release = *count;
 		goto enospc;
 	}
-#endif
+
 	/*
 	 * let's increase this in prior to actual block count change in order
 	 * for f2fs_sync_file to avoid data races when deciding checkpoint.
@@ -1673,18 +1713,20 @@
 		sbi->total_valid_block_count -= diff;
 		if (!*count) {
 			spin_unlock(&sbi->stat_lock);
-			percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
 			goto enospc;
 		}
 	}
 	spin_unlock(&sbi->stat_lock);
 
-	if (unlikely(release))
+	if (unlikely(release)) {
+		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
 		dquot_release_reservation_block(inode, release);
+	}
 	f2fs_i_blocks_write(inode, *count, true, true);
 	return 0;
 
 enospc:
+	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
 	dquot_release_reservation_block(inode, release);
 	return -ENOSPC;
 }
@@ -1856,12 +1898,10 @@
 			return ret;
 	}
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_BLOCK)) {
 		f2fs_show_injection_info(FAULT_BLOCK);
 		goto enospc;
 	}
-#endif
 
 	spin_lock(&sbi->stat_lock);
 
@@ -1946,17 +1986,23 @@
 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
 						pgoff_t index, bool for_write)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	struct page *page = find_lock_page(mapping, index);
+	struct page *page;
 
-	if (page)
-		return page;
+	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+		if (!for_write)
+			page = find_get_page_flags(mapping, index,
+							FGP_LOCK | FGP_ACCESSED);
+		else
+			page = find_lock_page(mapping, index);
+		if (page)
+			return page;
 
-	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
-		f2fs_show_injection_info(FAULT_PAGE_ALLOC);
-		return NULL;
+		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+			f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+			return NULL;
+		}
 	}
-#endif
+
 	if (!for_write)
 		return grab_cache_page(mapping, index);
 	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
@@ -1966,12 +2012,11 @@
 				struct address_space *mapping, pgoff_t index,
 				int fgp_flags, gfp_t gfp_mask)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
 		f2fs_show_injection_info(FAULT_PAGE_GET);
 		return NULL;
 	}
-#endif
+
 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
 }
 
@@ -2036,12 +2081,11 @@
 			bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
 		return bio;
 	}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
 		f2fs_show_injection_info(FAULT_ALLOC_BIO);
 		return NULL;
 	}
-#endif
+
 	return bio_alloc(GFP_KERNEL, npages);
 }
 
@@ -2155,9 +2199,60 @@
 	*addr ^= mask;
 }
 
-#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
-#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)
-#define F2FS_FL_INHERITED	(FS_PROJINHERIT_FL)
+/*
+ * Inode flags
+ */
+#define F2FS_SECRM_FL			0x00000001 /* Secure deletion */
+#define F2FS_UNRM_FL			0x00000002 /* Undelete */
+#define F2FS_COMPR_FL			0x00000004 /* Compress file */
+#define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
+#define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
+#define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
+#define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
+#define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
+/* Reserved for compression usage... */
+#define F2FS_DIRTY_FL			0x00000100
+#define F2FS_COMPRBLK_FL		0x00000200 /* One or more compressed clusters */
+#define F2FS_NOCOMPR_FL			0x00000400 /* Don't compress */
+#define F2FS_ENCRYPT_FL			0x00000800 /* encrypted file */
+/* End compression flags --- maybe not all used */
+#define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
+#define F2FS_IMAGIC_FL			0x00002000 /* AFS directory */
+#define F2FS_JOURNAL_DATA_FL		0x00004000 /* file data should be journaled */
+#define F2FS_NOTAIL_FL			0x00008000 /* file tail should not be merged */
+#define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
+#define F2FS_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
+#define F2FS_HUGE_FILE_FL               0x00040000 /* Set to each huge file */
+#define F2FS_EXTENTS_FL			0x00080000 /* Inode uses extents */
+#define F2FS_EA_INODE_FL	        0x00200000 /* Inode used for large EA */
+#define F2FS_EOFBLOCKS_FL		0x00400000 /* Blocks allocated beyond EOF */
+#define F2FS_INLINE_DATA_FL		0x10000000 /* Inode has inline data. */
+#define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
+#define F2FS_RESERVED_FL		0x80000000 /* reserved for ext4 lib */
+
+#define F2FS_FL_USER_VISIBLE		0x304BDFFF /* User visible flags */
+#define F2FS_FL_USER_MODIFIABLE		0x204BC0FF /* User modifiable flags */
+
+/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
+#define F2FS_FL_XFLAG_VISIBLE		(F2FS_SYNC_FL | \
+					 F2FS_IMMUTABLE_FL | \
+					 F2FS_APPEND_FL | \
+					 F2FS_NODUMP_FL | \
+					 F2FS_NOATIME_FL | \
+					 F2FS_PROJINHERIT_FL)
+
+/* Flags that should be inherited by new inodes from their parent. */
+#define F2FS_FL_INHERITED (F2FS_SECRM_FL | F2FS_UNRM_FL | F2FS_COMPR_FL |\
+			   F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL |\
+			   F2FS_NOCOMPR_FL | F2FS_JOURNAL_DATA_FL |\
+			   F2FS_NOTAIL_FL | F2FS_DIRSYNC_FL |\
+			   F2FS_PROJINHERIT_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
 
 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
 {
@@ -2200,6 +2295,7 @@
 	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
 	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
 	FI_PIN_FILE,		/* indicate file should not be gced */
+	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
 };
 
 static inline void __mark_inode_dirty_flag(struct inode *inode,
@@ -2298,7 +2394,7 @@
 static inline void f2fs_i_gc_failures_write(struct inode *inode,
 					unsigned int count)
 {
-	F2FS_I(inode)->i_gc_failures = count;
+	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
 	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
@@ -2524,12 +2620,11 @@
 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
 					size_t size, gfp_t flags)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 		f2fs_show_injection_info(FAULT_KMALLOC);
 		return NULL;
 	}
-#endif
+
 	return kmalloc(size, flags);
 }
 
@@ -2572,12 +2667,11 @@
 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
 					size_t size, gfp_t flags)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
 		f2fs_show_injection_info(FAULT_KVMALLOC);
 		return NULL;
 	}
-#endif
+
 	return kvmalloc(size, flags);
 }
 
@@ -2597,7 +2691,7 @@
 	return F2FS_I(inode)->i_inline_xattr_size;
 }
 
-#define get_inode_mode(i) \
+#define f2fs_get_inode_mode(i) \
 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
 
@@ -2636,18 +2730,51 @@
 	spin_unlock(&sbi->iostat_lock);
 }
 
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&	\
+				(!is_read_io(fio->op) || fio->is_meta))
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+					block_t blkaddr, int type);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+					block_t blkaddr, int type)
+{
+	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"invalid blkaddr: %u, type: %d, run fsck to fix.",
+			blkaddr, type);
+		f2fs_bug_on(sbi, 1);
+	}
+}
+
+static inline bool __is_valid_data_blkaddr(block_t blkaddr)
+{
+	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+		return false;
+	return true;
+}
+
+static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
+						block_t blkaddr)
+{
+	if (!__is_valid_data_blkaddr(blkaddr))
+		return false;
+	verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
+	return true;
+}
+
 /*
  * file.c
  */
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
-void truncate_data_blocks(struct dnode_of_data *dn);
-int truncate_blocks(struct inode *inode, u64 from, bool lock);
+void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
 int f2fs_truncate(struct inode *inode);
 int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 			struct kstat *stat);
 int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
-int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
-void truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
+void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
 int f2fs_precache_extents(struct inode *inode);
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -2661,38 +2788,37 @@
 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
-int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
-void update_inode(struct inode *inode, struct page *node_page);
-void update_inode_page(struct inode *inode);
+int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
+void f2fs_update_inode(struct inode *inode, struct page *node_page);
+void f2fs_update_inode_page(struct inode *inode);
 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
 void f2fs_evict_inode(struct inode *inode);
-void handle_failed_inode(struct inode *inode);
+void f2fs_handle_failed_inode(struct inode *inode);
 
 /*
  * namei.c
  */
-int update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
 							bool hot, bool set);
 struct dentry *f2fs_get_parent(struct dentry *child);
 
 /*
  * dir.c
  */
-void set_de_type(struct f2fs_dir_entry *de, umode_t mode);
-unsigned char get_de_type(struct f2fs_dir_entry *de);
-struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
+unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
+struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
 			f2fs_hash_t namehash, int *max_slots,
 			struct f2fs_dentry_ptr *d);
 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 			unsigned int start_pos, struct fscrypt_str *fstr);
-void do_make_empty_dir(struct inode *inode, struct inode *parent,
+void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
 			struct f2fs_dentry_ptr *d);
-struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
+struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
 			const struct qstr *new_name,
 			const struct qstr *orig_name, struct page *dpage);
-void update_parent_metadata(struct inode *dir, struct inode *inode,
+void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
 			unsigned int current_depth);
-int room_for_filename(const void *bitmap, int slots, int max_slots);
+int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
 			struct fscrypt_name *fname, struct page **res_page);
@@ -2709,9 +2835,9 @@
 int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
 			const struct qstr *orig_name,
 			struct inode *inode, nid_t ino, umode_t mode);
-int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
 			struct inode *inode, nid_t ino, umode_t mode);
-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
 			struct inode *inode, nid_t ino, umode_t mode);
 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 			struct inode *dir, struct inode *inode);
@@ -2720,7 +2846,7 @@
 
 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
 {
-	return __f2fs_add_link(d_inode(dentry->d_parent), &dentry->d_name,
+	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
 				inode, inode->i_ino, inode->i_mode);
 }
 
@@ -2735,7 +2861,7 @@
 int f2fs_sync_fs(struct super_block *sb, int sync);
 extern __printf(3, 4)
 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
-int sanity_check_ckpt(struct f2fs_sb_info *sbi);
+int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
 
 /*
  * hash.c
@@ -2749,138 +2875,154 @@
 struct dnode_of_data;
 struct node_info;
 
-bool available_free_memory(struct f2fs_sb_info *sbi, int type);
-int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
-bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
-bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
-void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni);
-pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
-int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
-int truncate_inode_blocks(struct inode *inode, pgoff_t from);
-int truncate_xattr_node(struct inode *inode);
-int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
-int remove_inode_page(struct inode *inode);
-struct page *new_inode_page(struct inode *inode);
-struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs);
-void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
-struct page *get_node_page_ra(struct page *parent, int start);
-void move_node_page(struct page *node_page, int gc_type);
-int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
-			struct writeback_control *wbc, bool atomic);
-int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
+int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
+bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
+int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
+bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
+bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+						struct node_info *ni);
+pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
+int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
+int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
+int f2fs_truncate_xattr_node(struct inode *inode);
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+					unsigned int seq_id);
+int f2fs_remove_inode_page(struct inode *inode);
+struct page *f2fs_new_inode_page(struct inode *inode);
+struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
+void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
+struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
+struct page *f2fs_get_node_page_ra(struct page *parent, int start);
+void f2fs_move_node_page(struct page *node_page, int gc_type);
+int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+			struct writeback_control *wbc, bool atomic,
+			unsigned int *seq_id);
+int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
+			struct writeback_control *wbc,
 			bool do_balance, enum iostat_type io_type);
-void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
-bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
-void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
-void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
-int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
-void recover_inline_xattr(struct inode *inode, struct page *page);
-int recover_xattr_data(struct inode *inode, struct page *page);
-int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-void restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
+void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
+void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
+int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
+void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
+int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum);
-void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
-int build_node_manager(struct f2fs_sb_info *sbi);
-void destroy_node_manager(struct f2fs_sb_info *sbi);
-int __init create_node_manager_caches(void);
-void destroy_node_manager_caches(void);
+void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
+void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
+int __init f2fs_create_node_manager_caches(void);
+void f2fs_destroy_node_manager_caches(void);
 
 /*
  * segment.c
  */
-bool need_SSR(struct f2fs_sb_info *sbi);
-void register_inmem_page(struct inode *inode, struct page *page);
-void drop_inmem_pages_all(struct f2fs_sb_info *sbi);
-void drop_inmem_pages(struct inode *inode);
-void drop_inmem_page(struct inode *inode, struct page *page);
-int commit_inmem_pages(struct inode *inode);
+bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
+void f2fs_register_inmem_page(struct inode *inode, struct page *page);
+void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
+void f2fs_drop_inmem_pages(struct inode *inode);
+void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
+int f2fs_commit_inmem_pages(struct inode *inode);
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi);
 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
-int create_flush_cmd_control(struct f2fs_sb_info *sbi);
+int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
-void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
-bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
-void drop_discard_cmd(struct f2fs_sb_info *sbi);
-void stop_discard_thread(struct f2fs_sb_info *sbi);
+void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
+void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
 bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
-void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
-void release_discard_addrs(struct f2fs_sb_info *sbi);
-int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
-void allocate_new_segments(struct f2fs_sb_info *sbi);
+void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+					struct cp_control *cpc);
+void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
+int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
-bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc);
-struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
-void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr);
-void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
+					struct cp_control *cpc);
+struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
+void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
+					block_t blk_addr);
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
 						enum iostat_type io_type);
-void write_node_page(unsigned int nid, struct f2fs_io_info *fio);
-void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio);
-int rewrite_data_page(struct f2fs_io_info *fio);
-void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
+void f2fs_outplace_write_data(struct dnode_of_data *dn,
+			struct f2fs_io_info *fio);
+int f2fs_inplace_write_data(struct f2fs_io_info *fio);
+void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 			block_t old_blkaddr, block_t new_blkaddr,
 			bool recover_curseg, bool recover_newaddr);
 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
 			block_t old_addr, block_t new_addr,
 			unsigned char version, bool recover_curseg,
 			bool recover_newaddr);
-void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 			block_t old_blkaddr, block_t *new_blkaddr,
 			struct f2fs_summary *sum, int type,
 			struct f2fs_io_info *fio, bool add_list);
 void f2fs_wait_on_page_writeback(struct page *page,
 			enum page_type type, bool ordered);
 void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr);
-void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
-void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
-int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
+void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
+void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
+int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
 			unsigned int val, int alloc);
-void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
-int build_segment_manager(struct f2fs_sb_info *sbi);
-void destroy_segment_manager(struct f2fs_sb_info *sbi);
-int __init create_segment_manager_caches(void);
-void destroy_segment_manager_caches(void);
-int rw_hint_to_seg_type(enum rw_hint hint);
-enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi, enum page_type type,
-				enum temp_type temp);
+void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
+void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
+int __init f2fs_create_segment_manager_caches(void);
+void f2fs_destroy_segment_manager_caches(void);
+int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
+enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+			enum page_type type, enum temp_type temp);
 
 /*
  * checkpoint.c
  */
 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
-struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
-struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
-bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type);
-int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+					block_t blkaddr, int type);
+int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 			int type, bool sync);
-void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
-long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
+void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
+long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 			long nr_to_write, enum iostat_type io_type);
-void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
-void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
-void release_ino_entry(struct f2fs_sb_info *sbi, bool all);
-bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
-void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
+void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
+void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
+bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
+void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
 					unsigned int devidx, int type);
-bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
+bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
 					unsigned int devidx, int type);
 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
-int acquire_orphan_inode(struct f2fs_sb_info *sbi);
-void release_orphan_inode(struct f2fs_sb_info *sbi);
-void add_orphan_inode(struct inode *inode);
-void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
-int recover_orphan_inodes(struct f2fs_sb_info *sbi);
-int get_valid_checkpoint(struct f2fs_sb_info *sbi);
-void update_dirty_page(struct inode *inode, struct page *page);
-void remove_dirty_inode(struct inode *inode);
-int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
-int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
-void init_ino_entry_info(struct f2fs_sb_info *sbi);
-int __init create_checkpoint_caches(void);
-void destroy_checkpoint_caches(void);
+int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
+void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
+void f2fs_add_orphan_inode(struct inode *inode);
+void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
+int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
+int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
+void f2fs_update_dirty_page(struct inode *inode, struct page *page);
+void f2fs_remove_dirty_inode(struct inode *inode);
+int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
+int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
+int __init f2fs_create_checkpoint_caches(void);
+void f2fs_destroy_checkpoint_caches(void);
 
 /*
  * data.c
@@ -2893,34 +3035,31 @@
 				enum page_type type);
 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
-int f2fs_submit_page_write(struct f2fs_io_info *fio);
+void f2fs_submit_page_write(struct f2fs_io_info *fio);
 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
 			block_t blk_addr, struct bio *bio);
 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
-void set_data_blkaddr(struct dnode_of_data *dn);
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
-int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
-int reserve_new_block(struct dnode_of_data *dn);
+int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
+int f2fs_reserve_new_block(struct dnode_of_data *dn);
 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
-struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
 			int op_flags, bool for_write);
-struct page *find_data_page(struct inode *inode, pgoff_t index);
-struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
+struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
 			bool for_write);
-struct page *get_new_data_page(struct inode *inode,
+struct page *f2fs_get_new_data_page(struct inode *inode,
 			struct page *ipage, pgoff_t index, bool new_i_size);
-int do_write_data_page(struct f2fs_io_info *fio);
+int f2fs_do_write_data_page(struct f2fs_io_info *fio);
 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 			int create, int flag);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			u64 start, u64 len);
-bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
-bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-int __f2fs_write_data_pages(struct address_space *mapping,
-						struct writeback_control *wbc,
-						enum iostat_type io_type);
+bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
+bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
 void f2fs_invalidate_page(struct page *page, unsigned int offset,
 			unsigned int length);
 int f2fs_release_page(struct page *page, gfp_t wait);
@@ -2929,22 +3068,23 @@
 			struct page *page, enum migrate_mode mode);
 #endif
 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
+void f2fs_clear_radix_tree_dirty_tag(struct page *page);
 
 /*
  * gc.c
  */
-int start_gc_thread(struct f2fs_sb_info *sbi);
-void stop_gc_thread(struct f2fs_sb_info *sbi);
-block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
+int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
+void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
+block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
 			unsigned int segno);
-void build_gc_manager(struct f2fs_sb_info *sbi);
+void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
 
 /*
  * recovery.c
  */
-int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
-bool space_for_roll_forward(struct f2fs_sb_info *sbi);
+int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
+bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
 
 /*
  * debug.c
@@ -2982,6 +3122,7 @@
 	int bg_node_segs, bg_data_segs;
 	int tot_blks, data_blks, node_blks;
 	int bg_data_blks, bg_node_blks;
+	unsigned long long skipped_atomic_files[2];
 	int curseg[NR_CURSEG_TYPE];
 	int cursec[NR_CURSEG_TYPE];
 	int curzone[NR_CURSEG_TYPE];
@@ -3148,29 +3289,31 @@
 extern const struct inode_operations f2fs_symlink_inode_operations;
 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
 extern const struct inode_operations f2fs_special_inode_operations;
-extern struct kmem_cache *inode_entry_slab;
+extern struct kmem_cache *f2fs_inode_entry_slab;
 
 /*
  * inline.c
  */
 bool f2fs_may_inline_data(struct inode *inode);
 bool f2fs_may_inline_dentry(struct inode *inode);
-void read_inline_data(struct page *page, struct page *ipage);
-void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from);
+void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
+void f2fs_truncate_inline_inode(struct inode *inode,
+						struct page *ipage, u64 from);
 int f2fs_read_inline_data(struct inode *inode, struct page *page);
 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
 int f2fs_convert_inline_inode(struct inode *inode);
 int f2fs_write_inline_data(struct inode *inode, struct page *page);
-bool recover_inline_data(struct inode *inode, struct page *npage);
-struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
+bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
 			struct fscrypt_name *fname, struct page **res_page);
-int make_empty_inline_dir(struct inode *inode, struct inode *parent,
+int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
 			struct page *ipage);
 int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
 			const struct qstr *orig_name,
 			struct inode *inode, nid_t ino, umode_t mode);
-void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
-			struct inode *dir, struct inode *inode);
+void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
+				struct page *page, struct inode *dir,
+				struct inode *inode);
 bool f2fs_empty_inline_dir(struct inode *dir);
 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
 			struct fscrypt_str *fstr);
@@ -3191,17 +3334,17 @@
 /*
  * extent_cache.c
  */
-struct rb_entry *__lookup_rb_tree(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
 				struct rb_entry *cached_re, unsigned int ofs);
-struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
 				struct rb_root *root, struct rb_node **parent,
 				unsigned int ofs);
-struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
 		struct rb_entry *cached_re, unsigned int ofs,
 		struct rb_entry **prev_entry, struct rb_entry **next_entry,
 		struct rb_node ***insert_p, struct rb_node **insert_parent,
 		bool force);
-bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
 						struct rb_root *root);
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
 bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
@@ -3213,9 +3356,9 @@
 void f2fs_update_extent_cache(struct dnode_of_data *dn);
 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
 			pgoff_t fofs, block_t blkaddr, unsigned int len);
-void init_extent_cache_info(struct f2fs_sb_info *sbi);
-int __init create_extent_cache(void);
-void destroy_extent_cache(void);
+void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
+int __init f2fs_create_extent_cache(void);
+void f2fs_destroy_extent_cache(void);
 
 /*
  * sysfs.c
@@ -3314,7 +3457,7 @@
 
 	return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
 #else
-	return 0;
+	return false;
 #endif
 }
 
@@ -3336,4 +3479,11 @@
 			fscrypt_using_hardware_encryption(inode));
 }
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+							unsigned int type);
+#else
+#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
+#endif
+
 #endif
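
A recurring theme in this header is worth noting: the fault-injection machinery now supplies stubs for the !CONFIG_F2FS_FAULT_INJECTION case (a time_to_inject() that returns false, a no-op f2fs_show_injection_info(), and the do-while(0) f2fs_build_fault_attr() just above), so the .c hunks elsewhere in the patch can drop their per-call-site #ifdef guards. The shape of that pattern in isolation, with hypothetical example_* names rather than the real f2fs symbols:

#include <stdbool.h>
#include <stdio.h>

/* Header side: one #ifdef, real helpers when the option is on ... */
#ifdef CONFIG_EXAMPLE_FAULT_INJECTION
static inline bool example_time_to_inject(int type)
{
	return true;	/* the real version does rate/type bookkeeping here */
}
#define example_show_injection_info(type) \
	printf("injecting fault %d\n", (type))
#else
/* ... and zero-cost stubs when it is off. */
static inline bool example_time_to_inject(int type) { return false; }
#define example_show_injection_info(type) do { } while (0)
#endif

/* Call sites then compile unconditionally, with no #ifdef of their own. */
static int example_alloc_path(void)
{
	if (example_time_to_inject(0)) {
		example_show_injection_info(0);
		return -1;	/* simulated failure, e.g. -ENOSPC */
	}
	return 0;
}

The f2fs_grab_cache_page() hunk takes the related IS_ENABLED() route for the same effect, letting the compiler rather than the preprocessor discard the dead branch.
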
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 44a2e32..4636b01 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -97,7 +97,8 @@
 	/* page is wholly or partially inside EOF */
 	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
 						i_size_read(inode)) {
-		unsigned offset;
+		loff_t offset;
+
 		offset = i_size_read(inode) & ~PAGE_MASK;
 		zero_user_segment(page, offset, PAGE_SIZE);
 	}
@@ -159,17 +160,18 @@
 		cp_reason = CP_SB_NEED_CP;
 	else if (file_wrong_pino(inode))
 		cp_reason = CP_WRONG_PINO;
-	else if (!space_for_roll_forward(sbi))
+	else if (!f2fs_space_for_roll_forward(sbi))
 		cp_reason = CP_NO_SPC_ROLL;
-	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
+	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
 		cp_reason = CP_NODE_NEED_CP;
 	else if (test_opt(sbi, FASTBOOT))
 		cp_reason = CP_FASTBOOT_MODE;
 	else if (F2FS_OPTION(sbi).active_logs == 2)
 		cp_reason = CP_SPEC_LOG_NUM;
 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
-		need_dentry_mark(sbi, inode->i_ino) &&
-		exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
+		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
+		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
+							TRANS_DIR_INO))
 		cp_reason = CP_RECOVER_DIR;
 
 	return cp_reason;
@@ -180,7 +182,7 @@
 	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
 	bool ret = false;
 	/* But we need to avoid that there are some inode updates */
-	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
+	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
 		ret = true;
 	f2fs_put_page(i, 0);
 	return ret;
@@ -213,6 +215,7 @@
 		.nr_to_write = LONG_MAX,
 		.for_reclaim = 0,
 	};
+	unsigned int seq_id = 0;
 
 	if (unlikely(f2fs_readonly(inode->i_sb)))
 		return 0;
@@ -240,14 +243,14 @@
 	 * if there is no written data, don't waste time to write recovery info.
 	 */
 	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
-			!exist_written_data(sbi, ino, APPEND_INO)) {
+			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
 
 		/* it may call write_inode just prior to fsync */
 		if (need_inode_page_update(sbi, ino))
 			goto go_write;
 
 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
-				exist_written_data(sbi, ino, UPDATE_INO))
+				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
 			goto flush_out;
 		goto out;
 	}
@@ -274,7 +277,9 @@
 		goto out;
 	}
 sync_nodes:
-	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
+	atomic_inc(&sbi->wb_sync_req[NODE]);
+	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
+	atomic_dec(&sbi->wb_sync_req[NODE]);
 	if (ret)
 		goto out;
 
@@ -284,7 +289,7 @@
 		goto out;
 	}
 
-	if (need_inode_block_update(sbi, ino)) {
+	if (f2fs_need_inode_block_update(sbi, ino)) {
 		f2fs_mark_inode_dirty_sync(inode, true);
 		f2fs_write_inode(inode, NULL);
 		goto sync_nodes;
@@ -299,21 +304,21 @@
 	 * given fsync mark.
 	 */
 	if (!atomic) {
-		ret = wait_on_node_pages_writeback(sbi, ino);
+		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
 		if (ret)
 			goto out;
 	}
 
 	/* once recovery info is written, don't need to tack this */
-	remove_ino_entry(sbi, ino, APPEND_INO);
+	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
 	clear_inode_flag(inode, FI_APPEND_WRITE);
 flush_out:
 	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
 		ret = f2fs_issue_flush(sbi, inode->i_ino);
 	if (!ret) {
-		remove_ino_entry(sbi, ino, UPDATE_INO);
+		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
 		clear_inode_flag(inode, FI_UPDATE_WRITE);
-		remove_ino_entry(sbi, ino, FLUSH_INO);
+		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
 	}
 	f2fs_update_time(sbi, REQ_TIME);
 out:
@@ -332,28 +337,29 @@
 static pgoff_t __get_first_dirty_index(struct address_space *mapping,
 						pgoff_t pgofs, int whence)
 {
-	struct pagevec pvec;
+	struct page *page;
 	int nr_pages;
 
 	if (whence != SEEK_DATA)
 		return 0;
 
 	/* find first dirty page index */
-	pagevec_init(&pvec, 0);
-	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
-					PAGECACHE_TAG_DIRTY, 1);
-	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
-	pagevec_release(&pvec);
+	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
+				      1, &page);
+	if (!nr_pages)
+		return ULONG_MAX;
+	pgofs = page->index;
+	put_page(page);
 	return pgofs;
 }
 
-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
-							int whence)
+static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
+				pgoff_t dirty, pgoff_t pgofs, int whence)
 {
 	switch (whence) {
 	case SEEK_DATA:
 		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
-			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
+			is_valid_data_blkaddr(sbi, blkaddr))
 			return true;
 		break;
 	case SEEK_HOLE:
@@ -393,13 +399,13 @@
 
 	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
+		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
 		if (err && err != -ENOENT) {
 			goto fail;
 		} else if (err == -ENOENT) {
 			/* direct node does not exists */
 			if (whence == SEEK_DATA) {
-				pgofs = get_next_page_offset(&dn, pgofs);
+				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
 				continue;
 			} else {
 				goto found;
@@ -413,10 +419,19 @@
 				dn.ofs_in_node++, pgofs++,
 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 			block_t blkaddr;
+
 			blkaddr = datablock_addr(dn.inode,
 					dn.node_page, dn.ofs_in_node);
 
-			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+			if (__is_valid_data_blkaddr(blkaddr) &&
+				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+						blkaddr, DATA_GENERIC)) {
+				f2fs_put_dnode(&dn);
+				goto fail;
+			}
+
+			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+							pgofs, whence)) {
 				f2fs_put_dnode(&dn);
 				goto found;
 			}
@@ -487,7 +502,7 @@
 	return dquot_file_open(inode, filp);
 }
 
-void truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_node *raw_node;
@@ -503,12 +518,18 @@
 
 	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
 		block_t blkaddr = le32_to_cpu(*addr);
+
 		if (blkaddr == NULL_ADDR)
 			continue;
 
 		dn->data_blkaddr = NULL_ADDR;
-		set_data_blkaddr(dn);
-		invalidate_blocks(sbi, blkaddr);
+		f2fs_set_data_blkaddr(dn);
+
+		if (__is_valid_data_blkaddr(blkaddr) &&
+			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+			continue;
+
+		f2fs_invalidate_blocks(sbi, blkaddr);
 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
 		nr_free++;
@@ -520,7 +541,7 @@
 		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
 		 * we will invalidate all blkaddr in the whole range.
 		 */
-		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
+		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
 							dn->inode) + ofs;
 		f2fs_update_extent_cache_range(dn, fofs, 0, len);
 		dec_valid_block_count(sbi, dn->inode, nr_free);
@@ -532,15 +553,15 @@
 					 dn->ofs_in_node, nr_free);
 }
 
-void truncate_data_blocks(struct dnode_of_data *dn)
+void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
 {
-	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
+	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
 }
 
 static int truncate_partial_data_page(struct inode *inode, u64 from,
 								bool cache_only)
 {
-	unsigned offset = from & (PAGE_SIZE - 1);
+	loff_t offset = from & (PAGE_SIZE - 1);
 	pgoff_t index = from >> PAGE_SHIFT;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
@@ -556,7 +577,7 @@
 		return 0;
 	}
 
-	page = get_lock_data_page(inode, index, true);
+	page = f2fs_get_lock_data_page(inode, index, true);
 	if (IS_ERR(page))
 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
 truncate_out:
@@ -571,7 +592,7 @@
 	return 0;
 }
 
-int truncate_blocks(struct inode *inode, u64 from, bool lock)
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct dnode_of_data dn;
@@ -590,21 +611,21 @@
 	if (lock)
 		f2fs_lock_op(sbi);
 
-	ipage = get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto out;
 	}
 
 	if (f2fs_has_inline_data(inode)) {
-		truncate_inline_inode(inode, ipage, from);
+		f2fs_truncate_inline_inode(inode, ipage, from);
 		f2fs_put_page(ipage, 1);
 		truncate_page = true;
 		goto out;
 	}
 
 	set_new_dnode(&dn, inode, ipage, NULL, 0);
-	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
+	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
 	if (err) {
 		if (err == -ENOENT)
 			goto free_next;
@@ -617,13 +638,13 @@
 	f2fs_bug_on(sbi, count < 0);
 
 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
-		truncate_data_blocks_range(&dn, count);
+		f2fs_truncate_data_blocks_range(&dn, count);
 		free_from += count;
 	}
 
 	f2fs_put_dnode(&dn);
 free_next:
-	err = truncate_inode_blocks(inode, free_from);
+	err = f2fs_truncate_inode_blocks(inode, free_from);
 out:
 	if (lock)
 		f2fs_unlock_op(sbi);
@@ -649,12 +670,11 @@
 
 	trace_f2fs_truncate(inode);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
 		f2fs_show_injection_info(FAULT_TRUNCATE);
 		return -EIO;
 	}
-#endif
+
 	/* we should check inline_data size */
 	if (!f2fs_may_inline_data(inode)) {
 		err = f2fs_convert_inline_inode(inode);
@@ -662,7 +682,7 @@
 			return err;
 	}
 
-	err = truncate_blocks(inode, i_size_read(inode), true);
+	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
 	if (err)
 		return err;
 
@@ -688,16 +708,16 @@
 		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
 	}
 
-	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
-	if (flags & FS_APPEND_FL)
+	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
+	if (flags & F2FS_APPEND_FL)
 		stat->attributes |= STATX_ATTR_APPEND;
-	if (flags & FS_COMPR_FL)
+	if (flags & F2FS_COMPR_FL)
 		stat->attributes |= STATX_ATTR_COMPRESSED;
 	if (f2fs_encrypted_inode(inode))
 		stat->attributes |= STATX_ATTR_ENCRYPTED;
-	if (flags & FS_IMMUTABLE_FL)
+	if (flags & F2FS_IMMUTABLE_FL)
 		stat->attributes |= STATX_ATTR_IMMUTABLE;
-	if (flags & FS_NODUMP_FL)
+	if (flags & F2FS_NODUMP_FL)
 		stat->attributes |= STATX_ATTR_NODUMP;
 
 	stat->attributes_mask |= (STATX_ATTR_APPEND |
@@ -778,22 +798,26 @@
 	}
 
 	if (attr->ia_valid & ATTR_SIZE) {
-		if (attr->ia_size <= i_size_read(inode)) {
-			down_write(&F2FS_I(inode)->i_mmap_sem);
-			truncate_setsize(inode, attr->ia_size);
-			err = f2fs_truncate(inode);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
-			if (err)
-				return err;
-		} else {
-			/*
-			 * do not trim all blocks after i_size if target size is
-			 * larger than i_size.
-			 */
-			down_write(&F2FS_I(inode)->i_mmap_sem);
-			truncate_setsize(inode, attr->ia_size);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
+		bool to_smaller = (attr->ia_size <= i_size_read(inode));
 
+		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		down_write(&F2FS_I(inode)->i_mmap_sem);
+
+		truncate_setsize(inode, attr->ia_size);
+
+		if (to_smaller)
+			err = f2fs_truncate(inode);
+		/*
+		 * do not trim all blocks after i_size if target size is
+		 * larger than i_size.
+		 */
+		up_write(&F2FS_I(inode)->i_mmap_sem);
+		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+		if (err)
+			return err;
+
+		if (!to_smaller) {
 			/* should convert inline inode here */
 			if (!f2fs_may_inline_data(inode)) {
 				err = f2fs_convert_inline_inode(inode);
@@ -813,7 +837,7 @@
 	__setattr_copy(inode, attr);
 
 	if (attr->ia_valid & ATTR_MODE) {
-		err = posix_acl_chmod(inode, get_inode_mode(inode));
+		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
 		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
 			inode->i_mode = F2FS_I(inode)->i_acl_mode;
 			clear_inode_flag(inode, FI_ACL_MODE);
@@ -852,7 +876,7 @@
 	f2fs_balance_fs(sbi, true);
 
 	f2fs_lock_op(sbi);
-	page = get_new_data_page(inode, NULL, index, false);
+	page = f2fs_get_new_data_page(inode, NULL, index, false);
 	f2fs_unlock_op(sbi);
 
 	if (IS_ERR(page))
@@ -865,7 +889,7 @@
 	return 0;
 }
 
-int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
+int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
 {
 	int err;
 
@@ -874,10 +898,11 @@
 		pgoff_t end_offset, count;
 
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
+		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
 		if (err) {
 			if (err == -ENOENT) {
-				pg_start = get_next_page_offset(&dn, pg_start);
+				pg_start = f2fs_get_next_page_offset(&dn,
+								pg_start);
 				continue;
 			}
 			return err;
@@ -888,7 +913,7 @@
 
 		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
 
-		truncate_data_blocks_range(&dn, count);
+		f2fs_truncate_data_blocks_range(&dn, count);
 		f2fs_put_dnode(&dn);
 
 		pg_start += count;
@@ -939,14 +964,19 @@
 
 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
+
+			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 			down_write(&F2FS_I(inode)->i_mmap_sem);
+
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
 
 			f2fs_lock_op(sbi);
-			ret = truncate_hole(inode, pg_start, pg_end);
+			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
 			f2fs_unlock_op(sbi);
+
 			up_write(&F2FS_I(inode)->i_mmap_sem);
+			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		}
 	}
 
@@ -962,7 +992,7 @@
 
 next_dnode:
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
+	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
 	if (ret && ret != -ENOENT) {
 		return ret;
 	} else if (ret == -ENOENT) {
@@ -979,7 +1009,7 @@
 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
 		*blkaddr = datablock_addr(dn.inode,
 					dn.node_page, dn.ofs_in_node);
-		if (!is_checkpointed_data(sbi, *blkaddr)) {
+		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
 
 			if (test_opt(sbi, LFS)) {
 				f2fs_put_dnode(&dn);
@@ -1012,10 +1042,10 @@
 			continue;
 
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
+		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
 		if (ret) {
 			dec_valid_block_count(sbi, inode, 1);
-			invalidate_blocks(sbi, *blkaddr);
+			f2fs_invalidate_blocks(sbi, *blkaddr);
 		} else {
 			f2fs_update_data_blkaddr(&dn, *blkaddr);
 		}
@@ -1045,18 +1075,23 @@
 			pgoff_t ilen;
 
 			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
-			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
+			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
 			if (ret)
 				return ret;
 
-			get_node_info(sbi, dn.nid, &ni);
+			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+			if (ret) {
+				f2fs_put_dnode(&dn);
+				return ret;
+			}
+
 			ilen = min((pgoff_t)
 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
 						dn.ofs_in_node, len - i);
 			do {
 				dn.data_blkaddr = datablock_addr(dn.inode,
 						dn.node_page, dn.ofs_in_node);
-				truncate_data_blocks_range(&dn, 1);
+				f2fs_truncate_data_blocks_range(&dn, 1);
 
 				if (do_replace[i]) {
 					f2fs_i_blocks_write(src_inode,
@@ -1079,10 +1114,11 @@
 		} else {
 			struct page *psrc, *pdst;
 
-			psrc = get_lock_data_page(src_inode, src + i, true);
+			psrc = f2fs_get_lock_data_page(src_inode,
+							src + i, true);
 			if (IS_ERR(psrc))
 				return PTR_ERR(psrc);
-			pdst = get_new_data_page(dst_inode, NULL, dst + i,
+			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
 								true);
 			if (IS_ERR(pdst)) {
 				f2fs_put_page(psrc, 1);
@@ -1093,7 +1129,8 @@
 			f2fs_put_page(pdst, 1);
 			f2fs_put_page(psrc, 1);
 
-			ret = truncate_hole(src_inode, src + i, src + i + 1);
+			ret = f2fs_truncate_hole(src_inode,
+						src + i, src + i + 1);
 			if (ret)
 				return ret;
 			i++;
@@ -1115,12 +1152,14 @@
 		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
 
 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
-					sizeof(block_t) * olen, GFP_KERNEL);
+					array_size(olen, sizeof(block_t)),
+					GFP_KERNEL);
 		if (!src_blkaddr)
 			return -ENOMEM;
 
 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
-					sizeof(int) * olen, GFP_KERNEL);
+					array_size(olen, sizeof(int)),
+					GFP_KERNEL);
 		if (!do_replace) {
 			kvfree(src_blkaddr);
 			return -ENOMEM;
@@ -1146,31 +1185,39 @@
 	return 0;
 
 roll_back:
-	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
 	kvfree(src_blkaddr);
 	kvfree(do_replace);
 	return ret;
 }
 
-static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+	pgoff_t start = offset >> PAGE_SHIFT;
+	pgoff_t end = (offset + len) >> PAGE_SHIFT;
 	int ret;
 
 	f2fs_balance_fs(sbi, true);
+
+	/* avoid gc operation during block exchange */
+	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	down_write(&F2FS_I(inode)->i_mmap_sem);
+
 	f2fs_lock_op(sbi);
-
 	f2fs_drop_extent_tree(inode);
-
+	truncate_pagecache(inode, offset);
 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
 	f2fs_unlock_op(sbi);
+
+	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	return ret;
 }
 
 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 {
-	pgoff_t pg_start, pg_end;
 	loff_t new_size;
 	int ret;
 
@@ -1185,37 +1232,27 @@
 	if (ret)
 		return ret;
 
-	pg_start = offset >> PAGE_SHIFT;
-	pg_end = (offset + len) >> PAGE_SHIFT;
-
-	/* avoid gc operation during block exchange */
-	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
-
-	down_write(&F2FS_I(inode)->i_mmap_sem);
 	/* write out all dirty pages from offset */
 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
-	truncate_pagecache(inode, offset);
-
-	ret = f2fs_do_collapse(inode, pg_start, pg_end);
+	ret = f2fs_do_collapse(inode, offset, len);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
 	/* write out all moved pages, if possible */
+	down_write(&F2FS_I(inode)->i_mmap_sem);
 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);
 
 	new_size = i_size_read(inode) - len;
 	truncate_pagecache(inode, new_size);
 
-	ret = truncate_blocks(inode, new_size, true);
+	ret = f2fs_truncate_blocks(inode, new_size, true);
+	up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
-out_unlock:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
 	return ret;
 }
 
@@ -1235,7 +1272,7 @@
 	}
 
 	dn->ofs_in_node = ofs_in_node;
-	ret = reserve_new_blocks(dn, count);
+	ret = f2fs_reserve_new_blocks(dn, count);
 	if (ret)
 		return ret;
 
@@ -1244,7 +1281,7 @@
 		dn->data_blkaddr = datablock_addr(dn->inode,
 					dn->node_page, dn->ofs_in_node);
 		/*
-		 * reserve_new_blocks will not guarantee entire block
+		 * f2fs_reserve_new_blocks will not guarantee entire block
 		 * allocation.
 		 */
 		if (dn->data_blkaddr == NULL_ADDR) {
@@ -1252,9 +1289,9 @@
 			break;
 		}
 		if (dn->data_blkaddr != NEW_ADDR) {
-			invalidate_blocks(sbi, dn->data_blkaddr);
+			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
 			dn->data_blkaddr = NEW_ADDR;
-			set_data_blkaddr(dn);
+			f2fs_set_data_blkaddr(dn);
 		}
 	}
 
@@ -1281,12 +1318,9 @@
 	if (ret)
 		return ret;
 
-	down_write(&F2FS_I(inode)->i_mmap_sem);
 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
 	if (ret)
-		goto out_sem;
-
-	truncate_pagecache_range(inode, offset, offset + len - 1);
+		return ret;
 
 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1298,7 +1332,7 @@
 		ret = fill_zero(inode, pg_start, off_start,
 						off_end - off_start);
 		if (ret)
-			goto out_sem;
+			return ret;
 
 		new_size = max_t(loff_t, new_size, offset + len);
 	} else {
@@ -1306,7 +1340,7 @@
 			ret = fill_zero(inode, pg_start++, off_start,
 						PAGE_SIZE - off_start);
 			if (ret)
-				goto out_sem;
+				return ret;
 
 			new_size = max_t(loff_t, new_size,
 					(loff_t)pg_start << PAGE_SHIFT);
@@ -1317,12 +1351,21 @@
 			unsigned int end_offset;
 			pgoff_t end;
 
+			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			down_write(&F2FS_I(inode)->i_mmap_sem);
+
+			truncate_pagecache_range(inode,
+				(loff_t)index << PAGE_SHIFT,
+				((loff_t)pg_end << PAGE_SHIFT) - 1);
+
 			f2fs_lock_op(sbi);
 
 			set_new_dnode(&dn, inode, NULL, NULL, 0);
-			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
+			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 			if (ret) {
 				f2fs_unlock_op(sbi);
+				up_write(&F2FS_I(inode)->i_mmap_sem);
+				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 				goto out;
 			}
 
@@ -1331,7 +1374,10 @@
 
 			ret = f2fs_do_zero_range(&dn, index, end);
 			f2fs_put_dnode(&dn);
+
 			f2fs_unlock_op(sbi);
+			up_write(&F2FS_I(inode)->i_mmap_sem);
+			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
 			f2fs_balance_fs(sbi, dn.node_changed);
 
@@ -1359,9 +1405,6 @@
 		else
 			f2fs_i_size_write(inode, new_size);
 	}
-out_sem:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-
 	return ret;
 }
 
@@ -1390,26 +1433,27 @@
 
 	f2fs_balance_fs(sbi, true);
 
-	/* avoid gc operation during block exchange */
-	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
-
 	down_write(&F2FS_I(inode)->i_mmap_sem);
-	ret = truncate_blocks(inode, i_size_read(inode), true);
+	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+	up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (ret)
-		goto out;
+		return ret;
 
 	/* write out all dirty pages from offset */
 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	if (ret)
-		goto out;
-
-	truncate_pagecache(inode, offset);
+		return ret;
 
 	pg_start = offset >> PAGE_SHIFT;
 	pg_end = (offset + len) >> PAGE_SHIFT;
 	delta = pg_end - pg_start;
 	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
+	/* avoid gc operation during block exchange */
+	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	down_write(&F2FS_I(inode)->i_mmap_sem);
+	truncate_pagecache(inode, offset);
+
 	while (!ret && idx > pg_start) {
 		nr = idx - pg_start;
 		if (nr > delta)
@@ -1423,16 +1467,17 @@
 					idx + delta, nr, false);
 		f2fs_unlock_op(sbi);
 	}
+	up_write(&F2FS_I(inode)->i_mmap_sem);
+	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
 	/* write out all moved pages, if possible */
+	down_write(&F2FS_I(inode)->i_mmap_sem);
 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);
+	up_write(&F2FS_I(inode)->i_mmap_sem);
 
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
-out:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
 	return ret;
 }
 
@@ -1475,7 +1520,7 @@
 		last_off = map.m_lblk + map.m_len - 1;
 
 		/* update new size to the failed position */
-		new_size = (last_off == pg_end) ? offset + len:
+		new_size = (last_off == pg_end) ? offset + len :
 					(loff_t)(last_off + 1) << PAGE_SHIFT;
 	} else {
 		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
@@ -1555,13 +1600,13 @@
 
 	/* some remained atomic pages should discarded */
 	if (f2fs_is_atomic_file(inode))
-		drop_inmem_pages(inode);
+		f2fs_drop_inmem_pages(inode);
 	if (f2fs_is_volatile_file(inode)) {
-		clear_inode_flag(inode, FI_VOLATILE_FILE);
-		stat_dec_volatile_write(inode);
 		set_inode_flag(inode, FI_DROP_CACHE);
 		filemap_fdatawrite(inode->i_mapping);
 		clear_inode_flag(inode, FI_DROP_CACHE);
+		clear_inode_flag(inode, FI_VOLATILE_FILE);
+		stat_dec_volatile_write(inode);
 	}
 	return 0;
 }
@@ -1578,7 +1623,7 @@
 	 */
 	if (f2fs_is_atomic_file(inode) &&
 			F2FS_I(inode)->inmem_task == current)
-		drop_inmem_pages(inode);
+		f2fs_drop_inmem_pages(inode);
 	return 0;
 }
 
@@ -1586,7 +1631,15 @@
 {
 	struct inode *inode = file_inode(filp);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
-	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
+	unsigned int flags = fi->i_flags;
+
+	if (f2fs_encrypted_inode(inode))
+		flags |= F2FS_ENCRYPT_FL;
+	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
+		flags |= F2FS_INLINE_DATA_FL;
+
+	flags &= F2FS_FL_USER_VISIBLE;
+
 	return put_user(flags, (int __user *)arg);
 }
 
@@ -1620,15 +1673,15 @@
 
 	oldflags = fi->i_flags;
 
-	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
+	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) {
 		if (!capable(CAP_LINUX_IMMUTABLE)) {
 			ret = -EPERM;
 			goto unlock_out;
 		}
 	}
 
-	flags = flags & FS_FL_USER_MODIFIABLE;
-	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
+	flags = flags & (F2FS_FL_USER_MODIFIABLE);
+	flags |= oldflags & ~(F2FS_FL_USER_MODIFIABLE);
 	fi->i_flags = flags;
 
 	inode->i_ctime = current_time(inode);
@@ -1664,31 +1717,35 @@
 
 	inode_lock(inode);
 
-	if (f2fs_is_atomic_file(inode))
+	if (f2fs_is_atomic_file(inode)) {
+		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
+			ret = -EINVAL;
 		goto out;
+	}
 
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
 		goto out;
 
-	set_inode_flag(inode, FI_ATOMIC_FILE);
-	set_inode_flag(inode, FI_HOT_DATA);
-	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
 	if (!get_dirty_pages(inode))
-		goto inc_stat;
+		goto skip_flush;
 
 	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
 		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
 					inode->i_ino, get_dirty_pages(inode));
 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
 	if (ret) {
-		clear_inode_flag(inode, FI_ATOMIC_FILE);
-		clear_inode_flag(inode, FI_HOT_DATA);
+		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		goto out;
 	}
+skip_flush:
+	set_inode_flag(inode, FI_ATOMIC_FILE);
+	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
-inc_stat:
+	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	F2FS_I(inode)->inmem_task = current;
 	stat_inc_atomic_write(inode);
 	stat_update_max_atomic_write(inode);
@@ -1710,29 +1767,34 @@
 	if (ret)
 		return ret;
 
+	f2fs_balance_fs(F2FS_I_SB(inode), true);
+
 	inode_lock(inode);
 
-	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
-
-	if (f2fs_is_volatile_file(inode))
+	if (f2fs_is_volatile_file(inode)) {
+		ret = -EINVAL;
 		goto err_out;
+	}
 
 	if (f2fs_is_atomic_file(inode)) {
-		ret = commit_inmem_pages(inode);
+		ret = f2fs_commit_inmem_pages(inode);
 		if (ret)
 			goto err_out;
 
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
 		if (!ret) {
 			clear_inode_flag(inode, FI_ATOMIC_FILE);
-			clear_inode_flag(inode, FI_HOT_DATA);
+			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
 			stat_dec_atomic_write(inode);
 		}
 	} else {
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
 	}
 err_out:
-	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
+		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+		ret = -EINVAL;
+	}
 	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
@@ -1817,13 +1879,15 @@
 	inode_lock(inode);
 
 	if (f2fs_is_atomic_file(inode))
-		drop_inmem_pages(inode);
+		f2fs_drop_inmem_pages(inode);
 	if (f2fs_is_volatile_file(inode)) {
 		clear_inode_flag(inode, FI_VOLATILE_FILE);
 		stat_dec_volatile_write(inode);
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
 	}
 
+	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+
 	inode_unlock(inode);
 
 	mnt_drop_write_file(filp);
@@ -1837,7 +1901,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct super_block *sb = sbi->sb;
 	__u32 in;
-	int ret;
+	int ret = 0;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1845,9 +1909,11 @@
 	if (get_user(in, (__u32 __user *)arg))
 		return -EFAULT;
 
-	ret = mnt_want_write_file(filp);
-	if (ret)
-		return ret;
+	if (in != F2FS_GOING_DOWN_FULLSYNC) {
+		ret = mnt_want_write_file(filp);
+		if (ret)
+			return ret;
+	}
 
 	switch (in) {
 	case F2FS_GOING_DOWN_FULLSYNC:
@@ -1858,6 +1924,7 @@
 		}
 		if (sb) {
 			f2fs_stop_checkpoint(sbi, false);
+			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 			thaw_bdev(sb->s_bdev, sb);
 		}
 		break;
@@ -1867,28 +1934,32 @@
 		if (ret)
 			goto out;
 		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	case F2FS_GOING_DOWN_NOSYNC:
 		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	case F2FS_GOING_DOWN_METAFLUSH:
-		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
+		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
 		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	default:
 		ret = -EINVAL;
 		goto out;
 	}
 
-	stop_gc_thread(sbi);
-	stop_discard_thread(sbi);
+	f2fs_stop_gc_thread(sbi);
+	f2fs_stop_discard_thread(sbi);
 
-	drop_discard_cmd(sbi);
+	f2fs_drop_discard_cmd(sbi);
 	clear_opt(sbi, DISCARD);
 
 	f2fs_update_time(sbi, REQ_TIME);
 out:
-	mnt_drop_write_file(filp);
+	if (in != F2FS_GOING_DOWN_FULLSYNC)
+		mnt_drop_write_file(filp);
 	return ret;
 }
 
@@ -2047,15 +2118,15 @@
 	if (f2fs_readonly(sbi->sb))
 		return -EROFS;
 
+	end = range.start + range.len;
+	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
+		return -EINVAL;
+	}
+
 	ret = mnt_want_write_file(filp);
 	if (ret)
 		return ret;
 
-	end = range.start + range.len;
-	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
-		ret = -EINVAL;
-		goto out;
-	}
 do_more:
 	if (!range.sync) {
 		if (!mutex_trylock(&sbi->gc_mutex)) {
@@ -2104,7 +2175,7 @@
 	struct inode *inode = file_inode(filp);
 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
 					.m_seg_type = NO_CHECK_TYPE };
-	struct extent_info ei = {0,0,0};
+	struct extent_info ei = {0, 0, 0};
 	pgoff_t pg_start, pg_end, next_pgofs;
 	unsigned int blk_per_seg = sbi->blocks_per_seg;
 	unsigned int total = 0, sec_num;
@@ -2113,7 +2184,7 @@
 	int err;
 
 	/* if in-place-update policy is enabled, don't waste time here */
-	if (should_update_inplace(inode, NULL))
+	if (f2fs_should_update_inplace(inode, NULL))
 		return -EINVAL;
 
 	pg_start = range->start >> PAGE_SHIFT;
@@ -2208,7 +2279,7 @@
 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
 			struct page *page;
 
-			page = get_lock_data_page(inode, idx, true);
+			page = f2fs_get_lock_data_page(inode, idx, true);
 			if (IS_ERR(page)) {
 				err = PTR_ERR(page);
 				goto clear_out;
@@ -2319,15 +2390,10 @@
 	}
 
 	inode_lock(src);
-	down_write(&F2FS_I(src)->dio_rwsem[WRITE]);
 	if (src != dst) {
 		ret = -EBUSY;
 		if (!inode_trylock(dst))
 			goto out;
-		if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) {
-			inode_unlock(dst);
-			goto out;
-		}
 	}
 
 	ret = -EINVAL;
@@ -2372,6 +2438,14 @@
 		goto out_unlock;
 
 	f2fs_balance_fs(sbi, true);
+
+	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+	if (src != dst) {
+		ret = -EBUSY;
+		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+			goto out_src;
+	}
+
 	f2fs_lock_op(sbi);
 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
 				pos_out >> F2FS_BLKSIZE_BITS,
@@ -2384,13 +2458,15 @@
 			f2fs_i_size_write(dst, dst_osize);
 	}
 	f2fs_unlock_op(sbi);
+
+	if (src != dst)
+		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+out_src:
+	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 out_unlock:
-	if (src != dst) {
-		up_write(&F2FS_I(dst)->dio_rwsem[WRITE]);
+	if (src != dst)
 		inode_unlock(dst);
-	}
 out:
-	up_write(&F2FS_I(src)->dio_rwsem[WRITE]);
 	inode_unlock(src);
 	return ret;
 }
@@ -2517,12 +2593,14 @@
 
 	/* Use i_gc_failures for normal file as a risk signal. */
 	if (inc)
-		f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);
+		f2fs_i_gc_failures_write(inode,
+				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
 
-	if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
+	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"%s: Enable GC = ino %lx after %x GC trials\n",
-			__func__, inode->i_ino, fi->i_gc_failures);
+			__func__, inode->i_ino,
+			fi->i_gc_failures[GC_FAILURE_PIN]);
 		clear_inode_flag(inode, FI_PIN_FILE);
 		return -EAGAIN;
 	}
@@ -2553,14 +2631,14 @@
 
 	inode_lock(inode);
 
-	if (should_update_outplace(inode, NULL)) {
+	if (f2fs_should_update_outplace(inode, NULL)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (!pin) {
 		clear_inode_flag(inode, FI_PIN_FILE);
-		F2FS_I(inode)->i_gc_failures = 1;
+		f2fs_i_gc_failures_write(inode, 0);
 		goto done;
 	}
 
@@ -2573,7 +2651,7 @@
 		goto out;
 
 	set_inode_flag(inode, FI_PIN_FILE);
-	ret = F2FS_I(inode)->i_gc_failures;
+	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
 done:
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 out:
@@ -2588,7 +2666,7 @@
 	__u32 pin = 0;
 
 	if (is_inode_flag_set(inode, FI_PIN_FILE))
-		pin = F2FS_I(inode)->i_gc_failures;
+		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
 	return put_user(pin, (u32 __user *)arg);
 }
 
@@ -2612,9 +2690,9 @@
 	while (map.m_lblk < end) {
 		map.m_len = end - map.m_lblk;
 
-		down_write(&fi->dio_rwsem[WRITE]);
+		down_write(&fi->i_gc_rwsem[WRITE]);
 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
-		up_write(&fi->dio_rwsem[WRITE]);
+		up_write(&fi->i_gc_rwsem[WRITE]);
 		if (err)
 			return err;
 
@@ -2690,7 +2768,6 @@
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
-	struct blk_plug plug;
 	ssize_t ret;
 
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
@@ -2720,6 +2797,8 @@
 						iov_iter_count(from)) ||
 					f2fs_has_inline_data(inode) ||
 					f2fs_force_buffered_io(inode, WRITE)) {
+						clear_inode_flag(inode,
+								FI_NO_PREALLOC);
 						inode_unlock(inode);
 						return -EAGAIN;
 				}
@@ -2735,9 +2814,7 @@
 				return err;
 			}
 		}
-		blk_start_plug(&plug);
 		ret = __generic_file_write_iter(iocb, from);
-		blk_finish_plug(&plug);
 		clear_inode_flag(inode, FI_NO_PREALLOC);
 
 		/* if we couldn't write data, we should deallocate blocks. */
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 66044fa..c6322ef 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -53,12 +53,10 @@
 			continue;
 		}
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 			f2fs_show_injection_info(FAULT_CHECKPOINT);
 			f2fs_stop_checkpoint(sbi, false);
 		}
-#endif
 
 		if (!sb_start_write_trylock(sbi->sb))
 			continue;
@@ -76,7 +74,7 @@
 		 * invalidated soon after by user update or deletion.
 		 * So, I'd like to wait some time to collect dirty segments.
 		 */
-		if (gc_th->gc_urgent) {
+		if (sbi->gc_mode == GC_URGENT) {
 			wait_ms = gc_th->urgent_sleep_time;
 			mutex_lock(&sbi->gc_mutex);
 			goto do_gc;
@@ -114,7 +112,7 @@
 	return 0;
 }
 
-int start_gc_thread(struct f2fs_sb_info *sbi)
+int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_gc_kthread *gc_th;
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
@@ -131,8 +129,6 @@
 	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
 	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
 
-	gc_th->gc_idle = 0;
-	gc_th->gc_urgent = 0;
 	gc_th->gc_wake= 0;
 
 	sbi->gc_thread = gc_th;
@@ -148,7 +144,7 @@
 	return err;
 }
 
-void stop_gc_thread(struct f2fs_sb_info *sbi)
+void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
 	if (!gc_th)
@@ -158,21 +154,19 @@
 	sbi->gc_thread = NULL;
 }
 
-static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
+static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
 {
 	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
 
-	if (!gc_th)
-		return gc_mode;
-
-	if (gc_th->gc_idle) {
-		if (gc_th->gc_idle == 1)
-			gc_mode = GC_CB;
-		else if (gc_th->gc_idle == 2)
-			gc_mode = GC_GREEDY;
-	}
-	if (gc_th->gc_urgent)
+	switch (sbi->gc_mode) {
+	case GC_IDLE_CB:
+		gc_mode = GC_CB;
+		break;
+	case GC_IDLE_GREEDY:
+	case GC_URGENT:
 		gc_mode = GC_GREEDY;
+		break;
+	}
 	return gc_mode;
 }
 
@@ -187,7 +181,7 @@
 		p->max_search = dirty_i->nr_dirty[type];
 		p->ofs_unit = 1;
 	} else {
-		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
+		p->gc_mode = select_gc_type(sbi, gc_type);
 		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
 		p->max_search = dirty_i->nr_dirty[DIRTY];
 		p->ofs_unit = sbi->segs_per_sec;
@@ -195,7 +189,7 @@
 
 	/* we need to check every dirty segments in the FG_GC case */
 	if (gc_type != FG_GC &&
-			(sbi->gc_thread && !sbi->gc_thread->gc_urgent) &&
+			(sbi->gc_mode != GC_URGENT) &&
 			p->max_search > sbi->max_victim_search)
 		p->max_search = sbi->max_victim_search;
 
@@ -234,10 +228,6 @@
 	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
 		if (sec_usage_check(sbi, secno))
 			continue;
-
-		if (no_fggc_candidate(sbi, secno))
-			continue;
-
 		clear_bit(secno, dirty_i->victim_secmap);
 		return GET_SEG_FROM_SEC(sbi, secno);
 	}
@@ -377,9 +367,6 @@
 			goto next;
 		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
 			goto next;
-		if (gc_type == FG_GC && p.alloc_mode == LFS &&
-					no_fggc_candidate(sbi, secno))
-			goto next;
 
 		cost = get_gc_cost(sbi, segno, &p);
 
@@ -440,7 +427,7 @@
 		iput(inode);
 		return;
 	}
-	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
 	new_ie->inode = inode;
 
 	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
@@ -454,7 +441,7 @@
 		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
 		iput(ie->inode);
 		list_del(&ie->list);
-		kmem_cache_free(inode_entry_slab, ie);
+		kmem_cache_free(f2fs_inode_entry_slab, ie);
 	}
 }
 
@@ -484,12 +471,16 @@
 	block_t start_addr;
 	int off;
 	int phase = 0;
+	bool fggc = (gc_type == FG_GC);
 
 	start_addr = START_BLOCK(sbi, segno);
 
 next_step:
 	entry = sum;
 
+	if (fggc && phase == 2)
+		atomic_inc(&sbi->wb_sync_req[NODE]);
+
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		nid_t nid = le32_to_cpu(entry->nid);
 		struct page *node_page;
@@ -503,39 +494,46 @@
 			continue;
 
 		if (phase == 0) {
-			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
 							META_NAT, true);
 			continue;
 		}
 
 		if (phase == 1) {
-			ra_node_page(sbi, nid);
+			f2fs_ra_node_page(sbi, nid);
 			continue;
 		}
 
 		/* phase == 2 */
-		node_page = get_node_page(sbi, nid);
+		node_page = f2fs_get_node_page(sbi, nid);
 		if (IS_ERR(node_page))
 			continue;
 
-		/* block may become invalid during get_node_page */
+		/* block may become invalid during f2fs_get_node_page */
 		if (check_valid_map(sbi, segno, off) == 0) {
 			f2fs_put_page(node_page, 1);
 			continue;
 		}
 
-		get_node_info(sbi, nid, &ni);
+		if (f2fs_get_node_info(sbi, nid, &ni)) {
+			f2fs_put_page(node_page, 1);
+			continue;
+		}
+
 		if (ni.blk_addr != start_addr + off) {
 			f2fs_put_page(node_page, 1);
 			continue;
 		}
 
-		move_node_page(node_page, gc_type);
+		f2fs_move_node_page(node_page, gc_type);
 		stat_inc_node_blk_count(sbi, 1, gc_type);
 	}
 
 	if (++phase < 3)
 		goto next_step;
+
+	if (fggc)
+		atomic_dec(&sbi->wb_sync_req[NODE]);
 }
 
 /*
@@ -545,7 +543,7 @@
  * as indirect or double indirect node blocks, are given, it must be a caller's
  * bug.
  */
-block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
+block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 {
 	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
 	unsigned int bidx;
@@ -576,11 +574,14 @@
 	nid = le32_to_cpu(sum->nid);
 	ofs_in_node = le16_to_cpu(sum->ofs_in_node);
 
-	node_page = get_node_page(sbi, nid);
+	node_page = f2fs_get_node_page(sbi, nid);
 	if (IS_ERR(node_page))
 		return false;
 
-	get_node_info(sbi, nid, dni);
+	if (f2fs_get_node_info(sbi, nid, dni)) {
+		f2fs_put_page(node_page, 1);
+		return false;
+	}
 
 	if (sum->version != dni->version) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
@@ -598,12 +599,78 @@
 	return true;
 }
 
+static int ra_data_block(struct inode *inode, pgoff_t index)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct address_space *mapping = inode->i_mapping;
+	struct dnode_of_data dn;
+	struct page *page;
+	struct extent_info ei = {0, 0, 0};
+	struct f2fs_io_info fio = {
+		.sbi = sbi,
+		.ino = inode->i_ino,
+		.type = DATA,
+		.temp = COLD,
+		.op = REQ_OP_READ,
+		.op_flags = 0,
+		.encrypted_page = NULL,
+		.in_list = false,
+		.retry = false,
+	};
+	int err;
+
+	page = f2fs_grab_cache_page(mapping, index, true);
+	if (!page)
+		return -ENOMEM;
+
+	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+		dn.data_blkaddr = ei.blk + index - ei.fofs;
+		goto got_it;
+	}
+
+	set_new_dnode(&dn, inode, NULL, NULL, 0);
+	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+	if (err)
+		goto put_page;
+	f2fs_put_dnode(&dn);
+
+	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+						DATA_GENERIC))) {
+		err = -EFAULT;
+		goto put_page;
+	}
+got_it:
+	/* read page */
+	fio.page = page;
+	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+
+	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
+					dn.data_blkaddr,
+					FGP_LOCK | FGP_CREAT, GFP_NOFS);
+	if (!fio.encrypted_page) {
+		err = -ENOMEM;
+		goto put_page;
+	}
+
+	err = f2fs_submit_page_bio(&fio);
+	if (err)
+		goto put_encrypted_page;
+	f2fs_put_page(fio.encrypted_page, 0);
+	f2fs_put_page(page, 1);
+	return 0;
+put_encrypted_page:
+	f2fs_put_page(fio.encrypted_page, 1);
+put_page:
+	f2fs_put_page(page, 1);
+	return err;
+}
+
 /*
  * Move data block via META_MAPPING while keeping locked data page.
  * This can be used to move blocks, aka LBAs, directly on disk.
  */
 static void move_data_block(struct inode *inode, block_t bidx,
-					unsigned int segno, int off)
+				int gc_type, unsigned int segno, int off)
 {
 	struct f2fs_io_info fio = {
 		.sbi = F2FS_I_SB(inode),
@@ -614,13 +681,15 @@
 		.op_flags = 0,
 		.encrypted_page = NULL,
 		.in_list = false,
+		.retry = false,
 	};
 	struct dnode_of_data dn;
 	struct f2fs_summary sum;
 	struct node_info ni;
-	struct page *page;
+	struct page *page, *mpage;
 	block_t newaddr;
 	int err;
+	bool lfs_mode = test_opt(fio.sbi, LFS);
 
 	/* do not read out */
 	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
@@ -630,8 +699,11 @@
 	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
 		goto out;
 
-	if (f2fs_is_atomic_file(inode))
+	if (f2fs_is_atomic_file(inode)) {
+		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
+		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
 		goto out;
+	}
 
 	if (f2fs_is_pinned_file(inode)) {
 		f2fs_pin_file_control(inode, true);
@@ -639,7 +711,7 @@
 	}
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
+	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
 	if (err)
 		goto out;
 
@@ -654,14 +726,20 @@
 	 */
 	f2fs_wait_on_page_writeback(page, DATA, true);
 
-	get_node_info(fio.sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+	if (err)
+		goto put_out;
+
 	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
 
 	/* read page */
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 
-	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+	if (lfs_mode)
+		down_write(&fio.sbi->io_order_lock);
+
+	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
 					&sum, CURSEG_COLD_DATA, NULL, false);
 
 	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
@@ -671,6 +749,23 @@
 		goto recover_block;
 	}
 
+	mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+					fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
+	if (mpage) {
+		bool updated = false;
+
+		if (PageUptodate(mpage)) {
+			memcpy(page_address(fio.encrypted_page),
+					page_address(mpage), PAGE_SIZE);
+			updated = true;
+		}
+		f2fs_put_page(mpage, 1);
+		invalidate_mapping_pages(META_MAPPING(fio.sbi),
+					fio.old_blkaddr, fio.old_blkaddr);
+		if (updated)
+			goto write_page;
+	}
+
 	err = f2fs_submit_page_bio(&fio);
 	if (err)
 		goto put_page_out;
@@ -687,6 +782,7 @@
 		goto put_page_out;
 	}
 
+write_page:
 	set_page_dirty(fio.encrypted_page);
 	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -701,8 +797,8 @@
 	fio.op = REQ_OP_WRITE;
 	fio.op_flags = REQ_SYNC;
 	fio.new_blkaddr = newaddr;
-	err = f2fs_submit_page_write(&fio);
-	if (err) {
+	f2fs_submit_page_write(&fio);
+	if (fio.retry) {
 		if (PageWriteback(fio.encrypted_page))
 			end_page_writeback(fio.encrypted_page);
 		goto put_page_out;
@@ -717,8 +813,10 @@
 put_page_out:
 	f2fs_put_page(fio.encrypted_page, 1);
 recover_block:
+	if (lfs_mode)
+		up_write(&fio.sbi->io_order_lock);
 	if (err)
-		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
+		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
 								true, true);
 put_out:
 	f2fs_put_dnode(&dn);
@@ -731,15 +829,18 @@
 {
 	struct page *page;
 
-	page = get_lock_data_page(inode, bidx, true);
+	page = f2fs_get_lock_data_page(inode, bidx, true);
 	if (IS_ERR(page))
 		return;
 
 	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
 		goto out;
 
-	if (f2fs_is_atomic_file(inode))
+	if (f2fs_is_atomic_file(inode)) {
+		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
+		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
 		goto out;
+	}
 	if (f2fs_is_pinned_file(inode)) {
 		if (gc_type == FG_GC)
 			f2fs_pin_file_control(inode, true);
@@ -773,15 +874,20 @@
 		f2fs_wait_on_page_writeback(page, DATA, true);
 		if (clear_page_dirty_for_io(page)) {
 			inode_dec_dirty_pages(inode);
-			remove_dirty_inode(inode);
+			f2fs_remove_dirty_inode(inode);
 		}
 
 		set_cold_data(page);
 
-		err = do_write_data_page(&fio);
-		if (err == -ENOMEM && is_dirty) {
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
-			goto retry;
+		err = f2fs_do_write_data_page(&fio);
+		if (err) {
+			clear_cold_data(page);
+			if (err == -ENOMEM) {
+				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				goto retry;
+			}
+			if (is_dirty)
+				set_page_dirty(page);
 		}
 	}
 out:
@@ -825,13 +931,13 @@
 			continue;
 
 		if (phase == 0) {
-			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
 							META_NAT, true);
 			continue;
 		}
 
 		if (phase == 1) {
-			ra_node_page(sbi, nid);
+			f2fs_ra_node_page(sbi, nid);
 			continue;
 		}
 
@@ -840,7 +946,7 @@
 			continue;
 
 		if (phase == 2) {
-			ra_node_page(sbi, dni.ino);
+			f2fs_ra_node_page(sbi, dni.ino);
 			continue;
 		}
 
@@ -851,23 +957,31 @@
 			if (IS_ERR(inode) || is_bad_inode(inode))
 				continue;
 
-			/* if inode uses special I/O path, let's go phase 3 */
+			if (!down_write_trylock(
+				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
+				iput(inode);
+				sbi->skipped_gc_rwsem++;
+				continue;
+			}
+
+			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
+								ofs_in_node;
+
 			if (f2fs_post_read_required(inode)) {
+				int err = ra_data_block(inode, start_bidx);
+
+				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+				if (err) {
+					iput(inode);
+					continue;
+				}
 				add_gc_inode(gc_list, inode);
 				continue;
 			}
 
-			if (!down_write_trylock(
-				&F2FS_I(inode)->dio_rwsem[WRITE])) {
-				iput(inode);
-				continue;
-			}
-
-			start_bidx = start_bidx_of_node(nofs, inode);
-			data_page = get_read_data_page(inode,
-					start_bidx + ofs_in_node, REQ_RAHEAD,
-					true);
-			up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+			data_page = f2fs_get_read_data_page(inode,
+						start_bidx, REQ_RAHEAD, true);
+			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 			if (IS_ERR(data_page)) {
 				iput(inode);
 				continue;
@@ -885,11 +999,12 @@
 			bool locked = false;
 
 			if (S_ISREG(inode->i_mode)) {
-				if (!down_write_trylock(&fi->dio_rwsem[READ]))
+				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
 					continue;
 				if (!down_write_trylock(
-						&fi->dio_rwsem[WRITE])) {
-					up_write(&fi->dio_rwsem[READ]);
+						&fi->i_gc_rwsem[WRITE])) {
+					sbi->skipped_gc_rwsem++;
+					up_write(&fi->i_gc_rwsem[READ]);
 					continue;
 				}
 				locked = true;
@@ -898,17 +1013,18 @@
 				inode_dio_wait(inode);
 			}
 
-			start_bidx = start_bidx_of_node(nofs, inode)
+			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
 								+ ofs_in_node;
 			if (f2fs_post_read_required(inode))
-				move_data_block(inode, start_bidx, segno, off);
+				move_data_block(inode, start_bidx, gc_type,
+								segno, off);
 			else
 				move_data_page(inode, start_bidx, gc_type,
 								segno, off);
 
 			if (locked) {
-				up_write(&fi->dio_rwsem[WRITE]);
-				up_write(&fi->dio_rwsem[READ]);
+				up_write(&fi->i_gc_rwsem[WRITE]);
+				up_write(&fi->i_gc_rwsem[READ]);
 			}
 
 			stat_inc_data_blk_count(sbi, 1, gc_type);
@@ -947,12 +1063,12 @@
 
 	/* readahead multi ssa blocks those have contiguous address */
 	if (sbi->segs_per_sec > 1)
-		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
 					sbi->segs_per_sec, META_SSA, true);
 
 	/* reference all summary page */
 	while (segno < end_segno) {
-		sum_page = get_sum_page(sbi, segno++);
+		sum_page = f2fs_get_sum_page(sbi, segno++);
 		unlock_page(sum_page);
 	}
 
@@ -971,7 +1087,13 @@
 			goto next;
 
 		sum = page_address(sum_page);
-		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+		if (type != GET_SUM_TYPE((&sum->footer))) {
+			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
+				"type [%d, %d] in SSA and SIT",
+				segno, type, GET_SUM_TYPE((&sum->footer)));
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			goto next;
+		}
 
 		/*
 		 * this is to avoid deadlock:
@@ -1018,6 +1140,9 @@
 		.ilist = LIST_HEAD_INIT(gc_list.ilist),
 		.iroot = RADIX_TREE_INIT(GFP_NOFS),
 	};
+	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
+	unsigned long long first_skipped;
+	unsigned int skipped_round = 0, round = 0;
 
 	trace_f2fs_gc_begin(sbi->sb, sync, background,
 				get_pages(sbi, F2FS_DIRTY_NODES),
@@ -1029,6 +1154,8 @@
 				prefree_segments(sbi));
 
 	cpc.reason = __get_cp_reason(sbi);
+	sbi->skipped_gc_rwsem = 0;
+	first_skipped = last_skipped;
 gc_more:
 	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
 		ret = -EINVAL;
@@ -1046,7 +1173,7 @@
 		 * secure free segments which doesn't need fggc any more.
 		 */
 		if (prefree_segments(sbi)) {
-			ret = write_checkpoint(sbi, &cpc);
+			ret = f2fs_write_checkpoint(sbi, &cpc);
 			if (ret)
 				goto stop;
 		}
@@ -1069,17 +1196,36 @@
 		sec_freed++;
 	total_freed += seg_freed;
 
+	if (gc_type == FG_GC) {
+		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
+						sbi->skipped_gc_rwsem)
+			skipped_round++;
+		last_skipped = sbi->skipped_atomic_files[FG_GC];
+		round++;
+	}
+
 	if (gc_type == FG_GC)
 		sbi->cur_victim_sec = NULL_SEGNO;
 
-	if (!sync) {
-		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+	if (sync)
+		goto stop;
+
+	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+		if (skipped_round <= MAX_SKIP_GC_COUNT ||
+					skipped_round * 2 < round) {
 			segno = NULL_SEGNO;
 			goto gc_more;
 		}
 
+		if (first_skipped < last_skipped &&
+				(last_skipped - first_skipped) >
+						sbi->skipped_gc_rwsem) {
+			f2fs_drop_inmem_pages_all(sbi, true);
+			segno = NULL_SEGNO;
+			goto gc_more;
+		}
 		if (gc_type == FG_GC)
-			ret = write_checkpoint(sbi, &cpc);
+			ret = f2fs_write_checkpoint(sbi, &cpc);
 	}
 stop:
 	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
@@ -1103,19 +1249,10 @@
 	return ret;
 }
 
-void build_gc_manager(struct f2fs_sb_info *sbi)
+void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
 {
-	u64 main_count, resv_count, ovp_count;
-
 	DIRTY_I(sbi)->v_ops = &default_v_ops;
 
-	/* threshold of # of valid blocks in a section for victims of FG_GC */
-	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
-	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
-	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
-
-	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
-				BLKS_PER_SEC(sbi), (main_count - resv_count));
 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
 
 	/* give warm/cold data area from slower device */
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index b0045d4..c8619e4 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -36,8 +36,6 @@
 	unsigned int no_gc_sleep_time;
 
 	/* for changing gc mode */
-	unsigned int gc_idle;
-	unsigned int gc_urgent;
 	unsigned int gc_wake;
 };
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 156ac4f..df71d26 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -43,7 +43,7 @@
 	return true;
 }
 
-void read_inline_data(struct page *page, struct page *ipage)
+void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
 {
 	struct inode *inode = page->mapping->host;
 	void *src_addr, *dst_addr;
@@ -65,7 +65,8 @@
 		SetPageUptodate(page);
 }
 
-void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from)
+void f2fs_truncate_inline_inode(struct inode *inode,
+					struct page *ipage, u64 from)
 {
 	void *addr;
 
@@ -97,7 +98,7 @@
 						path, current->comm);
 	}
 
-	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage)) {
 		trace_android_fs_dataread_end(inode, page_offset(page),
 					      PAGE_SIZE);
@@ -115,7 +116,7 @@
 	if (page->index)
 		zero_user_segment(page, 0, PAGE_SIZE);
 	else
-		read_inline_data(page, ipage);
+		f2fs_do_read_inline_data(page, ipage);
 
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
@@ -138,6 +139,7 @@
 		.encrypted_page = NULL,
 		.io_type = FS_DATA_IO,
 	};
+	struct node_info ni;
 	int dirty, err;
 
 	if (!f2fs_exist_data(dn->inode))
@@ -147,9 +149,27 @@
 	if (err)
 		return err;
 
+	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+	if (err) {
+		f2fs_put_dnode(dn);
+		return err;
+	}
+
+	fio.version = ni.version;
+
+	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+		f2fs_put_dnode(dn);
+		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+		f2fs_msg(fio.sbi->sb, KERN_WARNING,
+			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+			"run fsck to fix.",
+			__func__, dn->inode->i_ino, dn->data_blkaddr);
+		return -EINVAL;
+	}
+
 	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
 
-	read_inline_data(page, dn->inode_page);
+	f2fs_do_read_inline_data(page, dn->inode_page);
 	set_page_dirty(page);
 
 	/* clear dirty state */
@@ -160,18 +180,18 @@
 	ClearPageError(page);
 	fio.old_blkaddr = dn->data_blkaddr;
 	set_inode_flag(dn->inode, FI_HOT_DATA);
-	write_data_page(dn, &fio);
+	f2fs_outplace_write_data(dn, &fio);
 	f2fs_wait_on_page_writeback(page, DATA, true);
 	if (dirty) {
 		inode_dec_dirty_pages(dn->inode);
-		remove_dirty_inode(dn->inode);
+		f2fs_remove_dirty_inode(dn->inode);
 	}
 
 	/* this converted inline_data should be recovered. */
 	set_inode_flag(dn->inode, FI_APPEND_WRITE);
 
 	/* clear inline data and flag after data writeback */
-	truncate_inline_inode(dn->inode, dn->inode_page, 0);
+	f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
 	clear_inline_node(dn->inode_page);
 clear_out:
 	stat_dec_inline_inode(dn->inode);
@@ -196,7 +216,7 @@
 
 	f2fs_lock_op(sbi);
 
-	ipage = get_node_page(sbi, inode->i_ino);
+	ipage = f2fs_get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
 		err = PTR_ERR(ipage);
 		goto out;
@@ -222,12 +242,10 @@
 {
 	void *src_addr, *dst_addr;
 	struct dnode_of_data dn;
-	struct address_space *mapping = page_mapping(page);
-	unsigned long flags;
 	int err;
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
 	if (err)
 		return err;
 
@@ -245,10 +263,7 @@
 	kunmap_atomic(src_addr);
 	set_page_dirty(dn.inode_page);
 
-	spin_lock_irqsave(&mapping->tree_lock, flags);
-	radix_tree_tag_clear(&mapping->page_tree, page_index(page),
-			     PAGECACHE_TAG_DIRTY);
-	spin_unlock_irqrestore(&mapping->tree_lock, flags);
+	f2fs_clear_radix_tree_dirty_tag(page);
 
 	set_inode_flag(inode, FI_APPEND_WRITE);
 	set_inode_flag(inode, FI_DATA_EXIST);
@@ -258,7 +273,7 @@
 	return 0;
 }
 
-bool recover_inline_data(struct inode *inode, struct page *npage)
+bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode *ri = NULL;
@@ -279,7 +294,7 @@
 	if (f2fs_has_inline_data(inode) &&
 			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
 process_inline:
-		ipage = get_node_page(sbi, inode->i_ino);
+		ipage = f2fs_get_node_page(sbi, inode->i_ino);
 		f2fs_bug_on(sbi, IS_ERR(ipage));
 
 		f2fs_wait_on_page_writeback(ipage, NODE, true);
@@ -297,20 +312,20 @@
 	}
 
 	if (f2fs_has_inline_data(inode)) {
-		ipage = get_node_page(sbi, inode->i_ino);
+		ipage = f2fs_get_node_page(sbi, inode->i_ino);
 		f2fs_bug_on(sbi, IS_ERR(ipage));
-		truncate_inline_inode(inode, ipage, 0);
+		f2fs_truncate_inline_inode(inode, ipage, 0);
 		clear_inode_flag(inode, FI_INLINE_DATA);
 		f2fs_put_page(ipage, 1);
 	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
-		if (truncate_blocks(inode, 0, false))
+		if (f2fs_truncate_blocks(inode, 0, false))
 			return false;
 		goto process_inline;
 	}
 	return false;
 }
 
-struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
+struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
 			struct fscrypt_name *fname, struct page **res_page)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
@@ -321,7 +336,7 @@
 	void *inline_dentry;
 	f2fs_hash_t namehash;
 
-	ipage = get_node_page(sbi, dir->i_ino);
+	ipage = f2fs_get_node_page(sbi, dir->i_ino);
 	if (IS_ERR(ipage)) {
 		*res_page = ipage;
 		return NULL;
@@ -332,7 +347,7 @@
 	inline_dentry = inline_data_addr(dir, ipage);
 
 	make_dentry_ptr_inline(dir, &d, inline_dentry);
-	de = find_target_dentry(fname, namehash, NULL, &d);
+	de = f2fs_find_target_dentry(fname, namehash, NULL, &d);
 	unlock_page(ipage);
 	if (de)
 		*res_page = ipage;
@@ -342,7 +357,7 @@
 	return de;
 }
 
-int make_empty_inline_dir(struct inode *inode, struct inode *parent,
+int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
 							struct page *ipage)
 {
 	struct f2fs_dentry_ptr d;
@@ -351,7 +366,7 @@
 	inline_dentry = inline_data_addr(inode, ipage);
 
 	make_dentry_ptr_inline(inode, &d, inline_dentry);
-	do_make_empty_dir(inode, parent, &d);
+	f2fs_do_make_empty_dir(inode, parent, &d);
 
 	set_page_dirty(ipage);
 
@@ -385,8 +400,18 @@
 	if (err)
 		goto out;
 
+	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
+		f2fs_put_dnode(&dn);
+		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+		f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
+			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+			"run fsck to fix.",
+			__func__, dir->i_ino, dn.data_blkaddr);
+		err = -EINVAL;
+		goto out;
+	}
+
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE);
 
 	dentry_blk = page_address(page);
 
@@ -410,7 +435,7 @@
 	set_page_dirty(page);
 
 	/* clear inline dir and flag after data writeback */
-	truncate_inline_inode(dir, ipage, 0);
+	f2fs_truncate_inline_inode(dir, ipage, 0);
 
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
@@ -453,7 +478,7 @@
 		new_name.len = le16_to_cpu(de->name_len);
 
 		ino = le32_to_cpu(de->ino);
-		fake_mode = get_de_type(de) << S_SHIFT;
+		fake_mode = f2fs_get_de_type(de) << S_SHIFT;
 
 		err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
 							ino, fake_mode);
@@ -465,8 +490,8 @@
 	return 0;
 punch_dentry_pages:
 	truncate_inode_pages(&dir->i_data, 0);
-	truncate_blocks(dir, 0, false);
-	remove_dirty_inode(dir);
+	f2fs_truncate_blocks(dir, 0, false);
+	f2fs_remove_dirty_inode(dir);
 	return err;
 }
 
@@ -484,7 +509,7 @@
 	}
 
 	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
-	truncate_inline_inode(dir, ipage, 0);
+	f2fs_truncate_inline_inode(dir, ipage, 0);
 
 	unlock_page(ipage);
 
@@ -500,6 +525,7 @@
 	return 0;
 recover:
 	lock_page(ipage);
+	f2fs_wait_on_page_writeback(ipage, NODE, true);
 	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
 	f2fs_i_depth_write(dir, 0);
 	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
@@ -533,14 +559,14 @@
 	struct page *page = NULL;
 	int err = 0;
 
-	ipage = get_node_page(sbi, dir->i_ino);
+	ipage = f2fs_get_node_page(sbi, dir->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);
 
 	inline_dentry = inline_data_addr(dir, ipage);
 	make_dentry_ptr_inline(dir, &d, inline_dentry);
 
-	bit_pos = room_for_filename(d.bitmap, slots, d.max);
+	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
 	if (bit_pos >= d.max) {
 		err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
 		if (err)
@@ -551,7 +577,7 @@
 
 	if (inode) {
 		down_write(&F2FS_I(inode)->i_sem);
-		page = init_inode_metadata(inode, dir, new_name,
+		page = f2fs_init_inode_metadata(inode, dir, new_name,
 						orig_name, ipage);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
@@ -572,7 +598,7 @@
 		f2fs_put_page(page, 1);
 	}
 
-	update_parent_metadata(dir, inode, 0);
+	f2fs_update_parent_metadata(dir, inode, 0);
 fail:
 	if (inode)
 		up_write(&F2FS_I(inode)->i_sem);
@@ -618,7 +644,7 @@
 	void *inline_dentry;
 	struct f2fs_dentry_ptr d;
 
-	ipage = get_node_page(sbi, dir->i_ino);
+	ipage = f2fs_get_node_page(sbi, dir->i_ino);
 	if (IS_ERR(ipage))
 		return false;
 
@@ -649,7 +675,7 @@
 	if (ctx->pos == d.max)
 		return 0;
 
-	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);
 
@@ -675,7 +701,7 @@
 	struct page *ipage;
 	int err = 0;
 
-	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);
 
@@ -691,7 +717,10 @@
 		ilen = start + len;
 	ilen -= start;
 
-	get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+	if (err)
+		goto out;
+
 	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
 	byteaddr += (char *)inline_data_addr(inode, ipage) -
 					(char *)F2FS_INODE(ipage);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index e0d9e8f..959df22 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -36,15 +36,15 @@
 	unsigned int flags = F2FS_I(inode)->i_flags;
 	unsigned int new_fl = 0;
 
-	if (flags & FS_SYNC_FL)
+	if (flags & F2FS_SYNC_FL)
 		new_fl |= S_SYNC;
-	if (flags & FS_APPEND_FL)
+	if (flags & F2FS_APPEND_FL)
 		new_fl |= S_APPEND;
-	if (flags & FS_IMMUTABLE_FL)
+	if (flags & F2FS_IMMUTABLE_FL)
 		new_fl |= S_IMMUTABLE;
-	if (flags & FS_NOATIME_FL)
+	if (flags & F2FS_NOATIME_FL)
 		new_fl |= S_NOATIME;
-	if (flags & FS_DIRSYNC_FL)
+	if (flags & F2FS_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
 	if (f2fs_encrypted_inode(inode))
 		new_fl |= S_ENCRYPTED;
@@ -68,13 +68,16 @@
 	}
 }
 
-static bool __written_first_block(struct f2fs_inode *ri)
+static int __written_first_block(struct f2fs_sb_info *sbi,
+					struct f2fs_inode *ri)
 {
 	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
 
-	if (addr != NEW_ADDR && addr != NULL_ADDR)
-		return true;
-	return false;
+	if (!__is_valid_data_blkaddr(addr))
+		return 1;
+	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+		return -EFAULT;
+	return 0;
 }
 
 static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -117,15 +120,15 @@
 static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
 {
 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
-	int extra_isize = le32_to_cpu(ri->i_extra_isize);
 
 	if (!f2fs_sb_has_inode_chksum(sbi->sb))
 		return false;
 
-	if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
 		return false;
 
-	if (!F2FS_FITS_IN_INODE(ri, extra_isize, i_inode_checksum))
+	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
+				i_inode_checksum))
 		return false;
 
 	return true;
@@ -159,8 +162,15 @@
 	struct f2fs_inode *ri;
 	__u32 provided, calculated;
 
+	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
+		return true;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+	if (!f2fs_enable_inode_chksum(sbi, page))
+#else
 	if (!f2fs_enable_inode_chksum(sbi, page) ||
 			PageDirty(page) || PageWriteback(page))
+#endif
 		return true;
 
 	ri = &F2FS_NODE(page)->i;
@@ -185,6 +195,101 @@
 	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
 }
 
+static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	unsigned long long iblocks;
+
+	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+	if (!iblocks) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
+			"run fsck to fix.",
+			__func__, inode->i_ino, iblocks);
+		return false;
+	}
+
+	if (ino_of_node(node_page) != nid_of_node(node_page)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
+			"[%u, %u] run fsck to fix.",
+			__func__, inode->i_ino,
+			ino_of_node(node_page), nid_of_node(node_page));
+		return false;
+	}
+
+	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
+			&& !f2fs_has_extra_attr(inode)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: corrupted inode ino=%lx, run fsck to fix.",
+			__func__, inode->i_ino);
+		return false;
+	}
+
+	if (f2fs_has_extra_attr(inode) &&
+			!f2fs_sb_has_extra_attr(sbi->sb)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx) has extra_attr, "
+			"but the extra_attr feature is off",
+			__func__, inode->i_ino);
+		return false;
+	}
+
+	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+			fi->i_extra_isize % sizeof(__le32)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
+			"max: %zu",
+			__func__, inode->i_ino, fi->i_extra_isize,
+			F2FS_TOTAL_EXTRA_ATTR_SIZE);
+		return false;
+	}
+
+	if (F2FS_I(inode)->extent_tree) {
+		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+
+		if (ei->len &&
+			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+							DATA_GENERIC))) {
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
+				"is incorrect, run fsck to fix",
+				__func__, inode->i_ino,
+				ei->blk, ei->fofs, ei->len);
+			return false;
+		}
+	}
+
+	if (f2fs_has_inline_data(inode) &&
+			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx, mode=%u) should not have "
+			"inline_data, run fsck to fix",
+			__func__, inode->i_ino, inode->i_mode);
+		return false;
+	}
+
+	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx, mode=%u) should not have "
+			"inline_dentry, run fsck to fix",
+			__func__, inode->i_ino, inode->i_mode);
+		return false;
+	}
+
+	return true;
+}
+
 static int do_read_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -192,16 +297,13 @@
 	struct page *node_page;
 	struct f2fs_inode *ri;
 	projid_t i_projid;
+	int err;
 
 	/* Check if ino is within scope */
-	if (check_nid_range(sbi, inode->i_ino)) {
-		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
-			 (unsigned long) inode->i_ino);
-		WARN_ON(1);
+	if (f2fs_check_nid_range(sbi, inode->i_ino))
 		return -EINVAL;
-	}
 
-	node_page = get_node_page(sbi, inode->i_ino);
+	node_page = f2fs_get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(node_page))
 		return PTR_ERR(node_page);
 
@@ -221,8 +323,11 @@
 	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
 	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
 	inode->i_generation = le32_to_cpu(ri->i_generation);
-
-	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
+	if (S_ISDIR(inode->i_mode))
+		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
+	else if (S_ISREG(inode->i_mode))
+		fi->i_gc_failures[GC_FAILURE_PIN] =
+					le16_to_cpu(ri->i_gc_failures);
 	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
 	fi->i_flags = le32_to_cpu(ri->i_flags);
 	fi->flags = 0;
@@ -239,7 +344,6 @@
 					le16_to_cpu(ri->i_extra_isize) : 0;
 
 	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
-		f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
 		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
 	} else if (f2fs_has_inline_xattr(inode) ||
 				f2fs_has_inline_dentry(inode)) {
@@ -255,6 +359,11 @@
 		fi->i_inline_xattr_size = 0;
 	}
 
+	if (!sanity_check_inode(inode, node_page)) {
+		f2fs_put_page(node_page, 1);
+		return -EINVAL;
+	}
+
 	/* check data exist */
 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
 		__recover_inline_status(inode, node_page);
@@ -262,13 +371,20 @@
 	/* get rdev by using inline_info */
 	__get_inode_rdev(inode, ri);
 
-	if (__written_first_block(ri))
-		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+	if (S_ISREG(inode->i_mode)) {
+		err = __written_first_block(sbi, ri);
+		if (err < 0) {
+			f2fs_put_page(node_page, 1);
+			return err;
+		}
+		if (!err)
+			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+	}
 
-	if (!need_inode_block_update(sbi, inode->i_ino))
+	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
 		fi->last_disk_size = inode->i_size;
 
-	if (fi->i_flags & FS_PROJINHERIT_FL)
+	if (fi->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
@@ -320,10 +436,10 @@
 make_now:
 	if (ino == F2FS_NODE_INO(sbi)) {
 		inode->i_mapping->a_ops = &f2fs_node_aops;
-		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 	} else if (ino == F2FS_META_INO(sbi)) {
 		inode->i_mapping->a_ops = &f2fs_meta_aops;
-		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 	} else if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &f2fs_file_inode_operations;
 		inode->i_fop = &f2fs_file_operations;
@@ -373,7 +489,7 @@
 	return inode;
 }
 
-void update_inode(struct inode *inode, struct page *node_page)
+void f2fs_update_inode(struct inode *inode, struct page *node_page)
 {
 	struct f2fs_inode *ri;
 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
@@ -408,7 +524,12 @@
 	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
 	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
+	if (S_ISDIR(inode->i_mode))
+		ri->i_current_depth =
+			cpu_to_le32(F2FS_I(inode)->i_current_depth);
+	else if (S_ISREG(inode->i_mode))
+		ri->i_gc_failures =
+			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
 	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
 	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
 	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
@@ -452,14 +573,18 @@
 	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
 	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
 	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+#endif
 }
 
-void update_inode_page(struct inode *inode)
+void f2fs_update_inode_page(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *node_page;
 retry:
-	node_page = get_node_page(sbi, inode->i_ino);
+	node_page = f2fs_get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(node_page)) {
 		int err = PTR_ERR(node_page);
 		if (err == -ENOMEM) {
@@ -470,7 +595,7 @@
 		}
 		return;
 	}
-	update_inode(inode, node_page);
+	f2fs_update_inode(inode, node_page);
 	f2fs_put_page(node_page, 1);
 }
 
@@ -489,7 +614,7 @@
 	 * We need to balance fs here to prevent from producing dirty node pages
 	 * during the urgent cleaning time when running out of free sections.
 	 */
-	update_inode_page(inode);
+	f2fs_update_inode_page(inode);
 	if (wbc && wbc->nr_to_write)
 		f2fs_balance_fs(sbi, true);
 	return 0;
@@ -506,7 +631,7 @@
 
 	/* some remained atomic pages should discarded */
 	if (f2fs_is_atomic_file(inode))
-		drop_inmem_pages(inode);
+		f2fs_drop_inmem_pages(inode);
 
 	trace_f2fs_evict_inode(inode);
 	truncate_inode_pages_final(&inode->i_data);
@@ -516,7 +641,7 @@
 		goto out_clear;
 
 	f2fs_bug_on(sbi, get_dirty_pages(inode));
-	remove_dirty_inode(inode);
+	f2fs_remove_dirty_inode(inode);
 
 	f2fs_destroy_extent_tree(inode);
 
@@ -525,9 +650,9 @@
 
 	dquot_initialize(inode);
 
-	remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
-	remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
-	remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
+	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
+	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
 
 	sb_start_intwrite(inode->i_sb);
 	set_inode_flag(inode, FI_NO_ALLOC);
@@ -536,15 +661,14 @@
 	if (F2FS_HAS_BLOCKS(inode))
 		err = f2fs_truncate(inode);
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
 		f2fs_show_injection_info(FAULT_EVICT_INODE);
 		err = -EIO;
 	}
-#endif
+
 	if (!err) {
 		f2fs_lock_op(sbi);
-		err = remove_inode_page(inode);
+		err = f2fs_remove_inode_page(inode);
 		f2fs_unlock_op(sbi);
 		if (err == -ENOENT)
 			err = 0;
@@ -557,7 +681,7 @@
 	}
 
 	if (err)
-		update_inode_page(inode);
+		f2fs_update_inode_page(inode);
 	dquot_free_inode(inode);
 	sb_end_intwrite(inode->i_sb);
 no_delete:
@@ -580,16 +704,19 @@
 		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
 	if (inode->i_nlink) {
 		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
-			add_ino_entry(sbi, inode->i_ino, APPEND_INO);
+			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
-			add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
 	}
 	if (is_inode_flag_set(inode, FI_FREE_NID)) {
-		alloc_nid_failed(sbi, inode->i_ino);
+		f2fs_alloc_nid_failed(sbi, inode->i_ino);
 		clear_inode_flag(inode, FI_FREE_NID);
 	} else {
-		f2fs_bug_on(sbi, err &&
-			!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
+		/*
+		 * If xattr nid is corrupted, we can reach this error condition:
+		 * err && !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO).
+		 * In that case, f2fs_check_nid_range() is enough to give a clue.
+		 */
 	}
 out_clear:
 	fscrypt_put_encryption_info(inode);
@@ -597,10 +724,11 @@
 }
 
 /* caller should call f2fs_lock_op() */
-void handle_failed_inode(struct inode *inode)
+void f2fs_handle_failed_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct node_info ni;
+	int err;
 
 	/*
 	 * clear nlink of inode in order to release resource of inode
@@ -612,7 +740,7 @@
 	 * we must call this to avoid inode being remained as dirty, resulting
 	 * in a panic when flushing dirty inodes in gdirty_list.
 	 */
-	update_inode_page(inode);
+	f2fs_update_inode_page(inode);
 	f2fs_inode_synced(inode);
 
 	/* don't make bad inode, since it becomes a regular file. */
@@ -623,22 +751,29 @@
 	 * so we can prevent losing this orphan when encountering checkpoint
 	 * and a following sudden power-off.
 	 */
-	get_node_info(sbi, inode->i_ino, &ni);
+	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+	if (err) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"May lose orphan inode, run fsck to fix.");
+		goto out;
+	}
 
 	if (ni.blk_addr != NULL_ADDR) {
-		int err = acquire_orphan_inode(sbi);
+		err = f2fs_acquire_orphan_inode(sbi);
 		if (err) {
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
 			f2fs_msg(sbi->sb, KERN_WARNING,
 				"Too many orphan inodes, run fsck to fix.");
 		} else {
-			add_orphan_inode(inode);
+			f2fs_add_orphan_inode(inode);
 		}
-		alloc_nid_done(sbi, inode->i_ino);
+		f2fs_alloc_nid_done(sbi, inode->i_ino);
 	} else {
 		set_inode_flag(inode, FI_FREE_NID);
 	}
 
+out:
 	f2fs_unlock_op(sbi);
 
 	/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index f1e1ff1..56593b3 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -37,7 +37,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	f2fs_lock_op(sbi);
-	if (!alloc_nid(sbi, &ino)) {
+	if (!f2fs_alloc_nid(sbi, &ino)) {
 		f2fs_unlock_op(sbi);
 		err = -ENOSPC;
 		goto fail;
@@ -54,6 +54,9 @@
 			F2FS_I(inode)->i_crtime = current_time(inode);
 	inode->i_generation = sbi->s_next_generation++;
 
+	if (S_ISDIR(inode->i_mode))
+		F2FS_I(inode)->i_current_depth = 1;
+
 	err = insert_inode_locked(inode);
 	if (err) {
 		err = -EINVAL;
@@ -61,7 +64,7 @@
 	}
 
 	if (f2fs_sb_has_project_quota(sbi->sb) &&
-		(F2FS_I(dir)->i_flags & FS_PROJINHERIT_FL))
+		(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
 		F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
 	else
 		F2FS_I(inode)->i_projid = make_kprojid(&init_user_ns,
@@ -116,9 +119,9 @@
 		f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 
 	if (S_ISDIR(inode->i_mode))
-		F2FS_I(inode)->i_flags |= FS_INDEX_FL;
+		F2FS_I(inode)->i_flags |= F2FS_INDEX_FL;
 
-	if (F2FS_I(inode)->i_flags & FS_PROJINHERIT_FL)
+	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
 	trace_f2fs_new_inode(inode, 0);
@@ -193,7 +196,7 @@
 	up_read(&sbi->sb_lock);
 }
 
-int update_extension_list(struct f2fs_sb_info *sbi, const char *name,
+int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
 							bool hot, bool set)
 {
 	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
@@ -243,7 +246,7 @@
 		return -EINVAL;
 
 	if (hot) {
-		strncpy(extlist[count], name, strlen(name));
+		memcpy(extlist[count], name, strlen(name));
 		sbi->raw_super->hot_ext_count = hot_count + 1;
 	} else {
 		char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
@@ -251,7 +254,7 @@
 		memcpy(buf, &extlist[cold_count],
 				F2FS_EXTENSION_LEN * hot_count);
 		memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
-		strncpy(extlist[cold_count], name, strlen(name));
+		memcpy(extlist[cold_count], name, strlen(name));
 		memcpy(&extlist[cold_count + 1], buf,
 				F2FS_EXTENSION_LEN * hot_count);
 		sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
@@ -292,7 +295,7 @@
 		goto out;
 	f2fs_unlock_op(sbi);
 
-	alloc_nid_done(sbi, ino);
+	f2fs_alloc_nid_done(sbi, ino);
 
 	d_instantiate_new(dentry, inode);
 
@@ -302,7 +305,7 @@
 	f2fs_balance_fs(sbi, true);
 	return 0;
 out:
-	handle_failed_inode(inode);
+	f2fs_handle_failed_inode(inode);
 	return err;
 }
 
@@ -397,7 +400,7 @@
 		err = PTR_ERR(page);
 		goto out;
 	} else {
-		err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
+		err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
 		if (err)
 			goto out;
 	}
@@ -408,7 +411,7 @@
 	else if (IS_ERR(page))
 		err = PTR_ERR(page);
 	else
-		err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
+		err = f2fs_do_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
 out:
 	if (!err)
 		clear_inode_flag(dir, FI_INLINE_DOTS);
@@ -520,7 +523,7 @@
 	f2fs_balance_fs(sbi, true);
 
 	f2fs_lock_op(sbi);
-	err = acquire_orphan_inode(sbi);
+	err = f2fs_acquire_orphan_inode(sbi);
 	if (err) {
 		f2fs_unlock_op(sbi);
 		f2fs_put_page(page, 0);
@@ -585,9 +588,9 @@
 	f2fs_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
 	if (err)
-		goto out_handle_failed_inode;
+		goto out_f2fs_handle_failed_inode;
 	f2fs_unlock_op(sbi);
-	alloc_nid_done(sbi, inode->i_ino);
+	f2fs_alloc_nid_done(sbi, inode->i_ino);
 
 	err = fscrypt_encrypt_symlink(inode, symname, len, &disk_link);
 	if (err)
@@ -620,8 +623,8 @@
 	f2fs_balance_fs(sbi, true);
 	goto out_free_encrypted_link;
 
-out_handle_failed_inode:
-	handle_failed_inode(inode);
+out_f2fs_handle_failed_inode:
+	f2fs_handle_failed_inode(inode);
 out_free_encrypted_link:
 	if (disk_link.name != (unsigned char *)symname)
 		kfree(disk_link.name);
@@ -657,7 +660,7 @@
 		goto out_fail;
 	f2fs_unlock_op(sbi);
 
-	alloc_nid_done(sbi, inode->i_ino);
+	f2fs_alloc_nid_done(sbi, inode->i_ino);
 
 	d_instantiate_new(dentry, inode);
 
@@ -669,7 +672,7 @@
 
 out_fail:
 	clear_inode_flag(inode, FI_INC_LINK);
-	handle_failed_inode(inode);
+	f2fs_handle_failed_inode(inode);
 	return err;
 }
 
@@ -708,7 +711,7 @@
 		goto out;
 	f2fs_unlock_op(sbi);
 
-	alloc_nid_done(sbi, inode->i_ino);
+	f2fs_alloc_nid_done(sbi, inode->i_ino);
 
 	d_instantiate_new(dentry, inode);
 
@@ -718,7 +721,7 @@
 	f2fs_balance_fs(sbi, true);
 	return 0;
 out:
-	handle_failed_inode(inode);
+	f2fs_handle_failed_inode(inode);
 	return err;
 }
 
@@ -747,7 +750,7 @@
 	}
 
 	f2fs_lock_op(sbi);
-	err = acquire_orphan_inode(sbi);
+	err = f2fs_acquire_orphan_inode(sbi);
 	if (err)
 		goto out;
 
@@ -759,8 +762,8 @@
 	 * add this non-linked tmpfile to orphan list, in this way we could
 	 * remove all unused data of tmpfile after abnormal power-off.
 	 */
-	add_orphan_inode(inode);
-	alloc_nid_done(sbi, inode->i_ino);
+	f2fs_add_orphan_inode(inode);
+	f2fs_alloc_nid_done(sbi, inode->i_ino);
 
 	if (whiteout) {
 		f2fs_i_links_write(inode, false);
@@ -776,9 +779,9 @@
 	return 0;
 
 release_out:
-	release_orphan_inode(sbi);
+	f2fs_release_orphan_inode(sbi);
 out:
-	handle_failed_inode(inode);
+	f2fs_handle_failed_inode(inode);
 	return err;
 }
 
@@ -885,7 +888,7 @@
 
 		f2fs_lock_op(sbi);
 
-		err = acquire_orphan_inode(sbi);
+		err = f2fs_acquire_orphan_inode(sbi);
 		if (err)
 			goto put_out_dir;
 
@@ -899,9 +902,9 @@
 		up_write(&F2FS_I(new_inode)->i_sem);
 
 		if (!new_inode->i_nlink)
-			add_orphan_inode(new_inode);
+			f2fs_add_orphan_inode(new_inode);
 		else
-			release_orphan_inode(sbi);
+			f2fs_release_orphan_inode(sbi);
 	} else {
 		f2fs_balance_fs(sbi, true);
 
@@ -969,8 +972,12 @@
 			f2fs_put_page(old_dir_page, 0);
 		f2fs_i_links_write(old_dir, false);
 	}
-	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
-		add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) {
+		f2fs_add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+		if (S_ISDIR(old_inode->i_mode))
+			f2fs_add_ino_entry(sbi, old_inode->i_ino,
+							TRANS_DIR_INO);
+	}
 
 	f2fs_unlock_op(sbi);
 
@@ -1121,8 +1128,8 @@
 	f2fs_mark_inode_dirty_sync(new_dir, false);
 
 	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) {
-		add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
-		add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+		f2fs_add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
+		f2fs_add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
 	}
 
 	f2fs_unlock_op(sbi);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 803a010..f213a53 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -23,13 +23,29 @@
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
-#define on_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
+#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
 
 static struct kmem_cache *nat_entry_slab;
 static struct kmem_cache *free_nid_slab;
 static struct kmem_cache *nat_entry_set_slab;
+static struct kmem_cache *fsync_node_entry_slab;
 
-bool available_free_memory(struct f2fs_sb_info *sbi, int type)
+/*
+ * Check whether the given nid is within node id range.
+ */
+int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
+{
+	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+				"%s: out-of-range nid=%x, run fsck to fix.",
+				__func__, nid);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct sysinfo val;
@@ -87,44 +103,33 @@
 
 static void clear_node_page_dirty(struct page *page)
 {
-	struct address_space *mapping = page->mapping;
-	unsigned int long flags;
-
 	if (PageDirty(page)) {
-		spin_lock_irqsave(&mapping->tree_lock, flags);
-		radix_tree_tag_clear(&mapping->page_tree,
-				page_index(page),
-				PAGECACHE_TAG_DIRTY);
-		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-
+		f2fs_clear_radix_tree_dirty_tag(page);
 		clear_page_dirty_for_io(page);
-		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
+		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
 	}
 	ClearPageUptodate(page);
 }
 
 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	pgoff_t index = current_nat_addr(sbi, nid);
-	return get_meta_page(sbi, index);
+	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
 }
 
 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct page *src_page;
 	struct page *dst_page;
-	pgoff_t src_off;
 	pgoff_t dst_off;
 	void *src_addr;
 	void *dst_addr;
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 
-	src_off = current_nat_addr(sbi, nid);
-	dst_off = next_nat_addr(sbi, src_off);
+	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
 
 	/* get current nat block page with lock */
-	src_page = get_meta_page(sbi, src_off);
-	dst_page = grab_meta_page(sbi, dst_off);
+	src_page = get_current_nat_page(sbi, nid);
+	dst_page = f2fs_grab_meta_page(sbi, dst_off);
 	f2fs_bug_on(sbi, PageDirty(src_page));
 
 	src_addr = page_address(src_page);
@@ -169,14 +174,30 @@
 
 	if (raw_ne)
 		node_info_from_raw_nat(&ne->ni, raw_ne);
+
+	spin_lock(&nm_i->nat_list_lock);
 	list_add_tail(&ne->list, &nm_i->nat_entries);
+	spin_unlock(&nm_i->nat_list_lock);
+
 	nm_i->nat_cnt++;
 	return ne;
 }
 
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 {
-	return radix_tree_lookup(&nm_i->nat_root, n);
+	struct nat_entry *ne;
+
+	ne = radix_tree_lookup(&nm_i->nat_root, n);
+
+	/* for recent accessed nat entry, move it to tail of lru list */
+	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+		spin_lock(&nm_i->nat_list_lock);
+		if (!list_empty(&ne->list))
+			list_move_tail(&ne->list, &nm_i->nat_entries);
+		spin_unlock(&nm_i->nat_list_lock);
+	}
+
+	return ne;
 }
 
 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
@@ -187,7 +208,6 @@
 
 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 {
-	list_del(&e->list);
 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
 	nm_i->nat_cnt--;
 	__free_nat_entry(e);
@@ -238,16 +258,21 @@
 	nm_i->dirty_nat_cnt++;
 	set_nat_flag(ne, IS_DIRTY, true);
 refresh_list:
+	spin_lock(&nm_i->nat_list_lock);
 	if (new_ne)
 		list_del_init(&ne->list);
 	else
 		list_move_tail(&ne->list, &head->entry_list);
+	spin_unlock(&nm_i->nat_list_lock);
 }
 
 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 		struct nat_entry_set *set, struct nat_entry *ne)
 {
+	spin_lock(&nm_i->nat_list_lock);
 	list_move_tail(&ne->list, &nm_i->nat_entries);
+	spin_unlock(&nm_i->nat_list_lock);
+
 	set_nat_flag(ne, IS_DIRTY, false);
 	set->entry_cnt--;
 	nm_i->dirty_nat_cnt--;
@@ -260,7 +285,73 @@
 							start, nr);
 }
 
-int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+{
+	return NODE_MAPPING(sbi) == page->mapping &&
+			IS_DNODE(page) && is_cold_node(page);
+}
+
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+	spin_lock_init(&sbi->fsync_node_lock);
+	INIT_LIST_HEAD(&sbi->fsync_node_list);
+	sbi->fsync_seg_id = 0;
+	sbi->fsync_node_num = 0;
+}
+
+static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
+							struct page *page)
+{
+	struct fsync_node_entry *fn;
+	unsigned long flags;
+	unsigned int seq_id;
+
+	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
+
+	get_page(page);
+	fn->page = page;
+	INIT_LIST_HEAD(&fn->list);
+
+	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+	list_add_tail(&fn->list, &sbi->fsync_node_list);
+	fn->seq_id = sbi->fsync_seg_id++;
+	seq_id = fn->seq_id;
+	sbi->fsync_node_num++;
+	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+	return seq_id;
+}
+
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+{
+	struct fsync_node_entry *fn;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
+		if (fn->page == page) {
+			list_del(&fn->list);
+			sbi->fsync_node_num--;
+			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+			kmem_cache_free(fsync_node_entry_slab, fn);
+			put_page(page);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+	f2fs_bug_on(sbi, 1);
+}
+
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+	sbi->fsync_seg_id = 0;
+	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+}
+
+int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
@@ -277,7 +368,7 @@
 	return need;
 }
 
-bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
+bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
@@ -291,7 +382,7 @@
 	return is_cp;
 }
 
-bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
+bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
@@ -364,8 +455,7 @@
 			new_blkaddr == NULL_ADDR);
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
 			new_blkaddr == NEW_ADDR);
-	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
-			nat_get_blkaddr(e) != NULL_ADDR &&
+	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
 			new_blkaddr == NEW_ADDR);
 
 	/* increment version no as node is removed */
@@ -376,7 +466,7 @@
 
 	/* change address */
 	nat_set_blkaddr(e, new_blkaddr);
-	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
+	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
 		set_nat_flag(e, IS_CHECKPOINTED, false);
 	__set_nat_cache_dirty(nm_i, e);
 
@@ -391,7 +481,7 @@
 	up_write(&nm_i->nat_tree_lock);
 }
 
-int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
+int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	int nr = nr_shrink;
@@ -399,13 +489,25 @@
 	if (!down_write_trylock(&nm_i->nat_tree_lock))
 		return 0;
 
-	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+	spin_lock(&nm_i->nat_list_lock);
+	while (nr_shrink) {
 		struct nat_entry *ne;
+
+		if (list_empty(&nm_i->nat_entries))
+			break;
+
 		ne = list_first_entry(&nm_i->nat_entries,
 					struct nat_entry, list);
+		list_del(&ne->list);
+		spin_unlock(&nm_i->nat_list_lock);
+
 		__del_from_nat_cache(nm_i, ne);
 		nr_shrink--;
+
+		spin_lock(&nm_i->nat_list_lock);
 	}
+	spin_unlock(&nm_i->nat_list_lock);
+
 	up_write(&nm_i->nat_tree_lock);
 	return nr - nr_shrink;
 }
@@ -413,7 +515,8 @@
 /*
  * This function always returns success
  */
-void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+						struct node_info *ni)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -436,14 +539,14 @@
 		ni->blk_addr = nat_get_blkaddr(e);
 		ni->version = nat_get_version(e);
 		up_read(&nm_i->nat_tree_lock);
-		return;
+		return 0;
 	}
 
 	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
 
 	/* Check current segment summary */
 	down_read(&curseg->journal_rwsem);
-	i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
+	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
 	if (i >= 0) {
 		ne = nat_in_journal(journal, i);
 		node_info_from_raw_nat(ni, &ne);
@@ -458,7 +561,10 @@
 	index = current_nat_addr(sbi, nid);
 	up_read(&nm_i->nat_tree_lock);
 
-	page = get_meta_page(sbi, index);
+	page = f2fs_get_meta_page(sbi, index);
+	if (IS_ERR(page))
+		return PTR_ERR(page);
+
 	nat_blk = (struct f2fs_nat_block *)page_address(page);
 	ne = nat_blk->entries[nid - start_nid];
 	node_info_from_raw_nat(ni, &ne);
@@ -466,12 +572,13 @@
 cache:
 	/* cache nat entry */
 	cache_nat_entry(sbi, nid, &ne);
+	return 0;
 }
 
 /*
  * readahead MAX_RA_NODE number of node pages.
  */
-static void ra_node_pages(struct page *parent, int start, int n)
+static void f2fs_ra_node_pages(struct page *parent, int start, int n)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
 	struct blk_plug plug;
@@ -485,13 +592,13 @@
 	end = min(end, NIDS_PER_BLOCK);
 	for (i = start; i < end; i++) {
 		nid = get_nid(parent, i, false);
-		ra_node_page(sbi, nid);
+		f2fs_ra_node_page(sbi, nid);
 	}
 
 	blk_finish_plug(&plug);
 }
 
-pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
 {
 	const long direct_index = ADDRS_PER_INODE(dn->inode);
 	const long direct_blks = ADDRS_PER_BLOCK;
@@ -606,7 +713,7 @@
  * f2fs_unlock_op() only if ro is not set RDONLY_NODE.
  * In the case of RDONLY_NODE, we don't need to care about mutex.
  */
-int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct page *npage[4];
@@ -625,7 +732,7 @@
 	npage[0] = dn->inode_page;
 
 	if (!npage[0]) {
-		npage[0] = get_node_page(sbi, nids[0]);
+		npage[0] = f2fs_get_node_page(sbi, nids[0]);
 		if (IS_ERR(npage[0]))
 			return PTR_ERR(npage[0]);
 	}
@@ -649,24 +756,24 @@
 
 		if (!nids[i] && mode == ALLOC_NODE) {
 			/* alloc new node */
-			if (!alloc_nid(sbi, &(nids[i]))) {
+			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
 				err = -ENOSPC;
 				goto release_pages;
 			}
 
 			dn->nid = nids[i];
-			npage[i] = new_node_page(dn, noffset[i]);
+			npage[i] = f2fs_new_node_page(dn, noffset[i]);
 			if (IS_ERR(npage[i])) {
-				alloc_nid_failed(sbi, nids[i]);
+				f2fs_alloc_nid_failed(sbi, nids[i]);
 				err = PTR_ERR(npage[i]);
 				goto release_pages;
 			}
 
 			set_nid(parent, offset[i - 1], nids[i], i == 1);
-			alloc_nid_done(sbi, nids[i]);
+			f2fs_alloc_nid_done(sbi, nids[i]);
 			done = true;
 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
-			npage[i] = get_node_page_ra(parent, offset[i - 1]);
+			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
 			if (IS_ERR(npage[i])) {
 				err = PTR_ERR(npage[i]);
 				goto release_pages;
@@ -681,7 +788,7 @@
 		}
 
 		if (!done) {
-			npage[i] = get_node_page(sbi, nids[i]);
+			npage[i] = f2fs_get_node_page(sbi, nids[i]);
 			if (IS_ERR(npage[i])) {
 				err = PTR_ERR(npage[i]);
 				f2fs_put_page(npage[0], 0);
@@ -715,20 +822,23 @@
 	return err;
 }
 
-static void truncate_node(struct dnode_of_data *dn)
+static int truncate_node(struct dnode_of_data *dn)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct node_info ni;
+	int err;
 
-	get_node_info(sbi, dn->nid, &ni);
+	err = f2fs_get_node_info(sbi, dn->nid, &ni);
+	if (err)
+		return err;
 
 	/* Deallocate node address */
-	invalidate_blocks(sbi, ni.blk_addr);
+	f2fs_invalidate_blocks(sbi, ni.blk_addr);
 	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
 	set_node_addr(sbi, &ni, NULL_ADDR, false);
 
 	if (dn->nid == dn->inode->i_ino) {
-		remove_orphan_inode(sbi, dn->nid);
+		f2fs_remove_orphan_inode(sbi, dn->nid);
 		dec_valid_inode_count(sbi);
 		f2fs_inode_synced(dn->inode);
 	}
@@ -743,17 +853,20 @@
 
 	dn->node_page = NULL;
 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+
+	return 0;
 }
 
 static int truncate_dnode(struct dnode_of_data *dn)
 {
 	struct page *page;
+	int err;
 
 	if (dn->nid == 0)
 		return 1;
 
 	/* get direct node */
-	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
+	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
 	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
 		return 1;
 	else if (IS_ERR(page))
@@ -762,8 +875,11 @@
 	/* Make dnode_of_data for parameter */
 	dn->node_page = page;
 	dn->ofs_in_node = 0;
-	truncate_data_blocks(dn);
-	truncate_node(dn);
+	f2fs_truncate_data_blocks(dn);
+	err = truncate_node(dn);
+	if (err)
+		return err;
+
 	return 1;
 }
 
@@ -783,13 +899,13 @@
 
 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
 
-	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
+	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
 	if (IS_ERR(page)) {
 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
 		return PTR_ERR(page);
 	}
 
-	ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
 
 	rn = F2FS_NODE(page);
 	if (depth < 3) {
@@ -828,7 +944,9 @@
 	if (!ofs) {
 		/* remove current indirect node */
 		dn->node_page = page;
-		truncate_node(dn);
+		ret = truncate_node(dn);
+		if (ret)
+			goto out_err;
 		freed++;
 	} else {
 		f2fs_put_page(page, 1);
@@ -859,7 +977,7 @@
 	/* get indirect nodes in the path */
 	for (i = 0; i < idx + 1; i++) {
 		/* reference count'll be increased */
-		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
+		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
 		if (IS_ERR(pages[i])) {
 			err = PTR_ERR(pages[i]);
 			idx = i - 1;
@@ -868,7 +986,7 @@
 		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
 	}
 
-	ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
 
 	/* free direct nodes linked to a partial indirect node */
 	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
@@ -886,7 +1004,9 @@
 	if (offset[idx + 1] == 0) {
 		dn->node_page = pages[idx];
 		dn->nid = nid[idx];
-		truncate_node(dn);
+		err = truncate_node(dn);
+		if (err)
+			goto fail;
 	} else {
 		f2fs_put_page(pages[idx], 1);
 	}
@@ -905,7 +1025,7 @@
 /*
  * All the block addresses of data and nodes should be nullified.
  */
-int truncate_inode_blocks(struct inode *inode, pgoff_t from)
+int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int err = 0, cont = 1;
@@ -921,7 +1041,7 @@
 	if (level < 0)
 		return level;
 
-	page = get_node_page(sbi, inode->i_ino);
+	page = f2fs_get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(page)) {
 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
 		return PTR_ERR(page);
@@ -1001,24 +1121,30 @@
 }
 
 /* caller must lock inode page */
-int truncate_xattr_node(struct inode *inode)
+int f2fs_truncate_xattr_node(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
 	struct dnode_of_data dn;
 	struct page *npage;
+	int err;
 
 	if (!nid)
 		return 0;
 
-	npage = get_node_page(sbi, nid);
+	npage = f2fs_get_node_page(sbi, nid);
 	if (IS_ERR(npage))
 		return PTR_ERR(npage);
 
+	set_new_dnode(&dn, inode, NULL, npage, nid);
+	err = truncate_node(&dn);
+	if (err) {
+		f2fs_put_page(npage, 1);
+		return err;
+	}
+
 	f2fs_i_xnid_write(inode, 0);
 
-	set_new_dnode(&dn, inode, NULL, npage, nid);
-	truncate_node(&dn);
 	return 0;
 }
 
@@ -1026,17 +1152,17 @@
  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
  */
-int remove_inode_page(struct inode *inode)
+int f2fs_remove_inode_page(struct inode *inode)
 {
 	struct dnode_of_data dn;
 	int err;
 
 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
 	if (err)
 		return err;
 
-	err = truncate_xattr_node(inode);
+	err = f2fs_truncate_xattr_node(inode);
 	if (err) {
 		f2fs_put_dnode(&dn);
 		return err;
@@ -1045,18 +1171,26 @@
 	/* remove potential inline_data blocks */
 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 				S_ISLNK(inode->i_mode))
-		truncate_data_blocks_range(&dn, 1);
+		f2fs_truncate_data_blocks_range(&dn, 1);
 
 	/* 0 is possible, after f2fs_new_inode() has failed */
+	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+		f2fs_put_dnode(&dn);
+		return -EIO;
+	}
 	f2fs_bug_on(F2FS_I_SB(inode),
 			inode->i_blocks != 0 && inode->i_blocks != 8);
 
 	/* will put inode & node pages */
-	truncate_node(&dn);
+	err = truncate_node(&dn);
+	if (err) {
+		f2fs_put_dnode(&dn);
+		return err;
+	}
 	return 0;
 }
 
-struct page *new_inode_page(struct inode *inode)
+struct page *f2fs_new_inode_page(struct inode *inode)
 {
 	struct dnode_of_data dn;
 
@@ -1064,10 +1198,10 @@
 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
 
 	/* caller should f2fs_put_page(page, 1); */
-	return new_node_page(&dn, 0);
+	return f2fs_new_node_page(&dn, 0);
 }
 
-struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct node_info new_ni;
@@ -1085,7 +1219,11 @@
 		goto fail;
 
 #ifdef CONFIG_F2FS_CHECK_FS
-	get_node_info(sbi, dn->nid, &new_ni);
+	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+	if (err) {
+		dec_valid_node_count(sbi, dn->inode, !ofs);
+		goto fail;
+	}
 	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
 #endif
 	new_ni.nid = dn->nid;
@@ -1133,13 +1271,21 @@
 		.page = page,
 		.encrypted_page = NULL,
 	};
+	int err;
 
-	if (PageUptodate(page))
+	if (PageUptodate(page)) {
+#ifdef CONFIG_F2FS_CHECK_FS
+		f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
+#endif
 		return LOCKED_PAGE;
+	}
 
-	get_node_info(sbi, page->index, &ni);
+	err = f2fs_get_node_info(sbi, page->index, &ni);
+	if (err)
+		return err;
 
-	if (unlikely(ni.blk_addr == NULL_ADDR)) {
+	if (unlikely(ni.blk_addr == NULL_ADDR) ||
+			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
 		ClearPageUptodate(page);
 		return -ENOENT;
 	}
@@ -1151,14 +1297,15 @@
 /*
  * Readahead a node page
  */
-void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct page *apage;
 	int err;
 
 	if (!nid)
 		return;
-	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+	if (f2fs_check_nid_range(sbi, nid))
+		return;
 
 	rcu_read_lock();
 	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
@@ -1182,7 +1329,8 @@
 
 	if (!nid)
 		return ERR_PTR(-ENOENT);
-	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+	if (f2fs_check_nid_range(sbi, nid))
+		return ERR_PTR(-EINVAL);
 repeat:
 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
 	if (!page)
@@ -1198,7 +1346,7 @@
 	}
 
 	if (parent)
-		ra_node_pages(parent, start + 1, MAX_RA_NODE);
+		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
 
 	lock_page(page);
 
@@ -1232,12 +1380,12 @@
 	return page;
 }
 
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
 {
 	return __get_node_page(sbi, nid, NULL, 0);
 }
 
-struct page *get_node_page_ra(struct page *parent, int start)
+struct page *f2fs_get_node_page_ra(struct page *parent, int start)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
 	nid_t nid = get_nid(parent, start, false);
@@ -1272,7 +1420,7 @@
 
 	ret = f2fs_write_inline_data(inode, page);
 	inode_dec_dirty_pages(inode);
-	remove_dirty_inode(inode);
+	f2fs_remove_dirty_inode(inode);
 	if (ret)
 		set_page_dirty(page);
 page_out:
@@ -1283,21 +1431,17 @@
 
 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-	pgoff_t index, end;
+	pgoff_t index;
 	struct pagevec pvec;
 	struct page *last_page = NULL;
+	int nr_pages;
 
 	pagevec_init(&pvec, 0);
 	index = 0;
-	end = ULONG_MAX;
 
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
+	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+				PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
@@ -1343,7 +1487,7 @@
 
 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 				struct writeback_control *wbc, bool do_balance,
-				enum iostat_type io_type)
+				enum iostat_type io_type, unsigned int *seq_id)
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 	nid_t nid;
@@ -1360,22 +1504,27 @@
 		.io_type = io_type,
 		.io_wbc = wbc,
 	};
+	unsigned int seq;
 
 	trace_f2fs_writepage(page, NODE);
 
-	if (unlikely(f2fs_cp_error(sbi))) {
-		dec_page_count(sbi, F2FS_DIRTY_NODES);
-		unlock_page(page);
-		return 0;
-	}
+	if (unlikely(f2fs_cp_error(sbi)))
+		goto redirty_out;
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 
+	if (wbc->sync_mode == WB_SYNC_NONE &&
+			IS_DNODE(page) && is_cold_node(page))
+		goto redirty_out;
+
 	/* get old block addr of this node page */
 	nid = nid_of_node(page);
 	f2fs_bug_on(sbi, page->index != nid);
 
+	if (f2fs_get_node_info(sbi, nid, &ni))
+		goto redirty_out;
+
 	if (wbc->for_reclaim) {
 		if (!down_read_trylock(&sbi->node_write))
 			goto redirty_out;
@@ -1383,8 +1532,6 @@
 		down_read(&sbi->node_write);
 	}
 
-	get_node_info(sbi, nid, &ni);
-
 	/* This page is already truncated */
 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
 		ClearPageUptodate(page);
@@ -1394,13 +1541,24 @@
 		return 0;
 	}
 
+	if (__is_valid_data_blkaddr(ni.blk_addr) &&
+		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+		goto redirty_out;
+
 	if (atomic && !test_opt(sbi, NOBARRIER))
 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 
 	set_page_writeback(page);
 	ClearPageError(page);
+
+	if (f2fs_in_warm_node_list(sbi, page)) {
+		seq = f2fs_add_fsync_node_entry(sbi, page);
+		if (seq_id)
+			*seq_id = seq;
+	}
+
 	fio.old_blkaddr = ni.blk_addr;
-	write_node_page(nid, &fio);
+	f2fs_do_write_node_page(nid, &fio);
 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	up_read(&sbi->node_write);
@@ -1429,7 +1587,7 @@
 	return AOP_WRITEPAGE_ACTIVATE;
 }
 
-void move_node_page(struct page *node_page, int gc_type)
+void f2fs_move_node_page(struct page *node_page, int gc_type)
 {
 	if (gc_type == FG_GC) {
 		struct writeback_control wbc = {
@@ -1446,7 +1604,7 @@
 			goto out_page;
 
 		if (__write_node_page(node_page, false, NULL,
-					&wbc, false, FS_GC_NODE_IO))
+					&wbc, false, FS_GC_NODE_IO, NULL))
 			unlock_page(node_page);
 		goto release_page;
 	} else {
@@ -1463,19 +1621,22 @@
 static int f2fs_write_node_page(struct page *page,
 				struct writeback_control *wbc)
 {
-	return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
+	return __write_node_page(page, false, NULL, wbc, false,
+						FS_NODE_IO, NULL);
 }
 
-int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
-			struct writeback_control *wbc, bool atomic)
+int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+			struct writeback_control *wbc, bool atomic,
+			unsigned int *seq_id)
 {
-	pgoff_t index, end;
+	pgoff_t index;
 	pgoff_t last_idx = ULONG_MAX;
 	struct pagevec pvec;
 	int ret = 0;
 	struct page *last_page = NULL;
 	bool marked = false;
 	nid_t ino = inode->i_ino;
+	int nr_pages;
 
 	if (atomic) {
 		last_page = last_fsync_dnode(sbi, ino);
@@ -1485,15 +1646,10 @@
 retry:
 	pagevec_init(&pvec, 0);
 	index = 0;
-	end = ULONG_MAX;
 
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
+	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+				PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
@@ -1537,9 +1693,9 @@
 				if (IS_INODE(page)) {
 					if (is_inode_flag_set(inode,
 								FI_DIRTY_INODE))
-						update_inode(inode, page);
+						f2fs_update_inode(inode, page);
 					set_dentry_mark(page,
-						need_dentry_mark(sbi, ino));
+						f2fs_need_dentry_mark(sbi, ino));
 				}
 				/*  may be written by other thread */
 				if (!PageDirty(page))
@@ -1552,7 +1708,7 @@
 			ret = __write_node_page(page, atomic &&
 						page == last_page,
 						&submitted, wbc, true,
-						FS_NODE_IO);
+						FS_NODE_IO, seq_id);
 			if (ret) {
 				unlock_page(page);
 				f2fs_put_page(last_page, 0);
@@ -1589,33 +1745,37 @@
 	return ret ? -EIO: 0;
 }
 
-int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
+int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
+				struct writeback_control *wbc,
 				bool do_balance, enum iostat_type io_type)
 {
-	pgoff_t index, end;
+	pgoff_t index;
 	struct pagevec pvec;
 	int step = 0;
 	int nwritten = 0;
 	int ret = 0;
+	int nr_pages, done = 0;
 
 	pagevec_init(&pvec, 0);
 
 next_step:
 	index = 0;
-	end = ULONG_MAX;
 
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_DIRTY,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
-			break;
+	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
+			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+		int i;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 			bool submitted = false;
 
+			/* give a priority to WB_SYNC threads */
+			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
+					wbc->sync_mode == WB_SYNC_NONE) {
+				done = 1;
+				break;
+			}
+
 			/*
 			 * flushing sequence with step:
 			 * 0. indirect nodes
@@ -1631,7 +1791,9 @@
 						!is_cold_node(page)))
 				continue;
 lock_node:
-			if (!trylock_page(page))
+			if (wbc->sync_mode == WB_SYNC_ALL)
+				lock_page(page);
+			else if (!trylock_page(page))
 				continue;
 
 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
@@ -1663,7 +1825,7 @@
 			set_dentry_mark(page, 0);
 
 			ret = __write_node_page(page, false, &submitted,
-						wbc, do_balance, io_type);
+						wbc, do_balance, io_type, NULL);
 			if (ret)
 				unlock_page(page);
 			else if (submitted)
@@ -1682,10 +1844,12 @@
 	}
 
 	if (step < 2) {
+		if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
+			goto out;
 		step++;
 		goto next_step;
 	}
-
+out:
 	if (nwritten)
 		f2fs_submit_merged_write(sbi, NODE);
 
@@ -1694,42 +1858,46 @@
 	return ret;
 }
 
-int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+						unsigned int seq_id)
 {
-	pgoff_t index = 0, end = ULONG_MAX;
-	struct pagevec pvec;
+	struct fsync_node_entry *fn;
+	struct page *page;
+	struct list_head *head = &sbi->fsync_node_list;
+	unsigned long flags;
+	unsigned int cur_seq_id = 0;
 	int ret2, ret = 0;
 
-	pagevec_init(&pvec, 0);
-
-	while (index <= end) {
-		int i, nr_pages;
-		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-				PAGECACHE_TAG_WRITEBACK,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
+	while (seq_id && cur_seq_id < seq_id) {
+		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+		if (list_empty(head)) {
+			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 			break;
-
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			/* until radix tree lookup accepts end_index */
-			if (unlikely(page->index > end))
-				continue;
-
-			if (ino && ino_of_node(page) == ino) {
-				f2fs_wait_on_page_writeback(page, NODE, true);
-				if (TestClearPageError(page))
-					ret = -EIO;
-			}
 		}
-		pagevec_release(&pvec);
-		cond_resched();
+		fn = list_first_entry(head, struct fsync_node_entry, list);
+		if (fn->seq_id > seq_id) {
+			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+			break;
+		}
+		cur_seq_id = fn->seq_id;
+		page = fn->page;
+		get_page(page);
+		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+		f2fs_wait_on_page_writeback(page, NODE, true);
+		if (TestClearPageError(page))
+			ret = -EIO;
+
+		put_page(page);
+
+		if (ret)
+			break;
 	}
 
 	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
 	if (!ret)
 		ret = ret2;
+
 	return ret;
 }
 
@@ -1750,14 +1918,21 @@
 	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
 		goto skip_write;
 
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		atomic_inc(&sbi->wb_sync_req[NODE]);
+	else if (atomic_read(&sbi->wb_sync_req[NODE]))
+		goto skip_write;
+
 	trace_f2fs_writepages(mapping->host, wbc, NODE);
 
 	diff = nr_pages_to_write(sbi, NODE, wbc);
-	wbc->sync_mode = WB_SYNC_NONE;
 	blk_start_plug(&plug);
-	sync_node_pages(sbi, wbc, true, FS_NODE_IO);
+	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
 	blk_finish_plug(&plug);
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		atomic_dec(&sbi->wb_sync_req[NODE]);
 	return 0;
 
 skip_write:
@@ -1772,6 +1947,10 @@
 
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
+#ifdef CONFIG_F2FS_CHECK_FS
+	if (IS_INODE(page))
+		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
@@ -1903,20 +2082,20 @@
 		 *   Thread A             Thread B
 		 *  - f2fs_create
 		 *   - f2fs_new_inode
-		 *    - alloc_nid
+		 *    - f2fs_alloc_nid
 		 *     - __insert_nid_to_list(PREALLOC_NID)
 		 *                     - f2fs_balance_fs_bg
-		 *                      - build_free_nids
-		 *                       - __build_free_nids
+		 *                      - f2fs_build_free_nids
+		 *                       - __f2fs_build_free_nids
 		 *                        - scan_nat_page
 		 *                         - add_free_nid
 		 *                          - __lookup_nat_cache
 		 *  - f2fs_add_link
-		 *   - init_inode_metadata
-		 *    - new_inode_page
-		 *     - new_node_page
+		 *   - f2fs_init_inode_metadata
+		 *    - f2fs_new_inode_page
+		 *     - f2fs_new_node_page
 		 *      - set_node_addr
-		 *  - alloc_nid_done
+		 *  - f2fs_alloc_nid_done
 		 *   - __remove_nid_from_list(PREALLOC_NID)
 		 *                         - __insert_nid_to_list(FREE_NID)
 		 */
@@ -1966,7 +2145,7 @@
 		kmem_cache_free(free_nid_slab, i);
 }
 
-static void scan_nat_page(struct f2fs_sb_info *sbi,
+static int scan_nat_page(struct f2fs_sb_info *sbi,
 			struct page *nat_page, nid_t start_nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1984,7 +2163,10 @@
 			break;
 
 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
-		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+
+		if (blk_addr == NEW_ADDR)
+			return -EINVAL;
+
 		if (blk_addr == NULL_ADDR) {
 			add_free_nid(sbi, start_nid, true, true);
 		} else {
@@ -1993,6 +2175,8 @@
 			spin_unlock(&NM_I(sbi)->nid_list_lock);
 		}
 	}
+
+	return 0;
 }
 
 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
@@ -2048,10 +2232,11 @@
 	up_read(&nm_i->nat_tree_lock);
 }
 
-static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+						bool sync, bool mount)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	int i = 0;
+	int i = 0, ret;
 	nid_t nid = nm_i->next_scan_nid;
 
 	if (unlikely(nid >= nm_i->max_nid))
@@ -2059,21 +2244,21 @@
 
 	/* Enough entries */
 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
-		return;
+		return 0;
 
-	if (!sync && !available_free_memory(sbi, FREE_NIDS))
-		return;
+	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
+		return 0;
 
 	if (!mount) {
 		/* try to find free nids in free_nid_bitmap */
 		scan_free_nid_bits(sbi);
 
 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
-			return;
+			return 0;
 	}
 
 	/* readahead nat pages to be scanned */
-	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
 							META_NAT, true);
 
 	down_read(&nm_i->nat_tree_lock);
@@ -2083,8 +2268,16 @@
 						nm_i->nat_block_bitmap)) {
 			struct page *page = get_current_nat_page(sbi, nid);
 
-			scan_nat_page(sbi, page, nid);
+			ret = scan_nat_page(sbi, page, nid);
 			f2fs_put_page(page, 1);
+
+			if (ret) {
+				up_read(&nm_i->nat_tree_lock);
+				f2fs_bug_on(sbi, !mount);
+				f2fs_msg(sbi->sb, KERN_ERR,
+					"NAT is corrupt, run fsck to fix it");
+				return -EINVAL;
+			}
 		}
 
 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
@@ -2103,15 +2296,21 @@
 
 	up_read(&nm_i->nat_tree_lock);
 
-	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
+	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
 					nm_i->ra_nid_pages, META_NAT, false);
+
+	return 0;
 }
 
-void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 {
+	int ret;
+
 	mutex_lock(&NM_I(sbi)->build_lock);
-	__build_free_nids(sbi, sync, mount);
+	ret = __f2fs_build_free_nids(sbi, sync, mount);
 	mutex_unlock(&NM_I(sbi)->build_lock);
+
+	return ret;
 }
 
 /*
@@ -2119,17 +2318,16 @@
  * from second parameter of this function.
  * The returned nid could be used ino as well as nid when inode is created.
  */
-bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
+bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i = NULL;
 retry:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
 		f2fs_show_injection_info(FAULT_ALLOC_NID);
 		return false;
 	}
-#endif
+
 	spin_lock(&nm_i->nid_list_lock);
 
 	if (unlikely(nm_i->available_nids == 0)) {
@@ -2137,8 +2335,8 @@
 		return false;
 	}
 
-	/* We should not use stale free nids created by build_free_nids */
-	if (nm_i->nid_cnt[FREE_NID] && !on_build_free_nids(nm_i)) {
+	/* We should not use stale free nids created by f2fs_build_free_nids */
+	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
 		i = list_first_entry(&nm_i->free_nid_list,
 					struct free_nid, list);
@@ -2155,14 +2353,14 @@
 	spin_unlock(&nm_i->nid_list_lock);
 
 	/* Let's scan nat pages and its caches to get free nids */
-	build_free_nids(sbi, true, false);
+	f2fs_build_free_nids(sbi, true, false);
 	goto retry;
 }
 
 /*
- * alloc_nid() should be called prior to this function.
+ * f2fs_alloc_nid() should be called prior to this function.
  */
-void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
@@ -2177,9 +2375,9 @@
 }
 
 /*
- * alloc_nid() should be called prior to this function.
+ * f2fs_alloc_nid() should be called prior to this function.
  */
-void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
+void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
@@ -2192,7 +2390,7 @@
 	i = __lookup_free_nid_list(nm_i, nid);
 	f2fs_bug_on(sbi, !i);
 
-	if (!available_free_memory(sbi, FREE_NIDS)) {
+	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
 		__remove_free_nid(sbi, i, PREALLOC_NID);
 		need_free = true;
 	} else {
@@ -2209,7 +2407,7 @@
 		kmem_cache_free(free_nid_slab, i);
 }
 
-int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i, *next;
@@ -2237,14 +2435,14 @@
 	return nr - nr_shrink;
 }
 
-void recover_inline_xattr(struct inode *inode, struct page *page)
+void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
 {
 	void *src_addr, *dst_addr;
 	size_t inline_size;
 	struct page *ipage;
 	struct f2fs_inode *ri;
 
-	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
 
 	ri = F2FS_INODE(page);
@@ -2262,11 +2460,11 @@
 	f2fs_wait_on_page_writeback(ipage, NODE, true);
 	memcpy(dst_addr, src_addr, inline_size);
 update_inode:
-	update_inode(inode, ipage);
+	f2fs_update_inode(inode, ipage);
 	f2fs_put_page(ipage, 1);
 }
 
-int recover_xattr_data(struct inode *inode, struct page *page)
+int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
@@ -2274,30 +2472,34 @@
 	struct dnode_of_data dn;
 	struct node_info ni;
 	struct page *xpage;
+	int err;
 
 	if (!prev_xnid)
 		goto recover_xnid;
 
 	/* 1: invalidate the previous xattr nid */
-	get_node_info(sbi, prev_xnid, &ni);
-	invalidate_blocks(sbi, ni.blk_addr);
+	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+	if (err)
+		return err;
+
+	f2fs_invalidate_blocks(sbi, ni.blk_addr);
 	dec_valid_node_count(sbi, inode, false);
 	set_node_addr(sbi, &ni, NULL_ADDR, false);
 
 recover_xnid:
 	/* 2: update xattr nid in inode */
-	if (!alloc_nid(sbi, &new_xnid))
+	if (!f2fs_alloc_nid(sbi, &new_xnid))
 		return -ENOSPC;
 
 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
-	xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
+	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
 	if (IS_ERR(xpage)) {
-		alloc_nid_failed(sbi, new_xnid);
+		f2fs_alloc_nid_failed(sbi, new_xnid);
 		return PTR_ERR(xpage);
 	}
 
-	alloc_nid_done(sbi, new_xnid);
-	update_inode_page(inode);
+	f2fs_alloc_nid_done(sbi, new_xnid);
+	f2fs_update_inode_page(inode);
 
 	/* 3: update and set xattr node page dirty */
 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
@@ -2308,14 +2510,17 @@
 	return 0;
 }
 
-int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
+int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 {
 	struct f2fs_inode *src, *dst;
 	nid_t ino = ino_of_node(page);
 	struct node_info old_ni, new_ni;
 	struct page *ipage;
+	int err;
 
-	get_node_info(sbi, ino, &old_ni);
+	err = f2fs_get_node_info(sbi, ino, &old_ni);
+	if (err)
+		return err;
 
 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
 		return -EINVAL;
@@ -2369,7 +2574,7 @@
 	return 0;
 }
 
-void restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum)
 {
 	struct f2fs_node *rn;
@@ -2386,10 +2591,13 @@
 		nrpages = min(last_offset - i, BIO_MAX_PAGES);
 
 		/* readahead node pages */
-		ra_meta_pages(sbi, addr, nrpages, META_POR, true);
+		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
 
 		for (idx = addr; idx < addr + nrpages; idx++) {
-			struct page *page = get_tmp_page(sbi, idx);
+			struct page *page = f2fs_get_tmp_page(sbi, idx);
+
+			if (IS_ERR(page))
+				return PTR_ERR(page);
 
 			rn = F2FS_NODE(page);
 			sum_entry->nid = rn->footer.nid;
@@ -2402,6 +2610,7 @@
 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
 							addr + nrpages);
 	}
+	return 0;
 }
 
 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -2531,7 +2740,7 @@
 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
 
 		if (to_journal) {
-			offset = lookup_journal_in_cursum(journal,
+			offset = f2fs_lookup_journal_in_cursum(journal,
 							NAT_JOURNAL, nid, 1);
 			f2fs_bug_on(sbi, offset < 0);
 			raw_ne = &nat_in_journal(journal, offset);
@@ -2568,7 +2777,7 @@
 /*
  * This function is called during the checkpointing process.
  */
-void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -2579,6 +2788,13 @@
 	nid_t set_idx = 0;
 	LIST_HEAD(sets);
 
+	/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
+	if (enabled_nat_bits(sbi, cpc)) {
+		down_write(&nm_i->nat_tree_lock);
+		remove_nats_in_journal(sbi);
+		up_write(&nm_i->nat_tree_lock);
+	}
+
 	if (!nm_i->dirty_nat_cnt)
 		return;
 
@@ -2631,7 +2847,13 @@
 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
 						nm_i->nat_bits_blocks;
 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
-		struct page *page = get_meta_page(sbi, nat_bits_addr++);
+		struct page *page;
+
+		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+		if (IS_ERR(page)) {
+			disable_nat_bits(sbi, true);
+			return PTR_ERR(page);
+		}
 
 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
 					page_address(page), F2FS_BLKSIZE);
@@ -2715,6 +2937,7 @@
 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
 	INIT_LIST_HEAD(&nm_i->nat_entries);
+	spin_lock_init(&nm_i->nat_list_lock);
 
 	mutex_init(&nm_i->build_lock);
 	spin_lock_init(&nm_i->nid_list_lock);
@@ -2750,15 +2973,17 @@
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	int i;
 
-	nm_i->free_nid_bitmap = f2fs_kzalloc(sbi, nm_i->nat_blocks *
-				sizeof(unsigned char *), GFP_KERNEL);
+	nm_i->free_nid_bitmap =
+		f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
+					     nm_i->nat_blocks),
+			     GFP_KERNEL);
 	if (!nm_i->free_nid_bitmap)
 		return -ENOMEM;
 
 	for (i = 0; i < nm_i->nat_blocks; i++) {
 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
-				NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
-		if (!nm_i->free_nid_bitmap)
+			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
+		if (!nm_i->free_nid_bitmap[i])
 			return -ENOMEM;
 	}
 
@@ -2767,14 +2992,16 @@
 	if (!nm_i->nat_block_bitmap)
 		return -ENOMEM;
 
-	nm_i->free_nid_count = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
-					sizeof(unsigned short), GFP_KERNEL);
+	nm_i->free_nid_count =
+		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
+					      nm_i->nat_blocks),
+			      GFP_KERNEL);
 	if (!nm_i->free_nid_count)
 		return -ENOMEM;
 	return 0;
 }
 
-int build_node_manager(struct f2fs_sb_info *sbi)
+int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
 {
 	int err;
 
@@ -2794,11 +3021,10 @@
 	/* load free nid status from nat_bits table */
 	load_free_nid_bitmap(sbi);
 
-	build_free_nids(sbi, true, true);
-	return 0;
+	return f2fs_build_free_nids(sbi, true, true);
 }
 
-void destroy_node_manager(struct f2fs_sb_info *sbi)
+void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i, *next_i;
@@ -2830,8 +3056,13 @@
 		unsigned idx;
 
 		nid = nat_get_nid(natvec[found - 1]) + 1;
-		for (idx = 0; idx < found; idx++)
+		for (idx = 0; idx < found; idx++) {
+			spin_lock(&nm_i->nat_list_lock);
+			list_del(&natvec[idx]->list);
+			spin_unlock(&nm_i->nat_list_lock);
+
 			__del_from_nat_cache(nm_i, natvec[idx]);
+		}
 	}
 	f2fs_bug_on(sbi, nm_i->nat_cnt);
 
@@ -2870,7 +3101,7 @@
 	kfree(nm_i);
 }
 
-int __init create_node_manager_caches(void)
+int __init f2fs_create_node_manager_caches(void)
 {
 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
 			sizeof(struct nat_entry));
@@ -2886,8 +3117,15 @@
 			sizeof(struct nat_entry_set));
 	if (!nat_entry_set_slab)
 		goto destroy_free_nid;
+
+	fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+			sizeof(struct fsync_node_entry));
+	if (!fsync_node_entry_slab)
+		goto destroy_nat_entry_set;
 	return 0;
 
+destroy_nat_entry_set:
+	kmem_cache_destroy(nat_entry_set_slab);
 destroy_free_nid:
 	kmem_cache_destroy(free_nid_slab);
 destroy_nat_entry:
@@ -2896,8 +3134,9 @@
 	return -ENOMEM;
 }
 
-void destroy_node_manager_caches(void)
+void f2fs_destroy_node_manager_caches(void)
 {
+	kmem_cache_destroy(fsync_node_entry_slab);
 	kmem_cache_destroy(nat_entry_set_slab);
 	kmem_cache_destroy(free_nid_slab);
 	kmem_cache_destroy(nat_entry_slab);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index b95e49e..0f4db7a 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -135,6 +135,11 @@
 	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
 }
 
+static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
+{
+	return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
+}
+
 enum mem_type {
 	FREE_NIDS,	/* indicates the free nid list */
 	NAT_ENTRIES,	/* indicates the cached nat entry */
@@ -444,6 +449,10 @@
 	else
 		flag &= ~(0x1 << type);
 	rn->footer.flag = cpu_to_le32(flag);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+	f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
 }
 #define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
 #define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 4ddc226..501bb0f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -47,7 +47,7 @@
 
 static struct kmem_cache *fsync_entry_slab;
 
-bool space_for_roll_forward(struct f2fs_sb_info *sbi)
+bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
 {
 	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
 
@@ -162,7 +162,7 @@
 			goto out_put;
 		}
 
-		err = acquire_orphan_inode(F2FS_I_SB(inode));
+		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
 		if (err) {
 			iput(einode);
 			goto out_put;
@@ -173,7 +173,7 @@
 	} else if (IS_ERR(page)) {
 		err = PTR_ERR(page);
 	} else {
-		err = __f2fs_do_add_link(dir, &fname, inode,
+		err = f2fs_add_dentry(dir, &fname, inode,
 					inode->i_ino, inode->i_mode);
 	}
 	if (err == -ENOMEM)
@@ -204,8 +204,6 @@
 		set_inode_flag(inode, FI_DATA_EXIST);
 	else
 		clear_inode_flag(inode, FI_DATA_EXIST);
-	if (!(ri->i_inline & F2FS_INLINE_DOTS))
-		clear_inode_flag(inode, FI_INLINE_DOTS);
 }
 
 static void recover_inode(struct inode *inode, struct page *page)
@@ -243,8 +241,8 @@
 	struct page *page = NULL;
 	block_t blkaddr;
 	unsigned int loop_cnt = 0;
-	unsigned int free_blocks = sbi->user_block_count -
-					valid_user_blocks(sbi);
+	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
+						valid_user_blocks(sbi);
 	int err = 0;
 
 	/* get node pages in the current segment */
@@ -254,10 +252,14 @@
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
+		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
 			return 0;
 
-		page = get_tmp_page(sbi, blkaddr);
+		page = f2fs_get_tmp_page(sbi, blkaddr);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			break;
+		}
 
 		if (!is_recoverable_dnode(page))
 			break;
@@ -271,7 +273,7 @@
 
 			if (!check_only &&
 					IS_INODE(page) && is_dent_dnode(page)) {
-				err = recover_inode_page(sbi, page);
+				err = f2fs_recover_inode_page(sbi, page);
 				if (err)
 					break;
 				quota_inode = true;
@@ -312,7 +314,7 @@
 		blkaddr = next_blkaddr_of_node(page);
 		f2fs_put_page(page, 1);
 
-		ra_meta_pages_cond(sbi, blkaddr);
+		f2fs_ra_meta_pages_cond(sbi, blkaddr);
 	}
 	f2fs_put_page(page, 1);
 	return err;
@@ -355,7 +357,7 @@
 		}
 	}
 
-	sum_page = get_sum_page(sbi, segno);
+	sum_page = f2fs_get_sum_page(sbi, segno);
 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
 	sum = sum_node->entries[blkoff];
 	f2fs_put_page(sum_page, 1);
@@ -375,7 +377,7 @@
 	}
 
 	/* Get the node page */
-	node_page = get_node_page(sbi, nid);
+	node_page = f2fs_get_node_page(sbi, nid);
 	if (IS_ERR(node_page))
 		return PTR_ERR(node_page);
 
@@ -400,7 +402,8 @@
 		inode = dn->inode;
 	}
 
-	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
+	bidx = f2fs_start_bidx_of_node(offset, inode) +
+				le16_to_cpu(sum.ofs_in_node);
 
 	/*
 	 * if inode page is locked, unlock temporarily, but its reference
@@ -410,11 +413,11 @@
 		unlock_page(dn->inode_page);
 
 	set_new_dnode(&tdn, inode, NULL, NULL, 0);
-	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
+	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
 		goto out;
 
 	if (tdn.data_blkaddr == blkaddr)
-		truncate_data_blocks_range(&tdn, 1);
+		f2fs_truncate_data_blocks_range(&tdn, 1);
 
 	f2fs_put_dnode(&tdn);
 out:
@@ -427,7 +430,7 @@
 truncate_out:
 	if (datablock_addr(tdn.inode, tdn.node_page,
 					tdn.ofs_in_node) == blkaddr)
-		truncate_data_blocks_range(&tdn, 1);
+		f2fs_truncate_data_blocks_range(&tdn, 1);
 	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
 		unlock_page(dn->inode_page);
 	return 0;
@@ -443,25 +446,25 @@
 
 	/* step 1: recover xattr */
 	if (IS_INODE(page)) {
-		recover_inline_xattr(inode, page);
+		f2fs_recover_inline_xattr(inode, page);
 	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
-		err = recover_xattr_data(inode, page);
+		err = f2fs_recover_xattr_data(inode, page);
 		if (!err)
 			recovered++;
 		goto out;
 	}
 
 	/* step 2: recover inline data */
-	if (recover_inline_data(inode, page))
+	if (f2fs_recover_inline_data(inode, page))
 		goto out;
 
 	/* step 3: recover data indices */
-	start = start_bidx_of_node(ofs_of_node(page), inode);
+	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
 	end = start + ADDRS_PER_PAGE(page, inode);
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 retry_dn:
-	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
+	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
 	if (err) {
 		if (err == -ENOMEM) {
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
@@ -472,7 +475,10 @@
 
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
-	get_node_info(sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(sbi, dn.nid, &ni);
+	if (err)
+		goto err;
+
 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
 	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
 
@@ -488,7 +494,7 @@
 
 		/* dest is invalid, just invalidate src block */
 		if (dest == NULL_ADDR) {
-			truncate_data_blocks_range(&dn, 1);
+			f2fs_truncate_data_blocks_range(&dn, 1);
 			continue;
 		}
 
@@ -502,20 +508,19 @@
 		 * and then reserve one new block in dnode page.
 		 */
 		if (dest == NEW_ADDR) {
-			truncate_data_blocks_range(&dn, 1);
-			reserve_new_block(&dn);
+			f2fs_truncate_data_blocks_range(&dn, 1);
+			f2fs_reserve_new_block(&dn);
 			continue;
 		}
 
 		/* dest is valid block, try to recover from src to dest */
-		if (is_valid_blkaddr(sbi, dest, META_POR)) {
+		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
 
 			if (src == NULL_ADDR) {
-				err = reserve_new_block(&dn);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-				while (err)
-					err = reserve_new_block(&dn);
-#endif
+				err = f2fs_reserve_new_block(&dn);
+				while (err &&
+				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
+					err = f2fs_reserve_new_block(&dn);
 				/* We should not get -ENOSPC */
 				f2fs_bug_on(sbi, err);
 				if (err)
@@ -569,12 +574,16 @@
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
+		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
 			break;
 
-		ra_meta_pages_cond(sbi, blkaddr);
+		f2fs_ra_meta_pages_cond(sbi, blkaddr);
 
-		page = get_tmp_page(sbi, blkaddr);
+		page = f2fs_get_tmp_page(sbi, blkaddr);
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			break;
+		}
 
 		if (!is_recoverable_dnode(page)) {
 			f2fs_put_page(page, 1);
@@ -612,11 +621,11 @@
 		f2fs_put_page(page, 1);
 	}
 	if (!err)
-		allocate_new_segments(sbi);
+		f2fs_allocate_new_segments(sbi);
 	return err;
 }
 
-int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 {
 	struct list_head inode_list;
 	struct list_head dir_list;
@@ -629,7 +638,8 @@
 #endif
 
 	if (s_flags & MS_RDONLY) {
-		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+		f2fs_msg(sbi->sb, KERN_INFO,
+				"recover fsync data on readonly fs");
 		sbi->sb->s_flags &= ~MS_RDONLY;
 	}
 
@@ -691,7 +701,7 @@
 		struct cp_control cpc = {
 			.reason = CP_RECOVERY,
 		};
-		err = write_checkpoint(sbi, &cpc);
+		err = f2fs_write_checkpoint(sbi, &cpc);
 	}
 
 	kmem_cache_destroy(fsync_entry_slab);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 284faa5..1da9a3c 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -169,7 +169,7 @@
 	return result - size + __reverse_ffz(tmp);
 }
 
-bool need_SSR(struct f2fs_sb_info *sbi)
+bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
 {
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
@@ -177,14 +177,14 @@
 
 	if (test_opt(sbi, LFS))
 		return false;
-	if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+	if (sbi->gc_mode == GC_URGENT)
 		return true;
 
 	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
 			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
 }
 
-void register_inmem_page(struct inode *inode, struct page *page)
+void f2fs_register_inmem_page(struct inode *inode, struct page *page)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -230,6 +230,8 @@
 
 		lock_page(page);
 
+		f2fs_wait_on_page_writeback(page, DATA, true);
+
 		if (recover) {
 			struct dnode_of_data dn;
 			struct node_info ni;
@@ -237,7 +239,8 @@
 			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
 retry:
 			set_new_dnode(&dn, inode, NULL, NULL, 0);
-			err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+			err = f2fs_get_dnode_of_data(&dn, page->index,
+								LOOKUP_NODE);
 			if (err) {
 				if (err == -ENOMEM) {
 					congestion_wait(BLK_RW_ASYNC, HZ/50);
@@ -247,9 +250,15 @@
 				err = -EAGAIN;
 				goto next;
 			}
-			get_node_info(sbi, dn.nid, &ni);
+
+			err = f2fs_get_node_info(sbi, dn.nid, &ni);
+			if (err) {
+				f2fs_put_dnode(&dn);
+				return err;
+			}
+
 			if (cur->old_addr == NEW_ADDR) {
-				invalidate_blocks(sbi, dn.data_blkaddr);
+				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
 			} else
 				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
@@ -271,7 +280,7 @@
 	return err;
 }
 
-void drop_inmem_pages_all(struct f2fs_sb_info *sbi)
+void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
 {
 	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
 	struct inode *inode;
@@ -287,15 +296,23 @@
 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
 
 	if (inode) {
-		drop_inmem_pages(inode);
+		if (gc_failure) {
+			if (fi->i_gc_failures[GC_FAILURE_ATOMIC])
+				goto drop;
+			goto skip;
+		}
+drop:
+		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+		f2fs_drop_inmem_pages(inode);
 		iput(inode);
 	}
+skip:
 	congestion_wait(BLK_RW_ASYNC, HZ/50);
 	cond_resched();
 	goto next;
 }
 
-void drop_inmem_pages(struct inode *inode)
+void f2fs_drop_inmem_pages(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -309,11 +326,11 @@
 	mutex_unlock(&fi->inmem_lock);
 
 	clear_inode_flag(inode, FI_ATOMIC_FILE);
-	clear_inode_flag(inode, FI_HOT_DATA);
+	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
 	stat_dec_atomic_write(inode);
 }
 
-void drop_inmem_page(struct inode *inode, struct page *page)
+void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -328,7 +345,7 @@
 			break;
 	}
 
-	f2fs_bug_on(sbi, !cur || cur->page != page);
+	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
 	list_del(&cur->list);
 	mutex_unlock(&fi->inmem_lock);
 
@@ -343,8 +360,7 @@
 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
 }
 
-static int __commit_inmem_pages(struct inode *inode,
-					struct list_head *revoke_list)
+static int __f2fs_commit_inmem_pages(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -357,9 +373,12 @@
 		.op_flags = REQ_SYNC | REQ_PRIO,
 		.io_type = FS_DATA_IO,
 	};
+	struct list_head revoke_list;
 	pgoff_t last_idx = ULONG_MAX;
 	int err = 0;
 
+	INIT_LIST_HEAD(&revoke_list);
+
 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
 		struct page *page = cur->page;
 
@@ -371,14 +390,14 @@
 			f2fs_wait_on_page_writeback(page, DATA, true);
 			if (clear_page_dirty_for_io(page)) {
 				inode_dec_dirty_pages(inode);
-				remove_dirty_inode(inode);
+				f2fs_remove_dirty_inode(inode);
 			}
 retry:
 			fio.page = page;
 			fio.old_blkaddr = NULL_ADDR;
 			fio.encrypted_page = NULL;
 			fio.need_lock = LOCK_DONE;
-			err = do_write_data_page(&fio);
+			err = f2fs_do_write_data_page(&fio);
 			if (err) {
 				if (err == -ENOMEM) {
 					congestion_wait(BLK_RW_ASYNC, HZ/50);
@@ -393,35 +412,13 @@
 			last_idx = page->index;
 		}
 		unlock_page(page);
-		list_move_tail(&cur->list, revoke_list);
+		list_move_tail(&cur->list, &revoke_list);
 	}
 
 	if (last_idx != ULONG_MAX)
 		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);
 
-	if (!err)
-		__revoke_inmem_pages(inode, revoke_list, false, false);
-
-	return err;
-}
-
-int commit_inmem_pages(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	struct list_head revoke_list;
-	int err;
-
-	INIT_LIST_HEAD(&revoke_list);
-	f2fs_balance_fs(sbi, true);
-	f2fs_lock_op(sbi);
-
-	set_inode_flag(inode, FI_ATOMIC_COMMIT);
-
-	mutex_lock(&fi->inmem_lock);
-	err = __commit_inmem_pages(inode, &revoke_list);
 	if (err) {
-		int ret;
 		/*
 		 * try to revoke all committed pages, but still we could fail
 		 * due to no memory or other reason, if that happened, EAGAIN
@@ -430,13 +427,33 @@
 		 * recovery or rewrite & commit last transaction. For other
 		 * error number, revoking was done by filesystem itself.
 		 */
-		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
-		if (ret)
-			err = ret;
+		err = __revoke_inmem_pages(inode, &revoke_list, false, true);
 
 		/* drop all uncommitted pages */
 		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+	} else {
+		__revoke_inmem_pages(inode, &revoke_list, false, false);
 	}
+
+	return err;
+}
+
+int f2fs_commit_inmem_pages(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	int err;
+
+	f2fs_balance_fs(sbi, true);
+
+	down_write(&fi->i_gc_rwsem[WRITE]);
+
+	f2fs_lock_op(sbi);
+	set_inode_flag(inode, FI_ATOMIC_COMMIT);
+
+	mutex_lock(&fi->inmem_lock);
+	err = __f2fs_commit_inmem_pages(inode);
+
 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
 	if (!list_empty(&fi->inmem_ilist))
 		list_del_init(&fi->inmem_ilist);
@@ -446,6 +463,8 @@
 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
 
 	f2fs_unlock_op(sbi);
+	up_write(&fi->i_gc_rwsem[WRITE]);
+
 	return err;
 }
 
@@ -455,12 +474,10 @@
  */
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
-#ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 		f2fs_show_injection_info(FAULT_CHECKPOINT);
 		f2fs_stop_checkpoint(sbi, false);
 	}
-#endif
 
 	/* balance_fs_bg is able to be pending */
 	if (need && excess_cached_nats(sbi))
@@ -478,33 +495,38 @@
 
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 {
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+		return;
+
 	/* try to shrink extent cache when there is no enough memory */
-	if (!available_free_memory(sbi, EXTENT_CACHE))
+	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
 		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 
 	/* check the # of cached NAT entries */
-	if (!available_free_memory(sbi, NAT_ENTRIES))
-		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
+		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
 
-	if (!available_free_memory(sbi, FREE_NIDS))
-		try_to_free_nids(sbi, MAX_FREE_NIDS);
+	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
+		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
 	else
-		build_free_nids(sbi, false, false);
+		f2fs_build_free_nids(sbi, false, false);
 
-	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
+	if (!is_idle(sbi) &&
+		(!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
 		return;
 
 	/* checkpoint is the only way to shrink partial cached entries */
-	if (!available_free_memory(sbi, NAT_ENTRIES) ||
-			!available_free_memory(sbi, INO_ENTRIES) ||
+	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
+			!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			excess_dirty_nats(sbi) ||
+			excess_dirty_nodes(sbi) ||
 			f2fs_time_over(sbi, CP_TIME)) {
 		if (test_opt(sbi, DATA_FLUSH)) {
 			struct blk_plug plug;
 
 			blk_start_plug(&plug);
-			sync_dirty_inodes(sbi, FILE_INODE);
+			f2fs_sync_dirty_inodes(sbi, FILE_INODE);
 			blk_finish_plug(&plug);
 		}
 		f2fs_sync_fs(sbi->sb, true);
@@ -537,7 +559,7 @@
 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
 
 	for (i = 0; i < sbi->s_ndevs; i++) {
-		if (!is_dirty_device(sbi, ino, i, FLUSH_INO))
+		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
 			continue;
 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
 		if (ret)
@@ -648,7 +670,7 @@
 	return cmd.ret;
 }
 
-int create_flush_cmd_control(struct f2fs_sb_info *sbi)
+int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
 {
 	dev_t dev = sbi->sb->s_bdev->bd_dev;
 	struct flush_cmd_control *fcc;
@@ -685,7 +707,7 @@
 	return err;
 }
 
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
+void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
 {
 	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
 
@@ -819,9 +841,12 @@
 	dc->len = len;
 	dc->ref = 0;
 	dc->state = D_PREP;
+	dc->issuing = 0;
 	dc->error = 0;
 	init_completion(&dc->wait);
 	list_add_tail(&dc->list, pend_list);
+	spin_lock_init(&dc->lock);
+	dc->bio_ref = 0;
 	atomic_inc(&dcc->discard_cmd_cnt);
 	dcc->undiscard_blks += len;
 
@@ -848,7 +873,7 @@
 							struct discard_cmd *dc)
 {
 	if (dc->state == D_DONE)
-		atomic_dec(&dcc->issing_discard);
+		atomic_sub(dc->issuing, &dcc->issing_discard);
 
 	list_del(&dc->list);
 	rb_erase(&dc->rb_node, &dcc->root);
@@ -863,9 +888,17 @@
 							struct discard_cmd *dc)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	unsigned long flags;
 
 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
 
+	spin_lock_irqsave(&dc->lock, flags);
+	if (dc->bio_ref) {
+		spin_unlock_irqrestore(&dc->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dc->lock, flags);
+
 	f2fs_bug_on(sbi, dc->ref);
 
 	if (dc->error == -EOPNOTSUPP)
@@ -881,10 +914,17 @@
 static void f2fs_submit_discard_endio(struct bio *bio)
 {
 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+	unsigned long flags;
 
 	dc->error = bio->bi_error;
-	dc->state = D_DONE;
-	complete_all(&dc->wait);
+
+	spin_lock_irqsave(&dc->lock, flags);
+	dc->bio_ref--;
+	if (!dc->bio_ref && dc->state == D_SUBMIT) {
+		dc->state = D_DONE;
+		complete_all(&dc->wait);
+	}
+	spin_unlock_irqrestore(&dc->lock, flags);
 	bio_put(bio);
 }
 
@@ -922,6 +962,7 @@
 	/* common policy */
 	dpolicy->type = discard_type;
 	dpolicy->sync = true;
+	dpolicy->ordered = false;
 	dpolicy->granularity = granularity;
 
 	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
@@ -929,63 +970,137 @@
 
 	if (discard_type == DPOLICY_BG) {
 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
 		dpolicy->io_aware = true;
 		dpolicy->sync = false;
+		dpolicy->ordered = true;
 		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
 			dpolicy->granularity = 1;
 			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
 		}
 	} else if (discard_type == DPOLICY_FORCE) {
 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
+		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
 		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
 		dpolicy->io_aware = false;
 	} else if (discard_type == DPOLICY_FSTRIM) {
 		dpolicy->io_aware = false;
 	} else if (discard_type == DPOLICY_UMOUNT) {
+		dpolicy->max_requests = UINT_MAX;
 		dpolicy->io_aware = false;
 	}
 }
 
-
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+				struct block_device *bdev, block_t lstart,
+				block_t start, block_t len);
 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy,
-						struct discard_cmd *dc)
+						struct discard_cmd *dc,
+						unsigned int *issued)
 {
+	struct block_device *bdev = dc->bdev;
+	struct request_queue *q = bdev_get_queue(bdev);
+	unsigned int max_discard_blocks =
+			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
 					&(dcc->fstrim_list) : &(dcc->wait_list);
-	struct bio *bio = NULL;
 	int flag = dpolicy->sync ? REQ_SYNC : 0;
+	block_t lstart, start, len, total_len;
+	int err = 0;
 
 	if (dc->state != D_PREP)
-		return;
+		return 0;
 
-	trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+		return 0;
 
-	dc->error = __blkdev_issue_discard(dc->bdev,
-				SECTOR_FROM_BLOCK(dc->start),
-				SECTOR_FROM_BLOCK(dc->len),
-				GFP_NOFS, 0, &bio);
-	if (!dc->error) {
-		/* should keep before submission to avoid D_DONE right away */
-		dc->state = D_SUBMIT;
-		atomic_inc(&dcc->issued_discard);
-		atomic_inc(&dcc->issing_discard);
-		if (bio) {
-			bio->bi_private = dc;
-			bio->bi_end_io = f2fs_submit_discard_endio;
-			bio->bi_opf |= flag;
-			submit_bio(bio);
-			list_move_tail(&dc->list, wait_list);
-			__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
 
-			f2fs_update_iostat(sbi, FS_DISCARD, 1);
+	lstart = dc->lstart;
+	start = dc->start;
+	len = dc->len;
+	total_len = len;
+
+	dc->len = 0;
+
+	while (total_len && *issued < dpolicy->max_requests && !err) {
+		struct bio *bio = NULL;
+		unsigned long flags;
+		bool last = true;
+
+		if (len > max_discard_blocks) {
+			len = max_discard_blocks;
+			last = false;
 		}
-	} else {
-		__remove_discard_cmd(sbi, dc);
+
+		(*issued)++;
+		if (*issued == dpolicy->max_requests)
+			last = true;
+
+		dc->len += len;
+
+		if (time_to_inject(sbi, FAULT_DISCARD)) {
+			f2fs_show_injection_info(FAULT_DISCARD);
+			err = -EIO;
+			goto submit;
+		}
+		err = __blkdev_issue_discard(bdev,
+					SECTOR_FROM_BLOCK(start),
+					SECTOR_FROM_BLOCK(len),
+					GFP_NOFS, 0, &bio);
+submit:
+		if (err) {
+			spin_lock_irqsave(&dc->lock, flags);
+			if (dc->state == D_PARTIAL)
+				dc->state = D_SUBMIT;
+			spin_unlock_irqrestore(&dc->lock, flags);
+
+			break;
+		}
+
+		f2fs_bug_on(sbi, !bio);
+
+		/*
+		 * should keep before submission to avoid D_DONE
+		 * right away
+		 */
+		spin_lock_irqsave(&dc->lock, flags);
+		if (last)
+			dc->state = D_SUBMIT;
+		else
+			dc->state = D_PARTIAL;
+		dc->bio_ref++;
+		spin_unlock_irqrestore(&dc->lock, flags);
+
+		atomic_inc(&dcc->issing_discard);
+		dc->issuing++;
+		list_move_tail(&dc->list, wait_list);
+
+		/* sanity check on discard range */
+		__check_sit_bitmap(sbi, start, start + len);
+
+		bio->bi_private = dc;
+		bio->bi_end_io = f2fs_submit_discard_endio;
+		bio->bi_opf |= flag;
+		submit_bio(bio);
+
+		atomic_inc(&dcc->issued_discard);
+
+		f2fs_update_iostat(sbi, FS_DISCARD, 1);
+
+		lstart += len;
+		start += len;
+		total_len -= len;
+		len = total_len;
 	}
+
+	if (!err && len)
+		__update_discard_tree_range(sbi, bdev, lstart, start, len);
+	return err;
 }
 
 static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
@@ -1005,7 +1120,7 @@
 		goto do_insert;
 	}
 
-	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
+	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
 do_insert:
 	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
 	if (!dc)
@@ -1066,11 +1181,12 @@
 	struct discard_cmd *dc;
 	struct discard_info di = {0};
 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	struct request_queue *q = bdev_get_queue(bdev);
+	unsigned int max_discard_blocks =
+			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
 	block_t end = lstart + len;
 
-	mutex_lock(&dcc->cmd_lock);
-
-	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
 					NULL, lstart,
 					(struct rb_entry **)&prev_dc,
 					(struct rb_entry **)&next_dc,
@@ -1109,7 +1225,8 @@
 
 		if (prev_dc && prev_dc->state == D_PREP &&
 			prev_dc->bdev == bdev &&
-			__is_discard_back_mergeable(&di, &prev_dc->di)) {
+			__is_discard_back_mergeable(&di, &prev_dc->di,
+							max_discard_blocks)) {
 			prev_dc->di.len += di.len;
 			dcc->undiscard_blks += di.len;
 			__relocate_discard_cmd(dcc, prev_dc);
@@ -1120,7 +1237,8 @@
 
 		if (next_dc && next_dc->state == D_PREP &&
 			next_dc->bdev == bdev &&
-			__is_discard_front_mergeable(&di, &next_dc->di)) {
+			__is_discard_front_mergeable(&di, &next_dc->di,
+							max_discard_blocks)) {
 			next_dc->di.lstart = di.lstart;
 			next_dc->di.len += di.len;
 			next_dc->di.start = di.start;
@@ -1143,8 +1261,6 @@
 		node = rb_next(&prev_dc->rb_node);
 		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
 	}
-
-	mutex_unlock(&dcc->cmd_lock);
 }
 
 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
@@ -1159,10 +1275,72 @@
 
 		blkstart -= FDEV(devi).start_blk;
 	}
+	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
 	return 0;
 }
 
+static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
+					struct discard_policy *dpolicy)
+{
+	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	struct discard_cmd *dc;
+	struct blk_plug plug;
+	unsigned int pos = dcc->next_pos;
+	unsigned int issued = 0;
+	bool io_interrupted = false;
+
+	mutex_lock(&dcc->cmd_lock);
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+					NULL, pos,
+					(struct rb_entry **)&prev_dc,
+					(struct rb_entry **)&next_dc,
+					&insert_p, &insert_parent, true);
+	if (!dc)
+		dc = next_dc;
+
+	blk_start_plug(&plug);
+
+	while (dc) {
+		struct rb_node *node;
+		int err = 0;
+
+		if (dc->state != D_PREP)
+			goto next;
+
+		if (dpolicy->io_aware && !is_idle(sbi)) {
+			io_interrupted = true;
+			break;
+		}
+
+		dcc->next_pos = dc->lstart + dc->len;
+		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+		if (issued >= dpolicy->max_requests)
+			break;
+next:
+		node = rb_next(&dc->rb_node);
+		if (err)
+			__remove_discard_cmd(sbi, dc);
+		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+	}
+
+	blk_finish_plug(&plug);
+
+	if (!dc)
+		dcc->next_pos = 0;
+
+	mutex_unlock(&dcc->cmd_lock);
+
+	if (!issued && io_interrupted)
+		issued = -1;
+
+	return issued;
+}
+
 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy)
 {
@@ -1170,18 +1348,24 @@
 	struct list_head *pend_list;
 	struct discard_cmd *dc, *tmp;
 	struct blk_plug plug;
-	int i, iter = 0, issued = 0;
+	int i, issued = 0;
 	bool io_interrupted = false;
 
 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
 		if (i + 1 < dpolicy->granularity)
 			break;
+
+		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+			return __issue_discard_cmd_orderly(sbi, dpolicy);
+
 		pend_list = &dcc->pend_list[i];
 
 		mutex_lock(&dcc->cmd_lock);
 		if (list_empty(pend_list))
 			goto next;
-		f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+		if (unlikely(dcc->rbtree_check))
+			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+								&dcc->root));
 		blk_start_plug(&plug);
 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
 			f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1189,20 +1373,19 @@
 			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
 								!is_idle(sbi)) {
 				io_interrupted = true;
-				goto skip;
+				break;
 			}
 
-			__submit_discard_cmd(sbi, dpolicy, dc);
-			issued++;
-skip:
-			if (++iter >= dpolicy->max_requests)
+			__submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+			if (issued >= dpolicy->max_requests)
 				break;
 		}
 		blk_finish_plug(&plug);
 next:
 		mutex_unlock(&dcc->cmd_lock);
 
-		if (iter >= dpolicy->max_requests)
+		if (issued >= dpolicy->max_requests || io_interrupted)
 			break;
 	}
 
@@ -1234,7 +1417,7 @@
 	return dropped;
 }
 
-void drop_discard_cmd(struct f2fs_sb_info *sbi)
+void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
 {
 	__drop_discard_cmd(sbi);
 }
@@ -1300,21 +1483,22 @@
 	return trimmed;
 }
 
-static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
 						struct discard_policy *dpolicy)
 {
 	struct discard_policy dp;
+	unsigned int discard_blks;
 
-	if (dpolicy) {
-		__wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
-		return;
-	}
+	if (dpolicy)
+		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
 
 	/* wait all */
 	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
-	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
 	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
-	__wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+
+	return discard_blks;
 }
 
 /* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1325,7 +1509,8 @@
 	bool need_wait = false;
 
 	mutex_lock(&dcc->cmd_lock);
-	dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
+							NULL, blkaddr);
 	if (dc) {
 		if (dc->state == D_PREP) {
 			__punch_discard_cmd(sbi, dc, blkaddr);
@@ -1340,7 +1525,7 @@
 		__wait_one_discard_bio(sbi, dc);
 }
 
-void stop_discard_thread(struct f2fs_sb_info *sbi)
+void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 
@@ -1366,6 +1551,8 @@
 
 	/* just to make sure there is no pending discard commands */
 	__wait_all_discard_cmd(sbi, NULL);
+
+	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
 	return dropped;
 }
 
@@ -1377,6 +1564,8 @@
 	struct discard_policy dpolicy;
 	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
 	int issued;
+	unsigned long interval = sbi->interval_time[REQ_TIME] * HZ;
+	long delta;
 
 	set_freezable();
 
@@ -1388,25 +1577,36 @@
 				kthread_should_stop() || freezing(current) ||
 				dcc->discard_wake,
 				msecs_to_jiffies(wait_ms));
+
+		if (dcc->discard_wake)
+			dcc->discard_wake = 0;
+
 		if (try_to_freeze())
 			continue;
 		if (f2fs_readonly(sbi->sb))
 			continue;
 		if (kthread_should_stop())
 			return 0;
+		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+			wait_ms = dpolicy.max_interval;
+			continue;
+		}
 
-		if (dcc->discard_wake)
-			dcc->discard_wake = 0;
-
-		if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+		if (sbi->gc_mode == GC_URGENT)
 			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
 
 		sb_start_intwrite(sbi->sb);
 
 		issued = __issue_discard_cmd(sbi, &dpolicy);
-		if (issued) {
+		if (issued > 0) {
 			__wait_all_discard_cmd(sbi, &dpolicy);
 			wait_ms = dpolicy.min_interval;
+		} else if (issued == -1) {
+			delta = (sbi->last_time[REQ_TIME] + interval) - jiffies;
+			if (delta > 0)
+				wait_ms = jiffies_to_msecs(delta);
+			else
+				wait_ms = dpolicy.mid_interval;
 		} else {
 			wait_ms = dpolicy.max_interval;
 		}
@@ -1575,20 +1775,24 @@
 	return false;
 }
 
-void release_discard_addrs(struct f2fs_sb_info *sbi)
+static void release_discard_addr(struct discard_entry *entry)
+{
+	list_del(&entry->list);
+	kmem_cache_free(discard_entry_slab, entry);
+}
+
+void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
 {
 	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
 	struct discard_entry *entry, *this;
 
 	/* drop caches */
-	list_for_each_entry_safe(entry, this, head, list) {
-		list_del(&entry->list);
-		kmem_cache_free(discard_entry_slab, entry);
-	}
+	list_for_each_entry_safe(entry, this, head, list)
+		release_discard_addr(entry);
 }
 
 /*
- * Should call clear_prefree_segments after checkpoint is done.
+ * Should call f2fs_clear_prefree_segments after checkpoint is done.
  */
 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
 {
@@ -1601,7 +1805,8 @@
 	mutex_unlock(&dirty_i->seglist_lock);
 }
 
-void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+						struct cp_control *cpc)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 	struct list_head *head = &dcc->entry_list;
@@ -1611,21 +1816,30 @@
 	unsigned int start = 0, end = -1;
 	unsigned int secno, start_segno;
 	bool force = (cpc->reason & CP_DISCARD);
+	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
 
 	mutex_lock(&dirty_i->seglist_lock);
 
 	while (1) {
 		int i;
+
+		if (need_align && end != -1)
+			end--;
 		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
 		if (start >= MAIN_SEGS(sbi))
 			break;
 		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
 								start + 1);
 
-		for (i = start; i < end; i++)
-			clear_bit(i, prefree_map);
+		if (need_align) {
+			start = rounddown(start, sbi->segs_per_sec);
+			end = roundup(end, sbi->segs_per_sec);
+		}
 
-		dirty_i->nr_dirty[PRE] -= end - start;
+		for (i = start; i < end; i++) {
+			if (test_and_clear_bit(i, prefree_map))
+				dirty_i->nr_dirty[PRE]--;
+		}
 
 		if (!test_opt(sbi, DISCARD))
 			continue;
@@ -1684,9 +1898,8 @@
 		if (cur_pos < sbi->blocks_per_seg)
 			goto find_next;
 
-		list_del(&entry->list);
+		release_discard_addr(entry);
 		dcc->nr_discards -= total_len;
-		kmem_cache_free(discard_entry_slab, entry);
 	}
 
 	wake_up_discard_thread(sbi, false);
@@ -1720,7 +1933,9 @@
 	dcc->nr_discards = 0;
 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
 	dcc->undiscard_blks = 0;
+	dcc->next_pos = 0;
 	dcc->root = RB_ROOT;
+	dcc->rbtree_check = false;
 
 	init_waitqueue_head(&dcc->discard_wait_queue);
 	SM_I(sbi)->dcc_info = dcc;
@@ -1744,7 +1959,7 @@
 	if (!dcc)
 		return;
 
-	stop_discard_thread(sbi);
+	f2fs_stop_discard_thread(sbi);
 
 	kfree(dcc);
 	SM_I(sbi)->dcc_info = NULL;
@@ -1791,8 +2006,9 @@
 				(new_vblocks > sbi->blocks_per_seg)));
 
 	se->valid_blocks = new_vblocks;
-	se->mtime = get_mtime(sbi);
-	SIT_I(sbi)->max_mtime = se->mtime;
+	se->mtime = get_mtime(sbi, false);
+	if (se->mtime > SIT_I(sbi)->max_mtime)
+		SIT_I(sbi)->max_mtime = se->mtime;
 
 	/* Update valid block bitmap */
 	if (del > 0) {
@@ -1860,7 +2076,7 @@
 		get_sec_entry(sbi, segno)->valid_blocks += del;
 }
 
-void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
 {
 	unsigned int segno = GET_SEGNO(sbi, addr);
 	struct sit_info *sit_i = SIT_I(sbi);
@@ -1869,6 +2085,8 @@
 	if (addr == NEW_ADDR)
 		return;
 
+	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+
 	/* add it into sit main buffer */
 	down_write(&sit_i->sentry_lock);
 
@@ -1880,14 +2098,14 @@
 	up_write(&sit_i->sentry_lock);
 }
 
-bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
+bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	unsigned int segno, offset;
 	struct seg_entry *se;
 	bool is_cp = false;
 
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+	if (!is_valid_data_blkaddr(sbi, blkaddr))
 		return true;
 
 	down_read(&sit_i->sentry_lock);
@@ -1919,7 +2137,7 @@
 /*
  * Calculate the number of current summary pages for writing
  */
-int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
+int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
 {
 	int valid_sum_count = 0;
 	int i, sum_in_page;
@@ -1949,14 +2167,15 @@
 /*
  * Caller should put this summary page
  */
-struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
+struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+	return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
 }
 
-void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
+void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
+					void *src, block_t blk_addr)
 {
-	struct page *page = grab_meta_page(sbi, blk_addr);
+	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 
 	memcpy(page_address(page), src, PAGE_SIZE);
 	set_page_dirty(page);
@@ -1966,18 +2185,19 @@
 static void write_sum_page(struct f2fs_sb_info *sbi,
 			struct f2fs_summary_block *sum_blk, block_t blk_addr)
 {
-	update_meta_page(sbi, (void *)sum_blk, blk_addr);
+	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
 }
 
 static void write_current_sum_page(struct f2fs_sb_info *sbi,
 						int type, block_t blk_addr)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
-	struct page *page = grab_meta_page(sbi, blk_addr);
+	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 	struct f2fs_summary_block *src = curseg->sum_blk;
 	struct f2fs_summary_block *dst;
 
 	dst = (struct f2fs_summary_block *)page_address(page);
+	memset(dst, 0, PAGE_SIZE);
 
 	mutex_lock(&curseg->curseg_mutex);
 
@@ -2217,7 +2437,7 @@
 	curseg->alloc_type = SSR;
 	__next_free_blkoff(sbi, curseg, 0);
 
-	sum_page = get_sum_page(sbi, new_segno);
+	sum_page = f2fs_get_sum_page(sbi, new_segno);
 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
 	f2fs_put_page(sum_page, 1);
@@ -2231,7 +2451,7 @@
 	int i, cnt;
 	bool reversed = false;
 
-	/* need_SSR() already forces to do this */
+	/* f2fs_need_SSR() already forces to do this */
 	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
 		curseg->next_segno = segno;
 		return 1;
@@ -2283,7 +2503,7 @@
 		new_curseg(sbi, type, false);
 	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
 		new_curseg(sbi, type, false);
-	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
+	else if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
 		change_curseg(sbi, type);
 	else
 		new_curseg(sbi, type, false);
@@ -2291,7 +2511,7 @@
 	stat_inc_seg_type(sbi, curseg);
 }
 
-void allocate_new_segments(struct f2fs_sb_info *sbi)
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 {
 	struct curseg_info *curseg;
 	unsigned int old_segno;
@@ -2313,7 +2533,8 @@
 	.allocate_segment = allocate_segment_by_default,
 };
 
-bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
+						struct cp_control *cpc)
 {
 	__u64 trim_start = cpc->trim_start;
 	bool has_candidate = false;
@@ -2331,7 +2552,7 @@
 	return has_candidate;
 }
 
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy,
 					unsigned int start, unsigned int end)
 {
@@ -2341,14 +2562,17 @@
 	struct discard_cmd *dc;
 	struct blk_plug plug;
 	int issued;
+	unsigned int trimmed = 0;
 
 next:
 	issued = 0;
 
 	mutex_lock(&dcc->cmd_lock);
-	f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+	if (unlikely(dcc->rbtree_check))
+		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+								&dcc->root));
 
-	dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
 					NULL, start,
 					(struct rb_entry **)&prev_dc,
 					(struct rb_entry **)&next_dc,
@@ -2360,6 +2584,7 @@
 
 	while (dc && dc->lstart <= end) {
 		struct rb_node *node;
+		int err = 0;
 
 		if (dc->len < dpolicy->granularity)
 			goto skip;
@@ -2369,19 +2594,24 @@
 			goto skip;
 		}
 
-		__submit_discard_cmd(sbi, dpolicy, dc);
+		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
 
-		if (++issued >= dpolicy->max_requests) {
+		if (issued >= dpolicy->max_requests) {
 			start = dc->lstart + dc->len;
 
+			if (err)
+				__remove_discard_cmd(sbi, dc);
+
 			blk_finish_plug(&plug);
 			mutex_unlock(&dcc->cmd_lock);
-			__wait_all_discard_cmd(sbi, NULL);
+			trimmed += __wait_all_discard_cmd(sbi, NULL);
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto next;
 		}
 skip:
 		node = rb_next(&dc->rb_node);
+		if (err)
+			__remove_discard_cmd(sbi, dc);
 		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
 
 		if (fatal_signal_pending(current))
@@ -2390,6 +2620,8 @@
 
 	blk_finish_plug(&plug);
 	mutex_unlock(&dcc->cmd_lock);
+
+	return trimmed;
 }
 
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
@@ -2402,23 +2634,28 @@
 	struct discard_policy dpolicy;
 	unsigned long long trimmed = 0;
 	int err = 0;
+	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
 
 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
 		return -EINVAL;
 
-	if (end <= MAIN_BLKADDR(sbi))
+	if (end < MAIN_BLKADDR(sbi))
 		goto out;
 
 	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"Found FS corruption, run fsck to fix.");
-		goto out;
+		return -EIO;
 	}
 
 	/* start/end segment number in main_area */
 	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
 						GET_SEGNO(sbi, end);
+	if (need_align) {
+		start_segno = rounddown(start_segno, sbi->segs_per_sec);
+		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+	}
 
 	cpc.reason = CP_DISCARD;
 	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
@@ -2429,29 +2666,32 @@
 		goto out;
 
 	mutex_lock(&sbi->gc_mutex);
-	err = write_checkpoint(sbi, &cpc);
+	err = f2fs_write_checkpoint(sbi, &cpc);
 	mutex_unlock(&sbi->gc_mutex);
 	if (err)
 		goto out;
 
-	start_block = START_BLOCK(sbi, start_segno);
-	end_block = START_BLOCK(sbi, end_segno + 1);
-
-	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
-	__issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
-
 	/*
 	 * We filed discard candidates, but actually we don't need to wait for
 	 * all of them, since they'll be issued in idle time along with runtime
 	 * discard option. User configuration looks like using runtime discard
 	 * or periodic fstrim instead of it.
 	 */
-	if (!test_opt(sbi, DISCARD)) {
-		trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+	if (test_opt(sbi, DISCARD))
+		goto out;
+
+	start_block = START_BLOCK(sbi, start_segno);
+	end_block = START_BLOCK(sbi, end_segno + 1);
+
+	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
 					start_block, end_block);
-		range->len = F2FS_BLK_TO_BYTES(trimmed);
-	}
+
+	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
+					start_block, end_block);
 out:
+	if (!err)
+		range->len = F2FS_BLK_TO_BYTES(trimmed);
 	return err;
 }
 
@@ -2463,7 +2703,7 @@
 	return false;
 }
 
-int rw_hint_to_seg_type(enum rw_hint hint)
+int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
 {
 	switch (hint) {
 	case WRITE_LIFE_SHORT:
@@ -2536,7 +2776,7 @@
  * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
  */
 
-enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
 				enum page_type type, enum temp_type temp)
 {
 	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
@@ -2603,9 +2843,11 @@
 		if (is_cold_data(fio->page) || file_is_cold(inode))
 			return CURSEG_COLD_DATA;
 		if (file_is_hot(inode) ||
-				is_inode_flag_set(inode, FI_HOT_DATA))
+				is_inode_flag_set(inode, FI_HOT_DATA) ||
+				f2fs_is_atomic_file(inode) ||
+				f2fs_is_volatile_file(inode))
 			return CURSEG_HOT_DATA;
-		/* rw_hint_to_seg_type(inode->i_write_hint); */
+		/* f2fs_rw_hint_to_seg_type(inode->i_write_hint); */
 		return CURSEG_WARM_DATA;
 	} else {
 		if (IS_DNODE(fio->page))
@@ -2642,7 +2884,7 @@
 	return type;
 }
 
-void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		block_t old_blkaddr, block_t *new_blkaddr,
 		struct f2fs_summary *sum, int type,
 		struct f2fs_io_info *fio, bool add_list)
@@ -2702,6 +2944,7 @@
 
 		INIT_LIST_HEAD(&fio->list);
 		fio->in_list = true;
+		fio->retry = false;
 		io = sbi->write_io[fio->type] + fio->temp;
 		spin_lock(&io->io_lock);
 		list_add_tail(&fio->list, &io->io_list);
@@ -2724,7 +2967,7 @@
 	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
 
 	/* update device state for fsync */
-	set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
+	f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
 
 	/* update device state for checkpoint */
 	if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
@@ -2737,23 +2980,31 @@
 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
 	int type = __get_segment_type(fio);
-	int err;
+	bool keep_order = (test_opt(fio->sbi, LFS) && type == CURSEG_COLD_DATA);
 
+	if (keep_order)
+		down_read(&fio->sbi->io_order_lock);
 reallocate:
-	allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio, true);
+	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+		invalidate_mapping_pages(META_MAPPING(fio->sbi),
+					fio->old_blkaddr, fio->old_blkaddr);
 
 	/* writeout dirty page into bdev */
-	err = f2fs_submit_page_write(fio);
-	if (err == -EAGAIN) {
+	f2fs_submit_page_write(fio);
+	if (fio->retry) {
 		fio->old_blkaddr = fio->new_blkaddr;
 		goto reallocate;
-	} else if (!err) {
-		update_device_state(fio);
 	}
+
+	update_device_state(fio);
+
+	if (keep_order)
+		up_read(&fio->sbi->io_order_lock);
 }
 
-void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
 					enum iostat_type io_type)
 {
 	struct f2fs_io_info fio = {
@@ -2779,7 +3030,7 @@
 	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
 }
 
-void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
+void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
 {
 	struct f2fs_summary sum;
 
@@ -2789,22 +3040,21 @@
 	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
 }
 
-void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
+void f2fs_outplace_write_data(struct dnode_of_data *dn,
+					struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 	struct f2fs_summary sum;
-	struct node_info ni;
 
 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
-	get_node_info(sbi, dn->nid, &ni);
-	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
 	do_write_page(&sum, fio);
 	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
 
 	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
 }
 
-int rewrite_data_page(struct f2fs_io_info *fio)
+int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 {
 	int err;
 	struct f2fs_sb_info *sbi = fio->sbi;
@@ -2839,7 +3089,7 @@
 	return i;
 }
 
-void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 				block_t old_blkaddr, block_t new_blkaddr,
 				bool recover_curseg, bool recover_newaddr)
 {
@@ -2894,8 +3144,11 @@
 
 	if (!recover_curseg || recover_newaddr)
 		update_sit_entry(sbi, new_blkaddr, 1);
-	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+		invalidate_mapping_pages(META_MAPPING(sbi),
+					old_blkaddr, old_blkaddr);
 		update_sit_entry(sbi, old_blkaddr, -1);
+	}
 
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
 	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
@@ -2924,7 +3177,7 @@
 
 	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
 
-	__f2fs_replace_block(sbi, &sum, old_addr, new_addr,
+	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
 					recover_curseg, recover_newaddr);
 
 	f2fs_update_data_blkaddr(dn, new_addr);
@@ -2949,7 +3202,7 @@
 {
 	struct page *cpage;
 
-	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+	if (!is_valid_data_blkaddr(sbi, blkaddr))
 		return;
 
 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
@@ -2959,7 +3212,7 @@
 	}
 }
 
-static void read_compacted_summaries(struct f2fs_sb_info *sbi)
+static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	struct curseg_info *seg_i;
@@ -2970,7 +3223,9 @@
 
 	start = start_sum_block(sbi);
 
-	page = get_meta_page(sbi, start++);
+	page = f2fs_get_meta_page(sbi, start++);
+	if (IS_ERR(page))
+		return PTR_ERR(page);
 	kaddr = (unsigned char *)page_address(page);
 
 	/* Step 1: restore nat cache */
@@ -3010,12 +3265,15 @@
 			f2fs_put_page(page, 1);
 			page = NULL;
 
-			page = get_meta_page(sbi, start++);
+			page = f2fs_get_meta_page(sbi, start++);
+			if (IS_ERR(page))
+				return PTR_ERR(page);
 			kaddr = (unsigned char *)page_address(page);
 			offset = 0;
 		}
 	}
 	f2fs_put_page(page, 1);
+	return 0;
 }
 
 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
@@ -3027,6 +3285,7 @@
 	unsigned short blk_off;
 	unsigned int segno = 0;
 	block_t blk_addr = 0;
+	int err = 0;
 
 	/* get segment number and block addr */
 	if (IS_DATASEG(type)) {
@@ -3049,7 +3308,9 @@
 			blk_addr = GET_SUM_BLOCK(sbi, segno);
 	}
 
-	new = get_meta_page(sbi, blk_addr);
+	new = f2fs_get_meta_page(sbi, blk_addr);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
 	sum = (struct f2fs_summary_block *)page_address(new);
 
 	if (IS_NODESEG(type)) {
@@ -3061,7 +3322,9 @@
 				ns->ofs_in_node = 0;
 			}
 		} else {
-			restore_node_summary(sbi, segno, sum);
+			err = f2fs_restore_node_summary(sbi, segno, sum);
+			if (err)
+				goto out;
 		}
 	}
 
@@ -3081,8 +3344,9 @@
 	curseg->alloc_type = ckpt->alloc_type[type];
 	curseg->next_blkoff = blk_off;
 	mutex_unlock(&curseg->curseg_mutex);
+out:
 	f2fs_put_page(new, 1);
-	return 0;
+	return err;
 }
 
 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
@@ -3093,19 +3357,21 @@
 	int err;
 
 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
-		int npages = npages_for_summary_flush(sbi, true);
+		int npages = f2fs_npages_for_summary_flush(sbi, true);
 
 		if (npages >= 2)
-			ra_meta_pages(sbi, start_sum_block(sbi), npages,
+			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
 							META_CP, true);
 
 		/* restore for compacted data summary */
-		read_compacted_summaries(sbi);
+		err = read_compacted_summaries(sbi);
+		if (err)
+			return err;
 		type = CURSEG_HOT_NODE;
 	}
 
 	if (__exist_node_summaries(sbi))
-		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
+		f2fs_ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
 					NR_CURSEG_TYPE - type, META_CP, true);
 
 	for (; type <= CURSEG_COLD_NODE; type++) {
@@ -3131,8 +3397,9 @@
 	int written_size = 0;
 	int i, j;
 
-	page = grab_meta_page(sbi, blkaddr++);
+	page = f2fs_grab_meta_page(sbi, blkaddr++);
 	kaddr = (unsigned char *)page_address(page);
+	memset(kaddr, 0, PAGE_SIZE);
 
 	/* Step 1: write nat cache */
 	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -3155,8 +3422,9 @@
 
 		for (j = 0; j < blkoff; j++) {
 			if (!page) {
-				page = grab_meta_page(sbi, blkaddr++);
+				page = f2fs_grab_meta_page(sbi, blkaddr++);
 				kaddr = (unsigned char *)page_address(page);
+				memset(kaddr, 0, PAGE_SIZE);
 				written_size = 0;
 			}
 			summary = (struct f2fs_summary *)(kaddr + written_size);
@@ -3191,7 +3459,7 @@
 		write_current_sum_page(sbi, i, blkaddr + (i - type));
 }
 
-void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
+void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
 	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
 		write_compacted_summaries(sbi, start_blk);
@@ -3199,12 +3467,12 @@
 		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
 }
 
-void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
+void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
 	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
 }
 
-int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
+int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
 					unsigned int val, int alloc)
 {
 	int i;
@@ -3229,7 +3497,7 @@
 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
 					unsigned int segno)
 {
-	return get_meta_page(sbi, current_sit_addr(sbi, segno));
+	return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
 }
 
 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
@@ -3242,7 +3510,7 @@
 	src_off = current_sit_addr(sbi, start);
 	dst_off = next_sit_addr(sbi, src_off);
 
-	page = grab_meta_page(sbi, dst_off);
+	page = f2fs_grab_meta_page(sbi, dst_off);
 	seg_info_to_sit_page(sbi, page, start);
 
 	set_page_dirty(page);
@@ -3338,7 +3606,7 @@
  * CP calls this function, which flushes SIT entries including sit_journal,
  * and moves prefree segs to free segs.
  */
-void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
@@ -3397,6 +3665,11 @@
 			int offset, sit_offset;
 
 			se = get_seg_entry(sbi, segno);
+#ifdef CONFIG_F2FS_CHECK_FS
+			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
+						SIT_VBLOCK_MAP_SIZE))
+				f2fs_bug_on(sbi, 1);
+#endif
 
 			/* add discard candidates */
 			if (!(cpc->reason & CP_DISCARD)) {
@@ -3405,17 +3678,21 @@
 			}
 
 			if (to_journal) {
-				offset = lookup_journal_in_cursum(journal,
+				offset = f2fs_lookup_journal_in_cursum(journal,
 							SIT_JOURNAL, segno, 1);
 				f2fs_bug_on(sbi, offset < 0);
 				segno_in_journal(journal, offset) =
 							cpu_to_le32(segno);
 				seg_info_to_raw_sit(se,
 					&sit_in_journal(journal, offset));
+				check_block_count(sbi, segno,
+					&sit_in_journal(journal, offset));
 			} else {
 				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
 				seg_info_to_raw_sit(se,
 						&raw_sit->entries[sit_offset]);
+				check_block_count(sbi, segno,
+						&raw_sit->entries[sit_offset]);
 			}
 
 			__clear_bit(segno, bitmap);
@@ -3463,8 +3740,10 @@
 
 	SM_I(sbi)->sit_info = sit_i;
 
-	sit_i->sentries = f2fs_kvzalloc(sbi, MAIN_SEGS(sbi) *
-					sizeof(struct seg_entry), GFP_KERNEL);
+	sit_i->sentries =
+		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
+					      MAIN_SEGS(sbi)),
+			      GFP_KERNEL);
 	if (!sit_i->sentries)
 		return -ENOMEM;
 
@@ -3504,8 +3783,10 @@
 		return -ENOMEM;
 
 	if (sbi->segs_per_sec > 1) {
-		sit_i->sec_entries = f2fs_kvzalloc(sbi, MAIN_SECS(sbi) *
-					sizeof(struct sec_entry), GFP_KERNEL);
+		sit_i->sec_entries =
+			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
+						      MAIN_SECS(sbi)),
+				      GFP_KERNEL);
 		if (!sit_i->sec_entries)
 			return -ENOMEM;
 	}
@@ -3581,7 +3862,8 @@
 	struct curseg_info *array;
 	int i;
 
-	array = f2fs_kzalloc(sbi, sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
+	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE, sizeof(*array)),
+			     GFP_KERNEL);
 	if (!array)
 		return -ENOMEM;
 
@@ -3614,9 +3896,10 @@
 	unsigned int i, start, end;
 	unsigned int readed, start_blk = 0;
 	int err = 0;
+	block_t total_node_blocks = 0;
 
 	do {
-		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
 							META_SIT, true);
 
 		start = start_blk * sit_i->sents_per_block;
@@ -3636,6 +3919,8 @@
 			if (err)
 				return err;
 			seg_info_from_raw_sit(se, &sit);
+			if (IS_NODESEG(se->type))
+				total_node_blocks += se->valid_blocks;
 
 			/* build discard map only one time */
 			if (f2fs_discard_en(sbi)) {
@@ -3664,15 +3949,28 @@
 		unsigned int old_valid_blocks;
 
 		start = le32_to_cpu(segno_in_journal(journal, i));
+		if (start >= MAIN_SEGS(sbi)) {
+			f2fs_msg(sbi->sb, KERN_ERR,
+					"Wrong journal entry on segno %u",
+					start);
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			err = -EINVAL;
+			break;
+		}
+
 		se = &sit_i->sentries[start];
 		sit = sit_in_journal(journal, i);
 
 		old_valid_blocks = se->valid_blocks;
+		if (IS_NODESEG(se->type))
+			total_node_blocks -= old_valid_blocks;
 
 		err = check_block_count(sbi, start, &sit);
 		if (err)
 			break;
 		seg_info_from_raw_sit(se, &sit);
+		if (IS_NODESEG(se->type))
+			total_node_blocks += se->valid_blocks;
 
 		if (f2fs_discard_en(sbi)) {
 			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
@@ -3681,16 +3979,28 @@
 			} else {
 				memcpy(se->discard_map, se->cur_valid_map,
 							SIT_VBLOCK_MAP_SIZE);
-				sbi->discard_blks += old_valid_blocks -
-							se->valid_blocks;
+				sbi->discard_blks += old_valid_blocks;
+				sbi->discard_blks -= se->valid_blocks;
 			}
 		}
 
-		if (sbi->segs_per_sec > 1)
+		if (sbi->segs_per_sec > 1) {
 			get_sec_entry(sbi, start)->valid_blocks +=
-				se->valid_blocks - old_valid_blocks;
+							se->valid_blocks;
+			get_sec_entry(sbi, start)->valid_blocks -=
+							old_valid_blocks;
+		}
 	}
 	up_read(&curseg->journal_rwsem);
+
+	if (!err && total_node_blocks != valid_node_count(sbi)) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"SIT is corrupted node# %u vs %u",
+			total_node_blocks, valid_node_count(sbi));
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		err = -EINVAL;
+	}
+
 	return err;
 }
 
@@ -3789,7 +4099,7 @@
 
 	down_write(&sit_i->sentry_lock);
 
-	sit_i->min_mtime = LLONG_MAX;
+	sit_i->min_mtime = ULLONG_MAX;
 
 	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
 		unsigned int i;
@@ -3803,11 +4113,11 @@
 		if (sit_i->min_mtime > mtime)
 			sit_i->min_mtime = mtime;
 	}
-	sit_i->max_mtime = get_mtime(sbi);
+	sit_i->max_mtime = get_mtime(sbi, false);
 	up_write(&sit_i->sentry_lock);
 }
 
-int build_segment_manager(struct f2fs_sb_info *sbi)
+int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -3845,7 +4155,7 @@
 	init_rwsem(&sm_info->curseg_lock);
 
 	if (!f2fs_readonly(sbi->sb)) {
-		err = create_flush_cmd_control(sbi);
+		err = f2fs_create_flush_cmd_control(sbi);
 		if (err)
 			return err;
 	}
@@ -3970,13 +4280,13 @@
 	kfree(sit_i);
 }
 
-void destroy_segment_manager(struct f2fs_sb_info *sbi)
+void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
 
 	if (!sm_info)
 		return;
-	destroy_flush_cmd_control(sbi, true);
+	f2fs_destroy_flush_cmd_control(sbi, true);
 	destroy_discard_cmd_control(sbi);
 	destroy_dirty_segmap(sbi);
 	destroy_curseg(sbi);
@@ -3986,7 +4296,7 @@
 	kfree(sm_info);
 }
 
-int __init create_segment_manager_caches(void)
+int __init f2fs_create_segment_manager_caches(void)
 {
 	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
 			sizeof(struct discard_entry));
@@ -4019,7 +4329,7 @@
 	return -ENOMEM;
 }
 
-void destroy_segment_manager_caches(void)
+void f2fs_destroy_segment_manager_caches(void)
 {
 	kmem_cache_destroy(sit_entry_set_slab);
 	kmem_cache_destroy(discard_cmd_slab);
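
The fstrim changes above widen the user-supplied range to whole sections when the filesystem runs in LFS mode with more than one segment per section (the rounddown/roundup on segs_per_sec), so a discard never splits a section. A minimal userspace sketch of that alignment arithmetic, with illustrative numbers rather than kernel values:

#include <stdio.h>

/* Round down/up to a multiple of "align"; mirrors the kernel helpers used above. */
static unsigned int rounddown_u(unsigned int x, unsigned int align)
{
	return x - (x % align);
}

static unsigned int roundup_u(unsigned int x, unsigned int align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	unsigned int segs_per_sec = 4;          /* hypothetical geometry */
	unsigned int start_segno = 5, end_segno = 9;

	/* Widen [start_segno, end_segno] to whole sections, as the LFS path does. */
	unsigned int aligned_start = rounddown_u(start_segno, segs_per_sec);
	unsigned int aligned_end = roundup_u(end_segno + 1, segs_per_sec) - 1;

	printf("trim segments %u..%u -> sections cover %u..%u\n",
	       start_segno, end_segno, aligned_start, aligned_end);
	return 0;
}
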
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 3325d07..b3d9e31 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -85,7 +85,7 @@
 	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
 
 #define GET_SEGNO(sbi, blk_addr)					\
-	((((blk_addr) == NULL_ADDR) || ((blk_addr) == NEW_ADDR)) ?	\
+	((!is_valid_data_blkaddr(sbi, blk_addr)) ?			\
 	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
 		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
 #define BLKS_PER_SEC(sbi)					\
@@ -215,6 +215,8 @@
 #define IS_DUMMY_WRITTEN_PAGE(page)			\
 		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
 
+#define MAX_SKIP_GC_COUNT			16
+
 struct inmem_pages {
 	struct list_head list;
 	struct page *page;
@@ -375,6 +377,7 @@
 	int i;
 
 	raw_sit = (struct f2fs_sit_block *)page_address(page);
+	memset(raw_sit, 0, PAGE_SIZE);
 	for (i = 0; i < end - start; i++) {
 		rs = &raw_sit->entries[i];
 		se = get_seg_entry(sbi, start + i);
@@ -445,6 +448,8 @@
 	if (test_and_clear_bit(segno, free_i->free_segmap)) {
 		free_i->free_segments++;
 
+		if (IS_CURSEC(sbi, secno))
+			goto skip_free;
 		next = find_next_bit(free_i->free_segmap,
 				start_segno + sbi->segs_per_sec, start_segno);
 		if (next >= start_segno + sbi->segs_per_sec) {
@@ -452,6 +457,7 @@
 				free_i->free_sections++;
 		}
 	}
+skip_free:
 	spin_unlock(&free_i->segmap_lock);
 }
 
@@ -642,13 +648,10 @@
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 
-	if (PAGE_TYPE_OF_BIO(fio->type) == META &&
-				(!is_read_io(fio->op) || fio->is_meta))
-		BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
-				blk_addr >= MAIN_BLKADDR(sbi));
+	if (__is_meta_io(fio))
+		verify_blkaddr(sbi, blk_addr, META_GENERIC);
 	else
-		BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
-				blk_addr >= MAX_BLKADDR(sbi));
+		verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
 }
 
 /*
@@ -742,12 +745,23 @@
 #endif
 }
 
-static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
+static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
+						bool base_time)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
-	time64_t now = ktime_get_real_seconds();
+	time64_t diff, now = ktime_get_real_seconds();
 
-	return sit_i->elapsed_time + now - sit_i->mounted_time;
+	if (now >= sit_i->mounted_time)
+		return sit_i->elapsed_time + now - sit_i->mounted_time;
+
+	/* system time is set to the past */
+	if (!base_time) {
+		diff = sit_i->mounted_time - now;
+		if (sit_i->elapsed_time >= diff)
+			return sit_i->elapsed_time - diff;
+		return 0;
+	}
+	return sit_i->elapsed_time;
 }
 
 static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
@@ -771,15 +785,6 @@
 				- (base + 1) + type;
 }
 
-static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
-						unsigned int secno)
-{
-	if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >
-						sbi->fggc_threshold)
-		return true;
-	return false;
-}
-
 static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
 {
 	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
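
The reworked get_mtime() above defends against the wall clock being set backwards after mount: normally it returns elapsed_time plus the time since mount, but when "now" is earlier than mounted_time it either shrinks elapsed_time by the difference (clamped at zero) or, for the base-time case, falls back to the value recorded at mount. A small userspace model of that clamping, with a stand-in struct rather than the kernel's sit_info:

#include <stdio.h>
#include <time.h>

struct sit_times {
	unsigned long long elapsed_time;  /* accumulated across mounts */
	long long mounted_time;           /* wall-clock seconds at mount */
};

/* Mirrors the logic above: if "now" went backwards, either shrink the
 * elapsed time by the difference (base_time == 0) or fall back to the
 * value recorded at mount (base_time != 0), never going negative. */
static unsigned long long get_mtime_model(const struct sit_times *s, int base_time)
{
	long long now = (long long)time(NULL);
	long long diff;

	if (now >= s->mounted_time)
		return s->elapsed_time + (unsigned long long)(now - s->mounted_time);

	if (!base_time) {
		diff = s->mounted_time - now;
		if (s->elapsed_time >= (unsigned long long)diff)
			return s->elapsed_time - (unsigned long long)diff;
		return 0;
	}
	return s->elapsed_time;
}

int main(void)
{
	/* mounted_time lies 600s in the future, simulating a clock that jumped back */
	struct sit_times s = { .elapsed_time = 1000,
			       .mounted_time = (long long)time(NULL) + 600 };

	printf("mtime (non-base): %llu\n", get_mtime_model(&s, 0));  /* 1000 - 600 = 400 */
	printf("mtime (base):     %llu\n", get_mtime_model(&s, 1));  /* 1000 */
	return 0;
}
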
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 0b5664a..36cfd81 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -109,11 +109,11 @@
 
 		/* shrink clean nat cache entries */
 		if (freed < nr)
-			freed += try_to_free_nats(sbi, nr - freed);
+			freed += f2fs_try_to_free_nats(sbi, nr - freed);
 
 		/* shrink free nids cache entries */
 		if (freed < nr)
-			freed += try_to_free_nids(sbi, nr - freed);
+			freed += f2fs_try_to_free_nids(sbi, nr - freed);
 
 		spin_lock(&f2fs_list_lock);
 		p = p->next;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ee2ebbf..bba0cd4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -41,7 +41,7 @@
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 
-char *fault_name[FAULT_MAX] = {
+char *f2fs_fault_name[FAULT_MAX] = {
 	[FAULT_KMALLOC]		= "kmalloc",
 	[FAULT_KVMALLOC]	= "kvmalloc",
 	[FAULT_PAGE_ALLOC]	= "page alloc",
@@ -55,20 +55,24 @@
 	[FAULT_TRUNCATE]	= "truncate fail",
 	[FAULT_IO]		= "IO error",
 	[FAULT_CHECKPOINT]	= "checkpoint error",
+	[FAULT_DISCARD]		= "discard error",
 };
 
-static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
-						unsigned int rate)
+void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+							unsigned int type)
 {
 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 
 	if (rate) {
 		atomic_set(&ffi->inject_ops, 0);
 		ffi->inject_rate = rate;
-		ffi->inject_type = (1 << FAULT_MAX) - 1;
-	} else {
-		memset(ffi, 0, sizeof(struct f2fs_fault_info));
 	}
+
+	if (type)
+		ffi->inject_type = type;
+
+	if (!rate && !type)
+		memset(ffi, 0, sizeof(struct f2fs_fault_info));
 }
 #endif
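
The reshaped fault-injection setup above lets the injection rate and the fault-type mask be updated independently, and only clears the whole structure when both arguments are zero. A userspace model of those update rules (the struct is a stand-in for f2fs_fault_info and FAULT_MAX is assumed; the kernel version also resets its atomic op counter, which is omitted here):

#include <stdio.h>
#include <string.h>

#define FAULT_MAX 16  /* assumed number of fault types */

struct fault_info {
	unsigned int inject_rate;   /* inject one fault every N ops */
	unsigned int inject_type;   /* bitmask of enabled fault types */
};

/* Mirrors the update rules above: rate and type are set independently;
 * passing 0 for both disables injection entirely. */
static void build_fault_attr(struct fault_info *ffi, unsigned int rate,
			     unsigned int type)
{
	if (rate)
		ffi->inject_rate = rate;
	if (type)
		ffi->inject_type = type;
	if (!rate && !type)
		memset(ffi, 0, sizeof(*ffi));
}

int main(void)
{
	struct fault_info ffi = { 0, 0 };

	build_fault_attr(&ffi, 10, (1 << FAULT_MAX) - 1);  /* fault_injection=10 */
	build_fault_attr(&ffi, 0, 0x4);                    /* fault_type=4, rate kept */
	printf("rate=%u type=0x%x\n", ffi.inject_rate, ffi.inject_type);
	build_fault_attr(&ffi, 0, 0);                      /* disable */
	printf("rate=%u type=0x%x\n", ffi.inject_rate, ffi.inject_type);
	return 0;
}
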
 
@@ -113,6 +117,7 @@
 	Opt_mode,
 	Opt_io_size_bits,
 	Opt_fault_injection,
+	Opt_fault_type,
 	Opt_lazytime,
 	Opt_nolazytime,
 	Opt_quota,
@@ -170,6 +175,7 @@
 	{Opt_mode, "mode=%s"},
 	{Opt_io_size_bits, "io_bits=%u"},
 	{Opt_fault_injection, "fault_injection=%u"},
+	{Opt_fault_type, "fault_type=%u"},
 	{Opt_lazytime, "lazytime"},
 	{Opt_nolazytime, "nolazytime"},
 	{Opt_quota, "quota"},
@@ -347,12 +353,6 @@
 			"QUOTA feature is enabled, so ignore jquota_fmt");
 		F2FS_OPTION(sbi).s_jquota_fmt = 0;
 	}
-	if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
-		f2fs_msg(sbi->sb, KERN_INFO,
-			 "Filesystem with quota feature cannot be mounted RDWR "
-			 "without CONFIG_QUOTA");
-		return -1;
-	}
 	return 0;
 }
 #endif
@@ -606,7 +606,18 @@
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-			f2fs_build_fault_attr(sbi, arg);
+			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
+			set_opt(sbi, FAULT_INJECTION);
+#else
+			f2fs_msg(sb, KERN_INFO,
+				"FAULT_INJECTION was not selected");
+#endif
+			break;
+		case Opt_fault_type:
+			if (args->from && match_int(args, &arg))
+				return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+			f2fs_build_fault_attr(sbi, 0, arg);
 			set_opt(sbi, FAULT_INJECTION);
 #else
 			f2fs_msg(sb, KERN_INFO,
@@ -775,6 +786,19 @@
 #ifdef CONFIG_QUOTA
 	if (f2fs_check_quota_options(sbi))
 		return -EINVAL;
+#else
+	if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+		f2fs_msg(sbi->sb, KERN_INFO,
+			 "Filesystem with quota feature cannot be mounted RDWR "
+			 "without CONFIG_QUOTA");
+		return -EINVAL;
+	}
+	if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+		f2fs_msg(sb, KERN_ERR,
+			"Filesystem with project quota feature cannot be "
+			"mounted RDWR without CONFIG_QUOTA");
+		return -EINVAL;
+	}
 #endif
 
 	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
@@ -830,15 +854,14 @@
 
 	/* Initialize f2fs-specific inode info */
 	atomic_set(&fi->dirty_pages, 0);
-	fi->i_current_depth = 1;
 	init_rwsem(&fi->i_sem);
 	INIT_LIST_HEAD(&fi->dirty_list);
 	INIT_LIST_HEAD(&fi->gdirty_list);
 	INIT_LIST_HEAD(&fi->inmem_ilist);
 	INIT_LIST_HEAD(&fi->inmem_pages);
 	mutex_init(&fi->inmem_lock);
-	init_rwsem(&fi->dio_rwsem[READ]);
-	init_rwsem(&fi->dio_rwsem[WRITE]);
+	init_rwsem(&fi->i_gc_rwsem[READ]);
+	init_rwsem(&fi->i_gc_rwsem[WRITE]);
 	init_rwsem(&fi->i_mmap_sem);
 	init_rwsem(&fi->i_xattr_sem);
 
@@ -866,7 +889,7 @@
 
 			/* some remaining atomic pages should be discarded */
 			if (f2fs_is_atomic_file(inode))
-				drop_inmem_pages(inode);
+				f2fs_drop_inmem_pages(inode);
 
 			/* should remain fi->extent_tree for writepage */
 			f2fs_destroy_extent_node(inode);
@@ -1003,7 +1026,7 @@
 		struct cp_control cpc = {
 			.reason = CP_UMOUNT,
 		};
-		write_checkpoint(sbi, &cpc);
+		f2fs_write_checkpoint(sbi, &cpc);
 	}
 
 	/* be sure to wait for any on-going discard commands */
@@ -1013,17 +1036,17 @@
 		struct cp_control cpc = {
 			.reason = CP_UMOUNT | CP_TRIMMED,
 		};
-		write_checkpoint(sbi, &cpc);
+		f2fs_write_checkpoint(sbi, &cpc);
 	}
 
-	/* write_checkpoint can update stat informaion */
+	/* f2fs_write_checkpoint can update stat information */
 	f2fs_destroy_stats(sbi);
 
 	/*
 	 * normally superblock is clean, so we need to release this.
 	 * In addition, EIO will skip do checkpoint, we need this as well.
 	 */
-	release_ino_entry(sbi, true);
+	f2fs_release_ino_entry(sbi, true);
 
 	f2fs_leave_shrinker(sbi);
 	mutex_unlock(&sbi->umount_mutex);
@@ -1031,12 +1054,16 @@
 	/* our cp_error case, we can wait for any writeback page */
 	f2fs_flush_merged_writes(sbi);
 
+	f2fs_wait_on_all_pages_writeback(sbi);
+
+	f2fs_bug_on(sbi, sbi->fsync_node_num);
+
 	iput(sbi->node_inode);
 	iput(sbi->meta_inode);
 
 	/* destroy f2fs internal modules */
-	destroy_node_manager(sbi);
-	destroy_segment_manager(sbi);
+	f2fs_destroy_node_manager(sbi);
+	f2fs_destroy_segment_manager(sbi);
 
 	kfree(sbi->ckpt);
 
@@ -1078,7 +1105,7 @@
 		cpc.reason = __get_cp_reason(sbi);
 
 		mutex_lock(&sbi->gc_mutex);
-		err = write_checkpoint(sbi, &cpc);
+		err = f2fs_write_checkpoint(sbi, &cpc);
 		mutex_unlock(&sbi->gc_mutex);
 	}
 	f2fs_trace_ios(NULL, 1);
@@ -1311,9 +1338,12 @@
 	if (F2FS_IO_SIZE_BITS(sbi))
 		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-	if (test_opt(sbi, FAULT_INJECTION))
+	if (test_opt(sbi, FAULT_INJECTION)) {
 		seq_printf(seq, ",fault_injection=%u",
 				F2FS_OPTION(sbi).fault_info.inject_rate);
+		seq_printf(seq, ",fault_type=%u",
+				F2FS_OPTION(sbi).fault_info.inject_type);
+	}
 #endif
 #ifdef CONFIG_QUOTA
 	if (test_opt(sbi, QUOTA))
@@ -1358,7 +1388,8 @@
 	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
 	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
 	F2FS_OPTION(sbi).test_dummy_encryption = false;
-	sbi->readdir_ra = 1;
+	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_XATTR);
@@ -1368,12 +1399,12 @@
 	set_opt(sbi, NOHEAP);
 	sbi->sb->s_flags |= MS_LAZYTIME;
 	set_opt(sbi, FLUSH_MERGE);
-	if (f2fs_sb_has_blkzoned(sbi->sb)) {
-		set_opt_mode(sbi, F2FS_MOUNT_LFS);
+	if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
 		set_opt(sbi, DISCARD);
-	} else {
+	if (f2fs_sb_has_blkzoned(sbi->sb))
+		set_opt_mode(sbi, F2FS_MOUNT_LFS);
+	else
 		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
-	}
 
 #ifdef CONFIG_F2FS_FS_XATTR
 	set_opt(sbi, XATTR_USER);
@@ -1382,9 +1413,7 @@
 	set_opt(sbi, POSIX_ACL);
 #endif
 
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	f2fs_build_fault_attr(sbi, 0);
-#endif
+	f2fs_build_fault_attr(sbi, 0, 0);
 }
 
 #ifdef CONFIG_QUOTA
@@ -1483,11 +1512,11 @@
 	 */
 	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
 		if (sbi->gc_thread) {
-			stop_gc_thread(sbi);
+			f2fs_stop_gc_thread(sbi);
 			need_restart_gc = true;
 		}
 	} else if (!sbi->gc_thread) {
-		err = start_gc_thread(sbi);
+		err = f2fs_start_gc_thread(sbi);
 		if (err)
 			goto restore_opts;
 		need_stop_gc = true;
@@ -1510,9 +1539,9 @@
 	 */
 	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
 		clear_opt(sbi, FLUSH_MERGE);
-		destroy_flush_cmd_control(sbi, false);
+		f2fs_destroy_flush_cmd_control(sbi, false);
 	} else {
-		err = create_flush_cmd_control(sbi);
+		err = f2fs_create_flush_cmd_control(sbi);
 		if (err)
 			goto restore_gc;
 	}
@@ -1530,11 +1559,11 @@
 	return 0;
 restore_gc:
 	if (need_restart_gc) {
-		if (start_gc_thread(sbi))
+		if (f2fs_start_gc_thread(sbi))
 			f2fs_msg(sbi->sb, KERN_WARNING,
 				"background gc thread has stopped");
 	} else if (need_stop_gc) {
-		stop_gc_thread(sbi);
+		f2fs_stop_gc_thread(sbi);
 	}
 restore_opts:
 #ifdef CONFIG_QUOTA
@@ -1806,7 +1835,7 @@
 	inode = d_inode(path->dentry);
 
 	inode_lock(inode);
-	F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
+	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
 	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
 					S_NOATIME | S_IMMUTABLE);
 	inode_unlock(inode);
@@ -1830,7 +1859,7 @@
 		goto out_put;
 
 	inode_lock(inode);
-	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
+	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
 	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
 	inode_unlock(inode);
 	f2fs_mark_inode_dirty_sync(inode, false);
@@ -1936,19 +1965,13 @@
 	return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
 }
 
-static unsigned f2fs_max_namelen(struct inode *inode)
-{
-	return S_ISLNK(inode->i_mode) ?
-			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
-}
-
 static const struct fscrypt_operations f2fs_cryptops = {
 	.key_prefix	= "f2fs:",
 	.get_context	= f2fs_get_context,
 	.set_context	= f2fs_set_context,
 	.dummy_context	= f2fs_dummy_context,
 	.empty_dir	= f2fs_empty_dir,
-	.max_namelen	= f2fs_max_namelen,
+	.max_namelen	= F2FS_NAME_LEN,
 };
 #endif
 
@@ -1958,7 +1981,7 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
 
-	if (check_nid_range(sbi, ino))
+	if (f2fs_check_nid_range(sbi, ino))
 		return ERR_PTR(-ESTALE);
 
 	/*
@@ -2141,6 +2164,8 @@
 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 				struct buffer_head *bh)
 {
+	block_t segment_count, segs_per_sec, secs_per_zone;
+	block_t total_sections, blocks_per_seg;
 	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
 					(bh->b_data + F2FS_SUPER_OFFSET);
 	struct super_block *sb = sbi->sb;
@@ -2197,6 +2222,72 @@
 		return 1;
 	}
 
+	segment_count = le32_to_cpu(raw_super->segment_count);
+	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
+	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
+	total_sections = le32_to_cpu(raw_super->section_count);
+
+	/* blocks_per_seg should be 512, given the above check */
+	blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
+
+	if (segment_count > F2FS_MAX_SEGMENT ||
+				segment_count < F2FS_MIN_SEGMENTS) {
+		f2fs_msg(sb, KERN_INFO,
+			"Invalid segment count (%u)",
+			segment_count);
+		return 1;
+	}
+
+	if (total_sections > segment_count ||
+			total_sections < F2FS_MIN_SEGMENTS ||
+			segs_per_sec > segment_count || !segs_per_sec) {
+		f2fs_msg(sb, KERN_INFO,
+			"Invalid segment/section count (%u, %u x %u)",
+			segment_count, total_sections, segs_per_sec);
+		return 1;
+	}
+
+	if ((segment_count / segs_per_sec) < total_sections) {
+		f2fs_msg(sb, KERN_INFO,
+			"Small segment_count (%u < %u * %u)",
+			segment_count, segs_per_sec, total_sections);
+		return 1;
+	}
+
+	if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
+		f2fs_msg(sb, KERN_INFO,
+			"Wrong segment_count / block_count (%u > %u)",
+			segment_count, le32_to_cpu(raw_super->block_count));
+		return 1;
+	}
+
+	if (secs_per_zone > total_sections || !secs_per_zone) {
+		f2fs_msg(sb, KERN_INFO,
+			"Wrong secs_per_zone / total_sections (%u, %u)",
+			secs_per_zone, total_sections);
+		return 1;
+	}
+	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
+			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
+			(le32_to_cpu(raw_super->extension_count) +
+			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
+		f2fs_msg(sb, KERN_INFO,
+			"Corrupted extension count (%u + %u > %u)",
+			le32_to_cpu(raw_super->extension_count),
+			raw_super->hot_ext_count,
+			F2FS_MAX_EXTENSION);
+		return 1;
+	}
+
+	if (le32_to_cpu(raw_super->cp_payload) >
+				(blocks_per_seg - F2FS_CP_PACKS)) {
+		f2fs_msg(sb, KERN_INFO,
+			"Insane cp_payload (%u > %u)",
+			le32_to_cpu(raw_super->cp_payload),
+			blocks_per_seg - F2FS_CP_PACKS);
+		return 1;
+	}
+
 	/* check reserved ino info */
 	if (le32_to_cpu(raw_super->node_ino) != 1 ||
 		le32_to_cpu(raw_super->meta_ino) != 2 ||
@@ -2209,13 +2300,6 @@
 		return 1;
 	}
 
-	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
-		f2fs_msg(sb, KERN_INFO,
-			"Invalid segment count (%u)",
-			le32_to_cpu(raw_super->segment_count));
-		return 1;
-	}
-
 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
 	if (sanity_check_area_boundary(sbi, bh))
 		return 1;
@@ -2223,19 +2307,27 @@
 	return 0;
 }
 
-int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 {
 	unsigned int total, fsmeta;
 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	unsigned int ovp_segments, reserved_segments;
 	unsigned int main_segs, blocks_per_seg;
+	unsigned int sit_segs, nat_segs;
+	unsigned int sit_bitmap_size, nat_bitmap_size;
+	unsigned int log_blocks_per_seg;
+	unsigned int segment_count_main;
+	unsigned int cp_pack_start_sum, cp_payload;
+	block_t user_block_count;
 	int i;
 
 	total = le32_to_cpu(raw_super->segment_count);
 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
-	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
-	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
+	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
+	fsmeta += sit_segs;
+	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
+	fsmeta += nat_segs;
 	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
 	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
 
@@ -2252,6 +2344,16 @@
 		return 1;
 	}
 
+	user_block_count = le64_to_cpu(ckpt->user_block_count);
+	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+	if (!user_block_count || user_block_count >=
+			segment_count_main << log_blocks_per_seg) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong user_block_count: %u", user_block_count);
+		return 1;
+	}
+
 	main_segs = le32_to_cpu(raw_super->segment_count_main);
 	blocks_per_seg = sbi->blocks_per_seg;
 
@@ -2266,6 +2368,28 @@
 			return 1;
 	}
 
+	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+
+	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
+		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong bitmap size: sit: %u, nat:%u",
+			sit_bitmap_size, nat_bitmap_size);
+		return 1;
+	}
+
+	cp_pack_start_sum = __start_sum_addr(sbi);
+	cp_payload = __cp_payload(sbi);
+	if (cp_pack_start_sum < cp_payload + 1 ||
+		cp_pack_start_sum > blocks_per_seg - 1 -
+			NR_CURSEG_TYPE) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong cp_pack_start_sum: %u",
+			cp_pack_start_sum);
+		return 1;
+	}
+
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
 		return 1;
@@ -2304,13 +2428,15 @@
 	for (i = 0; i < NR_COUNT_TYPE; i++)
 		atomic_set(&sbi->nr_pages[i], 0);
 
-	atomic_set(&sbi->wb_sync_req, 0);
+	for (i = 0; i < META; i++)
+		atomic_set(&sbi->wb_sync_req[i], 0);
 
 	INIT_LIST_HEAD(&sbi->s_list);
 	mutex_init(&sbi->umount_mutex);
 	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
 		for (j = HOT; j < NR_TEMP_TYPE; j++)
 			mutex_init(&sbi->wio_mutex[i][j]);
+	init_rwsem(&sbi->io_order_lock);
 	spin_lock_init(&sbi->cp_lock);
 
 	sbi->dirty_device = 0;
@@ -2365,8 +2491,10 @@
 
 #define F2FS_REPORT_NR_ZONES   4096
 
-	zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) *
-				F2FS_REPORT_NR_ZONES, GFP_KERNEL);
+	zones = f2fs_kzalloc(sbi,
+			     array_size(F2FS_REPORT_NR_ZONES,
+					sizeof(struct blk_zone)),
+			     GFP_KERNEL);
 	if (!zones)
 		return -ENOMEM;
 
@@ -2510,8 +2638,10 @@
 	 * Initialize multiple devices information, or single
 	 * zoned block device information.
 	 */
-	sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) *
-						max_devices, GFP_KERNEL);
+	sbi->devs = f2fs_kzalloc(sbi,
+				 array_size(max_devices,
+					    sizeof(struct f2fs_dev_info)),
+				 GFP_KERNEL);
 	if (!sbi->devs)
 		return -ENOMEM;
 
@@ -2597,6 +2727,8 @@
 		sm_i->dcc_info->discard_granularity = 1;
 		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
 	}
+
+	sbi->readdir_ra = 1;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -2646,9 +2778,6 @@
 	sb->s_fs_info = sbi;
 	sbi->raw_super = raw_super;
 
-	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
-	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
-
 	/* precompute checksum seed for metadata */
 	if (f2fs_sb_has_inode_chksum(sb))
 		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
@@ -2734,9 +2863,11 @@
 		int n = (i == META) ? 1: NR_TEMP_TYPE;
 		int j;
 
-		sbi->write_io[i] = f2fs_kmalloc(sbi,
-					n * sizeof(struct f2fs_bio_info),
-					GFP_KERNEL);
+		sbi->write_io[i] =
+			f2fs_kmalloc(sbi,
+				     array_size(n,
+						sizeof(struct f2fs_bio_info)),
+				     GFP_KERNEL);
 		if (!sbi->write_io[i]) {
 			err = -ENOMEM;
 			goto free_options;
@@ -2776,7 +2907,7 @@
 		goto free_io_dummy;
 	}
 
-	err = get_valid_checkpoint(sbi);
+	err = f2fs_get_valid_checkpoint(sbi);
 	if (err) {
 		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
 		goto free_meta_inode;
@@ -2806,18 +2937,20 @@
 		spin_lock_init(&sbi->inode_lock[i]);
 	}
 
-	init_extent_cache_info(sbi);
+	f2fs_init_extent_cache_info(sbi);
 
-	init_ino_entry_info(sbi);
+	f2fs_init_ino_entry_info(sbi);
+
+	f2fs_init_fsync_node_info(sbi);
 
 	/* setup f2fs internal modules */
-	err = build_segment_manager(sbi);
+	err = f2fs_build_segment_manager(sbi);
 	if (err) {
 		f2fs_msg(sb, KERN_ERR,
 			"Failed to initialize F2FS segment manager");
 		goto free_sm;
 	}
-	err = build_node_manager(sbi);
+	err = f2fs_build_node_manager(sbi);
 	if (err) {
 		f2fs_msg(sb, KERN_ERR,
 			"Failed to initialize F2FS node manager");
@@ -2835,7 +2968,7 @@
 		sbi->kbytes_written =
 			le64_to_cpu(seg_i->journal->info.kbytes_written);
 
-	build_gc_manager(sbi);
+	f2fs_build_gc_manager(sbi);
 
 	/* get an inode for node space */
 	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
@@ -2856,10 +2989,11 @@
 		err = PTR_ERR(root);
 		goto free_stats;
 	}
-	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
+			!root->i_size || !root->i_nlink) {
 		iput(root);
 		err = -EINVAL;
-		goto free_node_inode;
+		goto free_stats;
 	}
 
 	sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -2873,10 +3007,7 @@
 		goto free_root_inode;
 
 #ifdef CONFIG_QUOTA
-	/*
-	 * Turn on quotas which were not enabled for read-only mounts if
-	 * filesystem has quota feature, so that they are updated correctly.
-	 */
+	/* Enable quota usage during mount */
 	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
 		err = f2fs_enable_quotas(sb);
 		if (err) {
@@ -2887,7 +3018,7 @@
 	}
 #endif
 	/* if there are any orphan nodes, free them */
-	err = recover_orphan_inodes(sbi);
+	err = f2fs_recover_orphan_inodes(sbi);
 	if (err)
 		goto free_meta;
 
@@ -2909,7 +3040,7 @@
 		if (!retry)
 			goto skip_recovery;
 
-		err = recover_fsync_data(sbi, false);
+		err = f2fs_recover_fsync_data(sbi, false);
 		if (err < 0) {
 			need_fsck = true;
 			f2fs_msg(sb, KERN_ERR,
@@ -2917,7 +3048,7 @@
 			goto free_meta;
 		}
 	} else {
-		err = recover_fsync_data(sbi, true);
+		err = f2fs_recover_fsync_data(sbi, true);
 
 		if (!f2fs_readonly(sb) && err > 0) {
 			err = -EINVAL;
@@ -2927,7 +3058,7 @@
 		}
 	}
 skip_recovery:
-	/* recover_fsync_data() cleared this already */
+	/* f2fs_recover_fsync_data() cleared this already */
 	clear_sbi_flag(sbi, SBI_POR_DOING);
 
 	/*
@@ -2936,7 +3067,7 @@
 	 */
 	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
 		/* After POR, we can run background GC thread.*/
-		err = start_gc_thread(sbi);
+		err = f2fs_start_gc_thread(sbi);
 		if (err)
 			goto free_meta;
 	}
@@ -2967,10 +3098,10 @@
 #endif
 	f2fs_sync_inode_meta(sbi);
 	/*
-	 * Some dirty meta pages can be produced by recover_orphan_inodes()
+	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
 	 * that failed with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
-	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
-	 * falls into an infinite loop in sync_meta_pages().
+	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
+	 * falls into an infinite loop in f2fs_sync_meta_pages().
 	 */
 	truncate_inode_pages_final(META_MAPPING(sbi));
 #ifdef CONFIG_QUOTA
@@ -2983,13 +3114,13 @@
 free_stats:
 	f2fs_destroy_stats(sbi);
 free_node_inode:
-	release_ino_entry(sbi, true);
+	f2fs_release_ino_entry(sbi, true);
 	truncate_inode_pages_final(NODE_MAPPING(sbi));
 	iput(sbi->node_inode);
 free_nm:
-	destroy_node_manager(sbi);
+	f2fs_destroy_node_manager(sbi);
 free_sm:
-	destroy_segment_manager(sbi);
+	f2fs_destroy_segment_manager(sbi);
 free_devices:
 	destroy_device_list(sbi);
 	kfree(sbi->ckpt);
@@ -3034,9 +3165,19 @@
 static void kill_f2fs_super(struct super_block *sb)
 {
 	if (sb->s_root) {
-		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
-		stop_gc_thread(F2FS_SB(sb));
-		stop_discard_thread(F2FS_SB(sb));
+		struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+		set_sbi_flag(sbi, SBI_IS_CLOSE);
+		f2fs_stop_gc_thread(sbi);
+		f2fs_stop_discard_thread(sbi);
+
+		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+			struct cp_control cpc = {
+				.reason = CP_UMOUNT,
+			};
+			f2fs_write_checkpoint(sbi, &cpc);
+		}
 	}
 	kill_block_super(sb);
 }
@@ -3074,21 +3215,27 @@
 {
 	int err;
 
+	if (PAGE_SIZE != F2FS_BLKSIZE) {
+		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
+				PAGE_SIZE, F2FS_BLKSIZE);
+		return -EINVAL;
+	}
+
 	f2fs_build_trace_ios();
 
 	err = init_inodecache();
 	if (err)
 		goto fail;
-	err = create_node_manager_caches();
+	err = f2fs_create_node_manager_caches();
 	if (err)
 		goto free_inodecache;
-	err = create_segment_manager_caches();
+	err = f2fs_create_segment_manager_caches();
 	if (err)
 		goto free_node_manager_caches;
-	err = create_checkpoint_caches();
+	err = f2fs_create_checkpoint_caches();
 	if (err)
 		goto free_segment_manager_caches;
-	err = create_extent_cache();
+	err = f2fs_create_extent_cache();
 	if (err)
 		goto free_checkpoint_caches;
 	err = f2fs_init_sysfs();
@@ -3117,13 +3264,13 @@
 free_sysfs:
 	f2fs_exit_sysfs();
 free_extent_cache:
-	destroy_extent_cache();
+	f2fs_destroy_extent_cache();
 free_checkpoint_caches:
-	destroy_checkpoint_caches();
+	f2fs_destroy_checkpoint_caches();
 free_segment_manager_caches:
-	destroy_segment_manager_caches();
+	f2fs_destroy_segment_manager_caches();
 free_node_manager_caches:
-	destroy_node_manager_caches();
+	f2fs_destroy_node_manager_caches();
 free_inodecache:
 	destroy_inodecache();
 fail:
@@ -3137,10 +3284,10 @@
 	unregister_filesystem(&f2fs_fs_type);
 	unregister_shrinker(&f2fs_shrinker_info);
 	f2fs_exit_sysfs();
-	destroy_extent_cache();
-	destroy_checkpoint_caches();
-	destroy_segment_manager_caches();
-	destroy_node_manager_caches();
+	f2fs_destroy_extent_cache();
+	f2fs_destroy_checkpoint_caches();
+	f2fs_destroy_segment_manager_caches();
+	f2fs_destroy_node_manager_caches();
 	destroy_inodecache();
 	f2fs_destroy_trace_ios();
 }
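
Most of the sanity checks added to super.c above are plain arithmetic over the on-disk geometry. A standalone sketch of two of them, the SIT/NAT bitmap-size relation and the cp_pack_start_sum window, using made-up numbers rather than values read from a real image:

#include <stdio.h>

/* Checks modeled on the new f2fs_sanity_check_ckpt() logic above.
 * Half of the SIT/NAT segments hold the live copy, each segment has
 * (1 << log_blocks_per_seg) blocks, and each block is tracked by one
 * bit, hence the "/ 8" to get bytes. */
static int bitmap_size_ok(unsigned int segs, unsigned int log_blocks_per_seg,
			  unsigned int bitmap_bytes)
{
	return bitmap_bytes == ((segs / 2) << log_blocks_per_seg) / 8;
}

/* cp_pack_start_sum must leave room for cp_payload blocks before it and
 * for the current-segment summary blocks plus the tail checkpoint block
 * after it. */
static int cp_pack_start_ok(unsigned int cp_pack_start_sum, unsigned int cp_payload,
			    unsigned int blocks_per_seg, unsigned int nr_curseg_type)
{
	return cp_pack_start_sum >= cp_payload + 1 &&
	       cp_pack_start_sum <= blocks_per_seg - 1 - nr_curseg_type;
}

int main(void)
{
	/* illustrative numbers, not from a real image */
	unsigned int sit_segs = 2, log_blocks_per_seg = 9, sit_bitmap_bytes = 64;
	unsigned int start_sum = 3, payload = 2, blocks_per_seg = 512, curseg = 6;

	printf("sit bitmap ok: %d\n",
	       bitmap_size_ok(sit_segs, log_blocks_per_seg, sit_bitmap_bytes));
	printf("cp_pack_start_sum ok: %d\n",
	       cp_pack_start_ok(start_sum, payload, blocks_per_seg, curseg));
	return 0;
}
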
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 26cb855..30fd016 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -9,6 +9,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/compiler.h>
 #include <linux/proc_fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/seq_file.h>
@@ -147,13 +148,13 @@
 		int len = 0, i;
 
 		len += snprintf(buf + len, PAGE_SIZE - len,
-						"cold file extenstion:\n");
+						"cold file extension:\n");
 		for (i = 0; i < cold_count; i++)
 			len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
 								extlist[i]);
 
 		len += snprintf(buf + len, PAGE_SIZE - len,
-						"hot file extenstion:\n");
+						"hot file extension:\n");
 		for (i = cold_count; i < cold_count + hot_count; i++)
 			len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
 								extlist[i]);
@@ -165,7 +166,7 @@
 	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
 }
 
-static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
+static ssize_t __sbi_store(struct f2fs_attr *a,
 			struct f2fs_sb_info *sbi,
 			const char *buf, size_t count)
 {
@@ -201,13 +202,13 @@
 
 		down_write(&sbi->sb_lock);
 
-		ret = update_extension_list(sbi, name, hot, set);
+		ret = f2fs_update_extension_list(sbi, name, hot, set);
 		if (ret)
 			goto out;
 
 		ret = f2fs_commit_super(sbi, false);
 		if (ret)
-			update_extension_list(sbi, name, hot, !set);
+			f2fs_update_extension_list(sbi, name, hot, !set);
 out:
 		up_write(&sbi->sb_lock);
 		return ret ? ret : count;
@@ -248,19 +249,56 @@
 	if (!strcmp(a->attr.name, "trim_sections"))
 		return -EINVAL;
 
+	if (!strcmp(a->attr.name, "gc_urgent")) {
+		if (t >= 1) {
+			sbi->gc_mode = GC_URGENT;
+			if (sbi->gc_thread) {
+				sbi->gc_thread->gc_wake = 1;
+				wake_up_interruptible_all(
+					&sbi->gc_thread->gc_wait_queue_head);
+				wake_up_discard_thread(sbi, true);
+			}
+		} else {
+			sbi->gc_mode = GC_NORMAL;
+		}
+		return count;
+	}
+	if (!strcmp(a->attr.name, "gc_idle")) {
+		if (t == GC_IDLE_CB)
+			sbi->gc_mode = GC_IDLE_CB;
+		else if (t == GC_IDLE_GREEDY)
+			sbi->gc_mode = GC_IDLE_GREEDY;
+		else
+			sbi->gc_mode = GC_NORMAL;
+		return count;
+	}
+
 	*ui = t;
 
 	if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
 		f2fs_reset_iostat(sbi);
-	if (!strcmp(a->attr.name, "gc_urgent") && t == 1 && sbi->gc_thread) {
-		sbi->gc_thread->gc_wake = 1;
-		wake_up_interruptible_all(&sbi->gc_thread->gc_wait_queue_head);
-		wake_up_discard_thread(sbi, true);
-	}
-
 	return count;
 }
 
+static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
+			struct f2fs_sb_info *sbi,
+			const char *buf, size_t count)
+{
+	ssize_t ret;
+	bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
+					a->struct_type == GC_THREAD);
+
+	if (gc_entry) {
+		if (!down_read_trylock(&sbi->sb->s_umount))
+			return -EAGAIN;
+	}
+	ret = __sbi_store(a, sbi, buf, count);
+	if (gc_entry)
+		up_read(&sbi->sb->s_umount);
+
+	return ret;
+}
+
 static ssize_t f2fs_attr_show(struct kobject *kobj,
 				struct attribute *attr, char *buf)
 {
@@ -349,8 +387,8 @@
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_urgent, gc_urgent);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
@@ -484,7 +522,8 @@
 	.kset	= &f2fs_kset,
 };
 
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+						void *offset)
 {
 	struct super_block *sb = seq->private;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -511,7 +550,8 @@
 	return 0;
 }
 
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+						void *offset)
 {
 	struct super_block *sb = seq->private;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -535,7 +575,8 @@
 	return 0;
 }
 
-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+					       void *offset)
 {
 	struct super_block *sb = seq->private;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -577,6 +618,28 @@
 	return 0;
 }
 
+static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
+						void *offset)
+{
+	struct super_block *sb = seq->private;
+	struct f2fs_sb_info *sbi = F2FS_SB(sb);
+	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+	int i;
+
+	seq_puts(seq, "format: victim_secmap bitmaps\n");
+
+	for (i = 0; i < MAIN_SECS(sbi); i++) {
+		if ((i % 10) == 0)
+			seq_printf(seq, "%-10d", i);
+		seq_printf(seq, "%d", test_bit(i, dirty_i->victim_secmap) ? 1 : 0);
+		if ((i % 10) == 9 || i == (MAIN_SECS(sbi) - 1))
+			seq_putc(seq, '\n');
+		else
+			seq_putc(seq, ' ');
+	}
+	return 0;
+}
+
 #define F2FS_PROC_FILE_DEF(_name)					\
 static int _name##_open_fs(struct inode *inode, struct file *file)	\
 {									\
@@ -593,6 +656,7 @@
 F2FS_PROC_FILE_DEF(segment_info);
 F2FS_PROC_FILE_DEF(segment_bits);
 F2FS_PROC_FILE_DEF(iostat_info);
+F2FS_PROC_FILE_DEF(victim_bits);
 
 int __init f2fs_init_sysfs(void)
 {
@@ -643,6 +707,8 @@
 				 &f2fs_seq_segment_bits_fops, sb);
 		proc_create_data("iostat_info", S_IRUGO, sbi->s_proc,
 				&f2fs_seq_iostat_info_fops, sb);
+		proc_create_data("victim_bits", S_IRUGO, sbi->s_proc,
+				&f2fs_seq_victim_bits_fops, sb);
 	}
 	return 0;
 }
@@ -653,6 +719,7 @@
 		remove_proc_entry("iostat_info", sbi->s_proc);
 		remove_proc_entry("segment_info", sbi->s_proc);
 		remove_proc_entry("segment_bits", sbi->s_proc);
+		remove_proc_entry("victim_bits", sbi->s_proc);
 		remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
 	}
 	kobject_del(&sbi->s_kobj);
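
The new victim_bits proc file above prints one bit per section, ten per row, with a left-aligned row index at the start of each group. A userspace sketch of the same layout over a plain byte array (test_bit_simple is a local helper, not the kernel macro):

#include <stdio.h>

static int test_bit_simple(int nr, const unsigned char *map)
{
	return (map[nr / 8] >> (nr % 8)) & 1;
}

/* Prints the bitmap in the same shape as the victim_bits seq_show above:
 * a left-aligned index every 10 entries, bits separated by spaces, and a
 * newline after each group of 10 (or at the end). */
static void print_victim_bits(const unsigned char *map, int nsections)
{
	int i;

	printf("format: victim_secmap bitmaps\n");
	for (i = 0; i < nsections; i++) {
		if ((i % 10) == 0)
			printf("%-10d", i);
		printf("%d", test_bit_simple(i, map));
		if ((i % 10) == 9 || i == nsections - 1)
			putchar('\n');
		else
			putchar(' ');
	}
}

int main(void)
{
	unsigned char map[4] = { 0xA5, 0x0F, 0x00, 0x81 };  /* sample bitmap */

	print_victim_bits(map, 25);
	return 0;
}
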
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index ae2dfa7..77a010e 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -37,9 +37,6 @@
 			return -EOPNOTSUPP;
 		break;
 	case F2FS_XATTR_INDEX_TRUSTED:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		break;
 	case F2FS_XATTR_INDEX_SECURITY:
 		break;
 	default:
@@ -62,9 +59,6 @@
 			return -EOPNOTSUPP;
 		break;
 	case F2FS_XATTR_INDEX_TRUSTED:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		break;
 	case F2FS_XATTR_INDEX_SECURITY:
 		break;
 	default:
@@ -100,12 +94,22 @@
 		const char *name, const void *value,
 		size_t size, int flags)
 {
+	unsigned char old_advise = F2FS_I(inode)->i_advise;
+	unsigned char new_advise;
+
 	if (!inode_owner_or_capable(inode))
 		return -EPERM;
 	if (value == NULL)
 		return -EINVAL;
 
-	F2FS_I(inode)->i_advise |= *(char *)value;
+	new_advise = *(char *)value;
+	if (new_advise & ~FADVISE_MODIFIABLE_BITS)
+		return -EINVAL;
+
+	new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
+	new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;
+
+	F2FS_I(inode)->i_advise = new_advise;
 	f2fs_mark_inode_dirty_sync(inode, true);
 	return 0;
 }
@@ -252,7 +256,7 @@
 	if (ipage) {
 		inline_addr = inline_xattr_addr(inode, ipage);
 	} else {
-		page = get_node_page(sbi, inode->i_ino);
+		page = f2fs_get_node_page(sbi, inode->i_ino);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
@@ -273,7 +277,7 @@
 	void *xattr_addr;
 
 	/* The inode already has an extended attribute block. */
-	xpage = get_node_page(sbi, xnid);
+	xpage = f2fs_get_node_page(sbi, xnid);
 	if (IS_ERR(xpage))
 		return PTR_ERR(xpage);
 
@@ -397,7 +401,7 @@
 	int err = 0;
 
 	if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
-		if (!alloc_nid(sbi, &new_nid))
+		if (!f2fs_alloc_nid(sbi, &new_nid))
 			return -ENOSPC;
 
 	/* write to inline xattr */
@@ -405,9 +409,9 @@
 		if (ipage) {
 			inline_addr = inline_xattr_addr(inode, ipage);
 		} else {
-			in_page = get_node_page(sbi, inode->i_ino);
+			in_page = f2fs_get_node_page(sbi, inode->i_ino);
 			if (IS_ERR(in_page)) {
-				alloc_nid_failed(sbi, new_nid);
+				f2fs_alloc_nid_failed(sbi, new_nid);
 				return PTR_ERR(in_page);
 			}
 			inline_addr = inline_xattr_addr(inode, in_page);
@@ -417,8 +421,8 @@
 							NODE, true);
 		/* no need to use xattr node block */
 		if (hsize <= inline_size) {
-			err = truncate_xattr_node(inode);
-			alloc_nid_failed(sbi, new_nid);
+			err = f2fs_truncate_xattr_node(inode);
+			f2fs_alloc_nid_failed(sbi, new_nid);
 			if (err) {
 				f2fs_put_page(in_page, 1);
 				return err;
@@ -431,10 +435,10 @@
 
 	/* write to xattr node block */
 	if (F2FS_I(inode)->i_xattr_nid) {
-		xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
+		xpage = f2fs_get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
 		if (IS_ERR(xpage)) {
 			err = PTR_ERR(xpage);
-			alloc_nid_failed(sbi, new_nid);
+			f2fs_alloc_nid_failed(sbi, new_nid);
 			goto in_page_out;
 		}
 		f2fs_bug_on(sbi, new_nid);
@@ -442,13 +446,13 @@
 	} else {
 		struct dnode_of_data dn;
 		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
-		xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
+		xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
 		if (IS_ERR(xpage)) {
 			err = PTR_ERR(xpage);
-			alloc_nid_failed(sbi, new_nid);
+			f2fs_alloc_nid_failed(sbi, new_nid);
 			goto in_page_out;
 		}
-		alloc_nid_done(sbi, new_nid);
+		f2fs_alloc_nid_done(sbi, new_nid);
 	}
 	xattr_addr = page_address(xpage);
 
@@ -693,7 +697,7 @@
 	if (err)
 		return err;
 
-	/* this case is only from init_inode_metadata */
+	/* this case is only from f2fs_init_inode_metadata */
 	if (ipage)
 		return __f2fs_setxattr(inode, index, name, value,
 						size, ipage, flags);
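
The f2fs_xattr_advise_set() change above stops callers from OR-ing arbitrary bits into i_advise: a request that touches anything outside FADVISE_MODIFIABLE_BITS is rejected, and the protected bits are carried over from the old value. A small sketch of that merge; the mask value here is illustrative, not the kernel's definition:

#include <stdio.h>

#define FADVISE_MODIFIABLE_BITS 0x03  /* assumed mask of user-settable flags */

/* Returns -1 if the caller tries to touch a protected bit, otherwise the
 * merged advise byte: the new modifiable bits plus the preserved rest. */
static int merge_advise(unsigned char old_advise, unsigned char requested)
{
	unsigned char new_advise;

	if (requested & ~FADVISE_MODIFIABLE_BITS)
		return -1;  /* mirrors the -EINVAL path above */

	new_advise = requested & FADVISE_MODIFIABLE_BITS;
	new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;
	return new_advise;
}

int main(void)
{
	printf("merge(0xf0, 0x01) = %d\n", merge_advise(0xf0, 0x01));  /* 0xf1 */
	printf("merge(0xf0, 0x10) = %d\n", merge_advise(0xf0, 0x10));  /* rejected */
	return 0;
}
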
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 5d38492..f04b189 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -224,7 +224,8 @@
 int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
 {
 	struct super_block *sb = inode->i_sb;
-	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
 	struct fat_entry fatent;
 	struct fat_cache_id cid;
 	int nr;
@@ -233,6 +234,12 @@
 
 	*fclus = 0;
 	*dclus = MSDOS_I(inode)->i_start;
+	if (!fat_valid_entry(sbi, *dclus)) {
+		fat_fs_error_ratelimit(sb,
+			"%s: invalid start cluster (i_pos %lld, start %08x)",
+			__func__, MSDOS_I(inode)->i_pos, *dclus);
+		return -EIO;
+	}
 	if (cluster == 0)
 		return 0;
 
@@ -249,9 +256,8 @@
 		/* prevent the infinite loop of cluster chain */
 		if (*fclus > limit) {
 			fat_fs_error_ratelimit(sb,
-					"%s: detected the cluster chain loop"
-					" (i_pos %lld)", __func__,
-					MSDOS_I(inode)->i_pos);
+				"%s: detected the cluster chain loop (i_pos %lld)",
+				__func__, MSDOS_I(inode)->i_pos);
 			nr = -EIO;
 			goto out;
 		}
@@ -261,9 +267,8 @@
 			goto out;
 		else if (nr == FAT_ENT_FREE) {
 			fat_fs_error_ratelimit(sb,
-				       "%s: invalid cluster chain (i_pos %lld)",
-				       __func__,
-				       MSDOS_I(inode)->i_pos);
+				"%s: invalid cluster chain (i_pos %lld)",
+				__func__, MSDOS_I(inode)->i_pos);
 			nr = -EIO;
 			goto out;
 		} else if (nr == FAT_ENT_EOF) {
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index e6b764a..437affe 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -347,6 +347,11 @@
 	fatent->fat_inode = NULL;
 }
 
+static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
+{
+	return FAT_START_ENT <= entry && entry < sbi->max_cluster;
+}
+
 extern void fat_ent_access_init(struct super_block *sb);
 extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
 			int entry);
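
fat_valid_entry() above gives cache.c and fatent.c one shared bounds check: a cluster number is usable only if it lies in [FAT_START_ENT, max_cluster), which is what turns a corrupted start cluster into a clean -EIO instead of an out-of-range FAT access. A tiny standalone model of the check (max_cluster here is an example value, not read from a volume):

#include <stdio.h>
#include <stdbool.h>

#define FAT_START_ENT 2  /* first usable data-cluster number in the model */

/* Same shape as the helper above: entry must be at least FAT_START_ENT
 * and strictly below the number of clusters on the volume. */
static bool fat_valid_entry_model(unsigned int max_cluster, int entry)
{
	return FAT_START_ENT <= entry && entry < (int)max_cluster;
}

int main(void)
{
	unsigned int max_cluster = 65526;  /* example FAT16-sized volume */

	printf("%d %d %d\n",
	       fat_valid_entry_model(max_cluster, 0),        /* reserved: reject */
	       fat_valid_entry_model(max_cluster, 1234),     /* in range: ok */
	       fat_valid_entry_model(max_cluster, 70000));   /* past end: reject */
	return 0;
}
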
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 57b0902..76181d5 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -23,7 +23,7 @@
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 	int bytes = entry + (entry >> 1);
-	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+	WARN_ON(!fat_valid_entry(sbi, entry));
 	*offset = bytes & (sb->s_blocksize - 1);
 	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
 }
@@ -33,7 +33,7 @@
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 	int bytes = (entry << sbi->fatent_shift);
-	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+	WARN_ON(!fat_valid_entry(sbi, entry));
 	*offset = bytes & (sb->s_blocksize - 1);
 	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
 }
@@ -354,7 +354,7 @@
 	int err, offset;
 	sector_t blocknr;
 
-	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
+	if (!fat_valid_entry(sbi, entry)) {
 		fatent_brelse(fatent);
 		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
 		return -EIO;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0b6ba8c..eea5bc2 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -696,13 +696,21 @@
 	brelse(bh);
 }
 
+static void fat_reset_iocharset(struct fat_mount_options *opts)
+{
+	if (opts->iocharset != fat_default_iocharset) {
+		/* Note: opts->iocharset can be NULL here */
+		kfree(opts->iocharset);
+		opts->iocharset = fat_default_iocharset;
+	}
+}
+
 static void delayed_free(struct rcu_head *p)
 {
 	struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
 	unload_nls(sbi->nls_disk);
 	unload_nls(sbi->nls_io);
-	if (sbi->options.iocharset != fat_default_iocharset)
-		kfree(sbi->options.iocharset);
+	fat_reset_iocharset(&sbi->options);
 	kfree(sbi);
 }
 
@@ -1118,7 +1126,7 @@
 	opts->fs_fmask = opts->fs_dmask = current_umask();
 	opts->allow_utime = -1;
 	opts->codepage = fat_default_codepage;
-	opts->iocharset = fat_default_iocharset;
+	fat_reset_iocharset(opts);
 	if (is_vfat) {
 		opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
 		opts->rodir = 0;
@@ -1275,8 +1283,7 @@
 
 		/* vfat specific */
 		case Opt_charset:
-			if (opts->iocharset != fat_default_iocharset)
-				kfree(opts->iocharset);
+			fat_reset_iocharset(opts);
 			iocharset = match_strdup(&args[0]);
 			if (!iocharset)
 				return -ENOMEM;
@@ -1867,8 +1874,7 @@
 		iput(fat_inode);
 	unload_nls(sbi->nls_io);
 	unload_nls(sbi->nls_disk);
-	if (sbi->options.iocharset != fat_default_iocharset)
-		kfree(sbi->options.iocharset);
+	fat_reset_iocharset(&sbi->options);
 	sb->s_fs_info = NULL;
 	kfree(sbi);
 	return error;
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index de67745..77946d6 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -66,7 +66,8 @@
 	ASSERT(op->processor != NULL);
 	ASSERT(fscache_object_is_available(op->object));
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
-	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+		    op->state, ==,  FSCACHE_OP_ST_CANCELLED);
 
 	fscache_stat(&fscache_n_op_enqueue);
 	switch (op->flags & FSCACHE_OP_TYPE) {
@@ -481,7 +482,8 @@
 	struct fscache_cache *cache;
 
 	_enter("{OBJ%x OP%x,%d}",
-	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+	       op->object ? op->object->debug_id : 0,
+	       op->debug_id, atomic_read(&op->usage));
 
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a0b0683..7d656ad 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -133,6 +133,16 @@
 	return !fc->initialized || (for_background && fc->blocked);
 }
 
+static void fuse_drop_waiting(struct fuse_conn *fc)
+{
+	if (fc->connected) {
+		atomic_dec(&fc->num_waiting);
+	} else if (atomic_dec_and_test(&fc->num_waiting)) {
+		/* wake up aborters */
+		wake_up_all(&fc->blocked_waitq);
+	}
+}
+
 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 				       bool for_background)
 {
@@ -173,7 +183,7 @@
 	return req;
 
  out:
-	atomic_dec(&fc->num_waiting);
+	fuse_drop_waiting(fc);
 	return ERR_PTR(err);
 }
 
@@ -280,7 +290,7 @@
 
 		if (test_bit(FR_WAITING, &req->flags)) {
 			__clear_bit(FR_WAITING, &req->flags);
-			atomic_dec(&fc->num_waiting);
+			fuse_drop_waiting(fc);
 		}
 
 		if (req->stolen_file)
@@ -366,7 +376,7 @@
 	struct fuse_iqueue *fiq = &fc->iq;
 
 	if (test_and_set_bit(FR_FINISHED, &req->flags))
-		return;
+		goto put_request;
 
 	spin_lock(&fiq->waitq.lock);
 	list_del_init(&req->intr_entry);
@@ -396,6 +406,7 @@
 	wake_up(&req->waitq);
 	if (req->end)
 		req->end(fc, req);
+put_request:
 	fuse_put_request(fc, req);
 }
 
@@ -1954,11 +1965,14 @@
 	if (!fud)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
-	if (!bufs)
-		return -ENOMEM;
-
 	pipe_lock(pipe);
+
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
+	if (!bufs) {
+		pipe_unlock(pipe);
+		return -ENOMEM;
+	}
+
 	nbuf = 0;
 	rem = 0;
 	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
@@ -2113,6 +2127,7 @@
 				set_bit(FR_ABORTED, &req->flags);
 				if (!test_bit(FR_LOCKED, &req->flags)) {
 					set_bit(FR_PRIVATE, &req->flags);
+					__fuse_get_request(req);
 					list_move(&req->list, &to_end1);
 				}
 				spin_unlock(&req->waitq.lock);
@@ -2139,7 +2154,6 @@
 
 		while (!list_empty(&to_end1)) {
 			req = list_first_entry(&to_end1, struct fuse_req, list);
-			__fuse_get_request(req);
 			list_del_init(&req->list);
 			request_end(fc, req);
 		}
@@ -2150,6 +2164,11 @@
 }
 EXPORT_SYMBOL_GPL(fuse_abort_conn);
 
+void fuse_wait_aborted(struct fuse_conn *fc)
+{
+	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+}
+
 int fuse_dev_release(struct inode *inode, struct file *file)
 {
 	struct fuse_dev *fud = fuse_get_dev(file);
@@ -2157,9 +2176,15 @@
 	if (fud) {
 		struct fuse_conn *fc = fud->fc;
 		struct fuse_pqueue *fpq = &fud->pq;
+		LIST_HEAD(to_end);
 
+		spin_lock(&fpq->lock);
 		WARN_ON(!list_empty(&fpq->io));
-		end_requests(fc, &fpq->processing);
+		list_splice_init(&fpq->processing, &to_end);
+		spin_unlock(&fpq->lock);
+
+		end_requests(fc, &to_end);
+
 		/* Are we the last open device? */
 		if (atomic_dec_and_test(&fc->dev_count)) {
 			WARN_ON(fc->iq.fasync != NULL);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 5dcba9e..e43f1c6 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -401,11 +401,12 @@
 	struct inode *inode;
 	struct dentry *newent;
 	bool outarg_valid = true;
+	bool locked;
 
-	fuse_lock_inode(dir);
+	locked = fuse_lock_inode(dir);
 	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
 			       &outarg, &inode);
-	fuse_unlock_inode(dir);
+	fuse_unlock_inode(dir, locked);
 	if (err == -ENOENT) {
 		outarg_valid = false;
 		err = 0;
@@ -1385,6 +1386,7 @@
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_req *req;
 	u64 attr_version = 0;
+	bool locked;
 
 	if (is_bad_inode(inode))
 		return -EIO;
@@ -1412,9 +1414,9 @@
 		fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
 			       FUSE_READDIR);
 	}
-	fuse_lock_inode(inode);
+	locked = fuse_lock_inode(inode);
 	fuse_request_send(fc, req);
-	fuse_unlock_inode(inode);
+	fuse_unlock_inode(inode, locked);
 	nbytes = req->out.args[0].size;
 	err = req->out.h.error;
 	fuse_put_request(fc, req);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 7adf871..fb1d0f8 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -886,6 +886,7 @@
 	}
 
 	if (WARN_ON(req->num_pages >= req->max_pages)) {
+		unlock_page(page);
 		fuse_put_request(fc, req);
 		return -EIO;
 	}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index cc2c82c..2c1e88c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -868,6 +868,7 @@
 
 /* Abort all requests */
 void fuse_abort_conn(struct fuse_conn *fc);
+void fuse_wait_aborted(struct fuse_conn *fc);
 
 /**
  * Invalidate inode attributes
@@ -981,8 +982,8 @@
 
 void fuse_set_initialized(struct fuse_conn *fc);
 
-void fuse_unlock_inode(struct inode *inode);
-void fuse_lock_inode(struct inode *inode);
+void fuse_unlock_inode(struct inode *inode, bool locked);
+bool fuse_lock_inode(struct inode *inode);
 
 int fuse_setxattr(struct inode *inode, const char *name, const void *value,
 		  size_t size, int flags);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index c506fa9..20d30eb 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -356,15 +356,21 @@
 	return 0;
 }
 
-void fuse_lock_inode(struct inode *inode)
+bool fuse_lock_inode(struct inode *inode)
 {
-	if (!get_fuse_conn(inode)->parallel_dirops)
+	bool locked = false;
+
+	if (!get_fuse_conn(inode)->parallel_dirops) {
 		mutex_lock(&get_fuse_inode(inode)->mutex);
+		locked = true;
+	}
+
+	return locked;
 }
 
-void fuse_unlock_inode(struct inode *inode)
+void fuse_unlock_inode(struct inode *inode, bool locked)
 {
-	if (!get_fuse_conn(inode)->parallel_dirops)
+	if (locked)
 		mutex_unlock(&get_fuse_inode(inode)->mutex);
 }
 
@@ -396,9 +402,6 @@
 {
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 
-	fuse_send_destroy(fc);
-
-	fuse_abort_conn(fc);
 	mutex_lock(&fuse_mutex);
 	list_del(&fc->entry);
 	fuse_ctl_remove_conn(fc);
@@ -1204,16 +1207,25 @@
 	return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
 }
 
-static void fuse_kill_sb_anon(struct super_block *sb)
+static void fuse_sb_destroy(struct super_block *sb)
 {
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 
 	if (fc) {
+		fuse_send_destroy(fc);
+
+		fuse_abort_conn(fc);
+		fuse_wait_aborted(fc);
+
 		down_write(&fc->killsb);
 		fc->sb = NULL;
 		up_write(&fc->killsb);
 	}
+}
 
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+	fuse_sb_destroy(sb);
 	kill_anon_super(sb);
 }
 
@@ -1236,14 +1248,7 @@
 
 static void fuse_kill_sb_blk(struct super_block *sb)
 {
-	struct fuse_conn *fc = get_fuse_conn_super(sb);
-
-	if (fc) {
-		down_write(&fc->killsb);
-		fc->sb = NULL;
-		up_write(&fc->killsb);
-	}
-
+	fuse_sb_destroy(sb);
 	kill_block_super(sb);
 }
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index c6c507c..0283ee0 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -268,22 +268,6 @@
 	for(i = 0; i < nr_pages; i++) {
 		struct page *page = pvec->pages[i];
 
-		/*
-		 * At this point, the page may be truncated or
-		 * invalidated (changing page->mapping to NULL), or
-		 * even swizzled back from swapper_space to tmpfs file
-		 * mapping. However, page->index will not change
-		 * because we have a reference on the page.
-		 */
-		if (page->index > end) {
-			/*
-			 * can't be range_cyclic (1st pass) because
-			 * end == -1 in that case.
-			 */
-			ret = 1;
-			break;
-		}
-
 		*done_index = page->index;
 
 		lock_page(page);
@@ -401,8 +385,8 @@
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && (index <= end)) {
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+				tag);
 		if (nr_pages == 0)
 			break;
 
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index fc5da4c..39af17b 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1472,7 +1472,7 @@
 	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
 	lblock = offset >> shift;
 	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
-	if (lblock_stop > end_of_file)
+	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
 		return 1;
 
 	size = (lblock_stop - lblock) << shift;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 86ccc015..8328249 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1675,7 +1675,8 @@
 
 	while(1) {
 		bi = rbm_bi(rbm);
-		if (test_bit(GBF_FULL, &bi->bi_flags) &&
+		if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
+		    test_bit(GBF_FULL, &bi->bi_flags) &&
 		    (state == GFS2_BLKST_FREE))
 			goto next_bitmap;
 
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 6fc766d..2a6f3c6 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -74,9 +74,10 @@
 	if (!fd->bnode) {
 		if (!tree->root)
 			hfs_btree_inc_height(tree);
-		fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
-		if (IS_ERR(fd->bnode))
-			return PTR_ERR(fd->bnode);
+		node = hfs_bnode_find(tree, tree->leaf_head);
+		if (IS_ERR(node))
+			return PTR_ERR(node);
+		fd->bnode = node;
 		fd->record = -1;
 	}
 	new_node = NULL;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 31d5e3f..193d5411 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -77,13 +77,13 @@
 				cpu_to_be32(HFSP_HARDLINK_TYPE) &&
 				entry.file.user_info.fdCreator ==
 				cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
+				HFSPLUS_SB(sb)->hidden_dir &&
 				(entry.file.create_date ==
 					HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
 						create_date ||
 				entry.file.create_date ==
 					HFSPLUS_I(d_inode(sb->s_root))->
-						create_date) &&
-				HFSPLUS_SB(sb)->hidden_dir) {
+						create_date)) {
 			struct qstr str;
 			char name[32];
 
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index b9563cd..7fb976e 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -524,8 +524,10 @@
 		goto out_put_root;
 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
 		hfs_find_exit(&fd);
-		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
+		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
+			err = -EINVAL;
 			goto out_put_root;
+		}
 		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
 		if (IS_ERR(inode)) {
 			err = PTR_ERR(inode);
diff --git a/fs/inode.c b/fs/inode.c
index 3844c31..1d1a957 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -2003,8 +2003,14 @@
 	inode->i_uid = current_fsuid();
 	if (dir && dir->i_mode & S_ISGID) {
 		inode->i_gid = dir->i_gid;
+
+		/* Directories are special, and always inherit S_ISGID */
 		if (S_ISDIR(mode))
 			mode |= S_ISGID;
+		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+			 !in_group_p(inode->i_gid) &&
+			 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+			mode &= ~S_ISGID;
 	} else
 		inode->i_gid = current_fsgid();
 	inode->i_mode = mode;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index c60f3d3..a679798 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -491,15 +491,17 @@
 	if (size > PSIZE) {
 		/*
 		 * To keep the rest of the code simple.  Allocate a
-		 * contiguous buffer to work with
+		 * contiguous buffer to work with. Make the buffer large
+		 * enough to make use of the whole extent.
 		 */
-		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+		ea_buf->max_size = (size + sb->s_blocksize - 1) &
+		    ~(sb->s_blocksize - 1);
+
+		ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
 		if (ea_buf->xattr == NULL)
 			return -ENOMEM;
 
 		ea_buf->flag = EA_MALLOC;
-		ea_buf->max_size = (size + sb->s_blocksize - 1) &
-		    ~(sb->s_blocksize - 1);
 
 		if (ea_size == 0)
 			return 0;
diff --git a/fs/namespace.c b/fs/namespace.c
index 4628d08c..c2a51a8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -605,12 +605,21 @@
 		return 0;
 	mnt = real_mount(bastard);
 	mnt_add_count(mnt, 1);
+	smp_mb();			// see mntput_no_expire()
 	if (likely(!read_seqretry(&mount_lock, seq)))
 		return 0;
 	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
 		mnt_add_count(mnt, -1);
 		return 1;
 	}
+	lock_mount_hash();
+	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+		mnt_add_count(mnt, -1);
+		unlock_mount_hash();
+		return 1;
+	}
+	unlock_mount_hash();
+	/* caller will mntput() */
 	return -1;
 }
 
@@ -1157,12 +1166,27 @@
 static void mntput_no_expire(struct mount *mnt)
 {
 	rcu_read_lock();
-	mnt_add_count(mnt, -1);
-	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+	if (likely(READ_ONCE(mnt->mnt_ns))) {
+		/*
+		 * Since we don't do lock_mount_hash() here,
+		 * ->mnt_ns can change under us.  However, if it's
+		 * non-NULL, then there's a reference that won't
+		 * be dropped until after an RCU delay done after
+		 * turning ->mnt_ns NULL.  So if we observe it
+		 * non-NULL under rcu_read_lock(), the reference
+		 * we are dropping is not the final one.
+		 */
+		mnt_add_count(mnt, -1);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	/*
+	 * make sure that if __legitimize_mnt() has not seen us grab
+	 * mount_lock, we'll see their refcount increment here.
+	 */
+	smp_mb();
+	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
 		rcu_read_unlock();
 		unlock_mount_hash();
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index a69ef4e..d6e4191 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -203,7 +203,7 @@
 	chunk = div_u64(offset, dev->chunk_size);
 	div_u64_rem(chunk, dev->nr_children, &chunk_idx);
 
-	if (chunk_idx > dev->nr_children) {
+	if (chunk_idx >= dev->nr_children) {
 		dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
 			__func__, chunk_idx, offset, dev->chunk_size);
 		/* error, should not happen */
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2e7ebd9..9d75374 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -175,9 +175,9 @@
 {
 	u32 oldseq, newseq;
 
-	/* Is the stateid still not initialised? */
+	/* Is the stateid not initialised? */
 	if (!pnfs_layout_is_valid(lo))
-		return NFS4ERR_DELAY;
+		return NFS4ERR_NOMATCHING_LAYOUT;
 
 	/* Mismatched stateid? */
 	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index eb094c6..67903ee 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -968,16 +968,21 @@
 
 	if (hdr_arg.minorversion == 0) {
 		cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
-		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
+			if (cps.clp)
+				nfs_put_client(cps.clp);
 			goto out_invalidcred;
+		}
 	}
 
 	cps.minorversion = hdr_arg.minorversion;
 	hdr_res.taglen = hdr_arg.taglen;
 	hdr_res.tag = hdr_arg.tag;
-	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
+		if (cps.clp)
+			nfs_put_client(cps.clp);
 		return rpc_system_err;
-
+	}
 	while (status == 0 && nops != hdr_arg.nops) {
 		status = process_op(nops, rqstp, &xdr_in,
 				    argp, &xdr_out, resp, &cps);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 91e017c..eb55ab6 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -541,8 +541,15 @@
 		ret = -EIO;
 	return ret;
 out_retry:
-	if (ret == 0)
+	if (ret == 0) {
 		exception->retry = 1;
+		/*
+		 * For NFS4ERR_MOVED, the client transport will need to
+		 * be recomputed after migration recovery has completed.
+		 */
+		if (errorcode == -NFS4ERR_MOVED)
+			rpc_task_release_transport(task);
+	}
 	return ret;
 }
 
@@ -2532,14 +2539,18 @@
 	}
 
 	nfs4_stateid_copy(&stateid, &delegation->stateid);
-	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
-		!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
-			&delegation->flags)) {
+	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
 		rcu_read_unlock();
 		nfs_finish_clear_delegation_stateid(state, &stateid);
 		return;
 	}
 
+	if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+				&delegation->flags)) {
+		rcu_read_unlock();
+		return;
+	}
+
 	cred = get_rpccred(delegation->cred);
 	rcu_read_unlock();
 	status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
@@ -2701,7 +2712,7 @@
 	if (ret != 0)
 		goto out;
 
-	state = nfs4_opendata_to_nfs4_state(opendata);
+	state = _nfs4_opendata_to_nfs4_state(opendata);
 	ret = PTR_ERR(state);
 	if (IS_ERR(state))
 		goto out;
@@ -2737,6 +2748,7 @@
 			nfs4_schedule_stateid_recovery(server, state);
 	}
 out:
+	nfs4_sequence_free_slot(&opendata->o_res.seq_res);
 	return ret;
 }
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3536913..857af95 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1336,6 +1336,8 @@
 
 	if (!nfs4_state_mark_reclaim_nograce(clp, state))
 		return -EBADF;
+	nfs_inode_find_delegation_state_and_recover(state->inode,
+			&state->stateid);
 	dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
 			clp->cl_hostname);
 	nfs4_schedule_state_manager(clp);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index eef0caf..e949551 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1725,6 +1725,7 @@
 	if (status) {
 		op = &args->ops[0];
 		op->status = status;
+		resp->opcnt = 1;
 		goto encode_op;
 	}
 
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index bdbd9e6..b16a6c0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1536,6 +1536,8 @@
 	gdev->gd_maxcount = be32_to_cpup(p++);
 	num = be32_to_cpup(p++);
 	if (num) {
+		if (num > 1000)
+			goto xdr_error;
 		READ_BUF(4 * num);
 		gdev->gd_notify_types = be32_to_cpup(p++);
 		for (i = 1; i < num; i++) {
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 2e315f9..ac1ec8f 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -2158,8 +2158,8 @@
 
 	pagevec_init(&pvec, 0);
 
-	while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
-				  PAGEVEC_SIZE)) {
+	while (pagevec_lookup_tag(&pvec, btcache, &index,
+					PAGECACHE_TAG_DIRTY)) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			bh = head = page_buffers(pvec.pages[i]);
 			do {
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index f11a3ad..454ee52 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -257,8 +257,7 @@
 
 	pagevec_init(&pvec, 0);
 repeat:
-	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
-				PAGEVEC_SIZE))
+	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
 		return 0;
 
 	for (i = 0; i < pagevec_count(&pvec); i++) {
@@ -377,8 +376,8 @@
 
 	pagevec_init(&pvec, 0);
 
-	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
-				  PAGEVEC_SIZE)) {
+	while (pagevec_lookup_tag(&pvec, mapping, &index,
+					PAGECACHE_TAG_DIRTY)) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 36362d4..b0eb58c 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -709,18 +709,14 @@
 	pagevec_init(&pvec, 0);
  repeat:
 	if (unlikely(index > last) ||
-	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
-				min_t(pgoff_t, last - index,
-				      PAGEVEC_SIZE - 1) + 1))
+	    !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
+				PAGECACHE_TAG_DIRTY))
 		return ndirties;
 
 	for (i = 0; i < pagevec_count(&pvec); i++) {
 		struct buffer_head *bh, *head;
 		struct page *page = pvec.pages[i];
 
-		if (unlikely(page->index > last))
-			break;
-
 		lock_page(page);
 		if (!page_has_buffers(page))
 			create_empty_buffers(page, i_blocksize(inode), 0);
@@ -757,8 +753,8 @@
 
 	pagevec_init(&pvec, 0);
 
-	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
-				  PAGEVEC_SIZE)) {
+	while (pagevec_lookup_tag(&pvec, mapping, &index,
+					PAGECACHE_TAG_DIRTY)) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			bh = head = page_buffers(pvec.pages[i]);
 			do {
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f2961b1..c26d046 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -134,6 +134,19 @@
 	return err;
 }
 
+static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
+		    struct buffer_head *bh_result, int create)
+{
+	int ret = 0;
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+	down_read(&oi->ip_alloc_sem);
+	ret = ocfs2_get_block(inode, iblock, bh_result, create);
+	up_read(&oi->ip_alloc_sem);
+
+	return ret;
+}
+
 int ocfs2_get_block(struct inode *inode, sector_t iblock,
 		    struct buffer_head *bh_result, int create)
 {
@@ -2120,7 +2133,7 @@
  * called like this: dio->get_blocks(dio->inode, fs_startblk,
  * 					fs_count, map_bh, dio->rw == WRITE);
  */
-static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
+static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
 			       struct buffer_head *bh_result, int create)
 {
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2146,12 +2159,9 @@
 	 * while file size will be changed.
 	 */
 	if (pos + total_len <= i_size_read(inode)) {
-		down_read(&oi->ip_alloc_sem);
+
 		/* This is the fast path for re-write. */
-		ret = ocfs2_get_block(inode, iblock, bh_result, create);
-
-		up_read(&oi->ip_alloc_sem);
-
+		ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
 		if (buffer_mapped(bh_result) &&
 		    !buffer_new(bh_result) &&
 		    ret == 0)
@@ -2416,9 +2426,9 @@
 		return 0;
 
 	if (iov_iter_rw(iter) == READ)
-		get_block = ocfs2_get_block;
+		get_block = ocfs2_lock_get_block;
 	else
-		get_block = ocfs2_dio_get_block;
+		get_block = ocfs2_dio_wr_get_block;
 
 	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
 				    iter, get_block,
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 8f040f8..25c8b32 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -341,6 +341,7 @@
 				 * for this bh as it's not marked locally
 				 * uptodate. */
 				status = -EIO;
+				clear_buffer_needs_validate(bh);
 				put_bh(bh);
 				bhs[i] = NULL;
 				continue;
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index b17d180..c204ac9b 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -40,6 +40,9 @@
 		"panic",	/* O2NM_FENCE_PANIC */
 };
 
+static inline void o2nm_lock_subsystem(void);
+static inline void o2nm_unlock_subsystem(void);
+
 struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
 {
 	struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@
 {
 	/* through the first node_set .parent
 	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
-	return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+	if (node->nd_item.ci_parent)
+		return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+	else
+		return NULL;
 }
 
 enum {
@@ -194,7 +200,7 @@
 				   size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	unsigned long tmp;
 	char *p = (char *)page;
 	int ret = 0;
@@ -214,6 +220,13 @@
 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
 		return -EINVAL; /* XXX */
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		o2nm_unlock_subsystem();
+		return -EINVAL;
+	}
+
 	write_lock(&cluster->cl_nodes_lock);
 	if (cluster->cl_nodes[tmp])
 		ret = -EEXIST;
@@ -226,6 +239,8 @@
 		set_bit(tmp, cluster->cl_nodes_bitmap);
 	}
 	write_unlock(&cluster->cl_nodes_lock);
+	o2nm_unlock_subsystem();
+
 	if (ret)
 		return ret;
 
@@ -269,7 +284,7 @@
 					    size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	int ret, i;
 	struct rb_node **p, *parent;
 	unsigned int octets[4];
@@ -286,6 +301,13 @@
 		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
 	}
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		o2nm_unlock_subsystem();
+		return -EINVAL;
+	}
+
 	ret = 0;
 	write_lock(&cluster->cl_nodes_lock);
 	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -298,6 +320,8 @@
 		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
 	}
 	write_unlock(&cluster->cl_nodes_lock);
+	o2nm_unlock_subsystem();
+
 	if (ret)
 		return ret;
 
@@ -315,7 +339,7 @@
 				     size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	unsigned long tmp;
 	char *p = (char *)page;
 	ssize_t ret;
@@ -333,17 +357,26 @@
 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
 		return -EINVAL; /* XXX */
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* the only failure case is trying to set a new local node
 	 * when a different one is already set */
 	if (tmp && tmp == cluster->cl_has_local &&
-	    cluster->cl_local_node != node->nd_num)
-		return -EBUSY;
+	    cluster->cl_local_node != node->nd_num) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	/* bring up the rx thread if we're setting the new local node. */
 	if (tmp && !cluster->cl_has_local) {
 		ret = o2net_start_listening(node);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
 	if (!tmp && cluster->cl_has_local &&
@@ -358,7 +391,11 @@
 		cluster->cl_local_node = node->nd_num;
 	}
 
-	return count;
+	ret = count;
+
+out:
+	o2nm_unlock_subsystem();
+	return ret;
 }
 
 CONFIGFS_ATTR(o2nm_node_, num);
@@ -738,6 +775,16 @@
 	},
 };
 
+static inline void o2nm_lock_subsystem(void)
+{
+	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
+static inline void o2nm_unlock_subsystem(void)
+{
+	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
 int o2nm_depend_item(struct config_item *item)
 {
 	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3f828a1..0cc30a5 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -589,9 +589,9 @@
 
 	res->last_used = 0;
 
-	spin_lock(&dlm->spinlock);
+	spin_lock(&dlm->track_lock);
 	list_add_tail(&res->tracking, &dlm->tracking_list);
-	spin_unlock(&dlm->spinlock);
+	spin_unlock(&dlm->track_lock);
 
 	memset(res->lvb, 0, DLM_LVB_LEN);
 	memset(res->refmap, 0, sizeof(res->refmap));
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0f23b3b..37e04a0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -455,6 +455,20 @@
 	int err;
 	int i;
 
+	/*
+	 * The ability to racily run the kernel stack unwinder on a running task
+	 * and then observe the unwinder output is scary; while it is useful for
+	 * debugging kernel issues, it can also allow an attacker to leak kernel
+	 * stack contents.
+	 * Doing this in a manner that is at least safe from races would require
+	 * some work to ensure that the remote task can not be scheduled; and
+	 * even then, this would still expose the unwinder as local attack
+	 * surface.
+	 * Therefore, this interface is restricted to root.
+	 */
+	if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+		return -EACCES;
+
 	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index e69ebe6..4298a39 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -43,10 +43,11 @@
 	de = PDE(inode);
 	if (de)
 		pde_put(de);
+
 	head = PROC_I(inode)->sysctl;
 	if (head) {
 		RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
-		sysctl_head_put(head);
+		proc_sys_evict_inode(inode, head);
 	}
 }
 
@@ -456,17 +457,12 @@
 	return inode;
 }
 
-int proc_fill_super(struct super_block *s, void *data, int silent)
+int proc_fill_super(struct super_block *s)
 {
-	struct pid_namespace *ns = get_pid_ns(s->s_fs_info);
 	struct inode *root_inode;
 	int ret;
 
-	if (!proc_parse_options(data, ns))
-		return -EINVAL;
-
-	/* User space would break if executables or devices appear on proc */
-	s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
+	s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NODEV;
 	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
 	s->s_blocksize = 1024;
 	s->s_blocksize_bits = 10;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index d8105cd..d960512 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -65,6 +65,7 @@
 	struct proc_dir_entry *pde;
 	struct ctl_table_header *sysctl;
 	struct ctl_table *sysctl_entry;
+	struct hlist_node sysctl_inodes;
 	const struct proc_ns_operations *ns_ops;
 	struct inode vfs_inode;
 };
@@ -213,7 +214,7 @@
 
 extern void proc_init_inodecache(void);
 extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
-extern int proc_fill_super(struct super_block *, void *data, int flags);
+extern int proc_fill_super(struct super_block *);
 extern void proc_entry_rundown(struct proc_dir_entry *);
 
 /*
@@ -250,10 +251,12 @@
  */
 #ifdef CONFIG_PROC_SYSCTL
 extern int proc_sys_init(void);
-extern void sysctl_head_put(struct ctl_table_header *);
+extern void proc_sys_evict_inode(struct inode *inode,
+				 struct ctl_table_header *head);
 #else
 static inline void proc_sys_init(void) { }
-static inline void sysctl_head_put(struct ctl_table_header *head) { }
+static inline void proc_sys_evict_inode(struct  inode *inode,
+					struct ctl_table_header *head) { }
 #endif
 
 /*
@@ -278,7 +281,6 @@
  * root.c
  */
 extern struct proc_dir_entry proc_root;
-extern int proc_parse_options(char *options, struct pid_namespace *pid);
 
 extern void proc_self_init(void);
 extern int proc_remount(struct super_block *, int *, char *);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 847f234..46cd2e1 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -190,6 +190,7 @@
 	head->set = set;
 	head->parent = NULL;
 	head->node = node;
+	INIT_HLIST_HEAD(&head->inodes);
 	if (node) {
 		struct ctl_table *entry;
 		for (entry = table; entry->procname; entry++, node++)
@@ -259,6 +260,44 @@
 			complete(p->unregistering);
 }
 
+static void proc_sys_prune_dcache(struct ctl_table_header *head)
+{
+	struct inode *inode;
+	struct proc_inode *ei;
+	struct hlist_node *node;
+	struct super_block *sb;
+
+	rcu_read_lock();
+	for (;;) {
+		node = hlist_first_rcu(&head->inodes);
+		if (!node)
+			break;
+		ei = hlist_entry(node, struct proc_inode, sysctl_inodes);
+		spin_lock(&sysctl_lock);
+		hlist_del_init_rcu(&ei->sysctl_inodes);
+		spin_unlock(&sysctl_lock);
+
+		inode = &ei->vfs_inode;
+		sb = inode->i_sb;
+		if (!atomic_inc_not_zero(&sb->s_active))
+			continue;
+		inode = igrab(inode);
+		rcu_read_unlock();
+		if (unlikely(!inode)) {
+			deactivate_super(sb);
+			rcu_read_lock();
+			continue;
+		}
+
+		d_prune_aliases(inode);
+		iput(inode);
+		deactivate_super(sb);
+
+		rcu_read_lock();
+	}
+	rcu_read_unlock();
+}
+
 /* called under sysctl_lock, will reacquire if has to wait */
 static void start_unregistering(struct ctl_table_header *p)
 {
@@ -272,33 +311,24 @@
 		p->unregistering = &wait;
 		spin_unlock(&sysctl_lock);
 		wait_for_completion(&wait);
-		spin_lock(&sysctl_lock);
 	} else {
 		/* anything non-NULL; we'll never dereference it */
 		p->unregistering = ERR_PTR(-EINVAL);
+		spin_unlock(&sysctl_lock);
 	}
 	/*
+	 * Prune dentries for unregistered sysctls: namespaced sysctls
+	 * can have duplicate names and contaminate dcache very badly.
+	 */
+	proc_sys_prune_dcache(p);
+	/*
 	 * do not remove from the list until nobody holds it; walking the
 	 * list in do_sysctl() relies on that.
 	 */
+	spin_lock(&sysctl_lock);
 	erase_header(p);
 }
 
-static void sysctl_head_get(struct ctl_table_header *head)
-{
-	spin_lock(&sysctl_lock);
-	head->count++;
-	spin_unlock(&sysctl_lock);
-}
-
-void sysctl_head_put(struct ctl_table_header *head)
-{
-	spin_lock(&sysctl_lock);
-	if (!--head->count)
-		kfree_rcu(head, rcu);
-	spin_unlock(&sysctl_lock);
-}
-
 static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head)
 {
 	BUG_ON(!head);
@@ -440,10 +470,20 @@
 
 	inode->i_ino = get_next_ino();
 
-	sysctl_head_get(head);
 	ei = PROC_I(inode);
+
+	spin_lock(&sysctl_lock);
+	if (unlikely(head->unregistering)) {
+		spin_unlock(&sysctl_lock);
+		iput(inode);
+		inode = NULL;
+		goto out;
+	}
 	ei->sysctl = head;
 	ei->sysctl_entry = table;
+	hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes);
+	head->count++;
+	spin_unlock(&sysctl_lock);
 
 	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_mode = table->mode;
@@ -466,6 +506,15 @@
 	return inode;
 }
 
+void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head)
+{
+	spin_lock(&sysctl_lock);
+	hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes);
+	if (!--head->count)
+		kfree_rcu(head, rcu);
+	spin_unlock(&sysctl_lock);
+}
+
 static struct ctl_table_header *grab_header(struct inode *inode)
 {
 	struct ctl_table_header *head = PROC_I(inode)->sysctl;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c2f5014..1d68fcd 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -23,6 +23,21 @@
 
 #include "internal.h"
 
+static int proc_test_super(struct super_block *sb, void *data)
+{
+	return sb->s_fs_info == data;
+}
+
+static int proc_set_super(struct super_block *sb, void *data)
+{
+	int err = set_anon_super(sb, NULL);
+	if (!err) {
+		struct pid_namespace *ns = (struct pid_namespace *)data;
+		sb->s_fs_info = get_pid_ns(ns);
+	}
+	return err;
+}
+
 enum {
 	Opt_gid, Opt_hidepid, Opt_err,
 };
@@ -33,7 +48,7 @@
 	{Opt_err, NULL},
 };
 
-int proc_parse_options(char *options, struct pid_namespace *pid)
+static int proc_parse_options(char *options, struct pid_namespace *pid)
 {
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
@@ -85,16 +100,45 @@
 static struct dentry *proc_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
+	int err;
+	struct super_block *sb;
 	struct pid_namespace *ns;
+	char *options;
 
 	if (flags & MS_KERNMOUNT) {
-		ns = data;
-		data = NULL;
+		ns = (struct pid_namespace *)data;
+		options = NULL;
 	} else {
 		ns = task_active_pid_ns(current);
+		options = data;
+
+		/* Does the mounter have privilege over the pid namespace? */
+		if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+			return ERR_PTR(-EPERM);
 	}
 
-	return mount_ns(fs_type, flags, data, ns, ns->user_ns, proc_fill_super);
+	sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
+	if (IS_ERR(sb))
+		return ERR_CAST(sb);
+
+	if (!proc_parse_options(options, ns)) {
+		deactivate_locked_super(sb);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!sb->s_root) {
+		err = proc_fill_super(sb);
+		if (err) {
+			deactivate_locked_super(sb);
+			return ERR_PTR(err);
+		}
+
+		sb->s_flags |= MS_ACTIVE;
+		/* User space would break if executables appear on proc */
+		sb->s_iflags |= SB_I_NOEXEC;
+	}
+
+	return dget(sb->s_root);
 }
 
 static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index e11672a..ecdb3ba 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -421,7 +421,12 @@
 	vaddr = vmap(pages, page_count, VM_MAP, prot);
 	kfree(pages);
 
-	return vaddr;
+	/*
+	 * Since vmap() uses page granularity, we must add the offset
+	 * into the page here, to get the byte granularity address
+	 * into the mapping to represent the actual "start" location.
+	 */
+	return vaddr + offset_in_page(start);
 }
 
 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
@@ -440,6 +445,11 @@
 	else
 		va = ioremap_wc(start, size);
 
+	/*
+	 * Since request_mem_region() and ioremap() are byte-granularity
+	 * there is no need handle anything special like we do when the
+	 * vmap() case in persistent_ram_vmap() above.
+	 */
 	return va;
 }
 
@@ -460,7 +470,7 @@
 		return -ENOMEM;
 	}
 
-	prz->buffer = prz->vaddr + offset_in_page(start);
+	prz->buffer = prz->vaddr;
 	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
 
 	return 0;
@@ -507,7 +517,8 @@
 
 	if (prz->vaddr) {
 		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
-			vunmap(prz->vaddr);
+			/* We must vunmap() at page-granularity. */
+			vunmap(prz->vaddr - offset_in_page(prz->paddr));
 		} else {
 			iounmap(prz->vaddr);
 			release_mem_region(prz->paddr, prz->size);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 2d44542..a2329f7 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -17,6 +17,7 @@
 #include <linux/quotaops.h>
 #include <linux/types.h>
 #include <linux/writeback.h>
+#include <linux/nospec.h>
 
 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
 				     qid_t id)
@@ -706,6 +707,7 @@
 
 	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
 		return -EINVAL;
+	type = array_index_nospec(type, MAXQUOTAS);
 	/*
 	 * Quota not supported on this fs? Check this before s_quota_types
 	 * since they needn't be set if quota is not supported at all.
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 4f3f928..92470e5 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -76,83 +76,99 @@
 }
 
 /* %k */
-static void sprintf_le_key(char *buf, struct reiserfs_key *key)
+static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
 {
 	if (key)
-		sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
-			le32_to_cpu(key->k_objectid), le_offset(key),
-			le_type(key));
+		return scnprintf(buf, size, "[%d %d %s %s]",
+				 le32_to_cpu(key->k_dir_id),
+				 le32_to_cpu(key->k_objectid), le_offset(key),
+				 le_type(key));
 	else
-		sprintf(buf, "[NULL]");
+		return scnprintf(buf, size, "[NULL]");
 }
 
 /* %K */
-static void sprintf_cpu_key(char *buf, struct cpu_key *key)
+static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
 {
 	if (key)
-		sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
-			key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
-			cpu_type(key));
+		return scnprintf(buf, size, "[%d %d %s %s]",
+				 key->on_disk_key.k_dir_id,
+				 key->on_disk_key.k_objectid,
+				 reiserfs_cpu_offset(key), cpu_type(key));
 	else
-		sprintf(buf, "[NULL]");
+		return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
+static int scnprintf_de_head(char *buf, size_t size,
+			     struct reiserfs_de_head *deh)
 {
 	if (deh)
-		sprintf(buf,
-			"[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
-			deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
-			deh_location(deh), deh_state(deh));
+		return scnprintf(buf, size,
+				 "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
+				 deh_offset(deh), deh_dir_id(deh),
+				 deh_objectid(deh), deh_location(deh),
+				 deh_state(deh));
 	else
-		sprintf(buf, "[NULL]");
+		return scnprintf(buf, size, "[NULL]");
 
 }
 
-static void sprintf_item_head(char *buf, struct item_head *ih)
+static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
 {
 	if (ih) {
-		strcpy(buf,
-		       (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
-		sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
-		sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
-			"free_space(entry_count) %d",
-			ih_item_len(ih), ih_location(ih), ih_free_space(ih));
+		char *p = buf;
+		char * const end = buf + size;
+
+		p += scnprintf(p, end - p, "%s",
+			       (ih_version(ih) == KEY_FORMAT_3_6) ?
+			       "*3.6* " : "*3.5*");
+
+		p += scnprintf_le_key(p, end - p, &ih->ih_key);
+
+		p += scnprintf(p, end - p,
+			       ", item_len %d, item_location %d, free_space(entry_count) %d",
+			       ih_item_len(ih), ih_location(ih),
+			       ih_free_space(ih));
+		return p - buf;
 	} else
-		sprintf(buf, "[NULL]");
+		return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
+static int scnprintf_direntry(char *buf, size_t size,
+			      struct reiserfs_dir_entry *de)
 {
 	char name[20];
 
 	memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
 	name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
-	sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
+	return scnprintf(buf, size, "\"%s\"==>[%d %d]",
+			 name, de->de_dir_id, de->de_objectid);
 }
 
-static void sprintf_block_head(char *buf, struct buffer_head *bh)
+static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
 {
-	sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
-		B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
+	return scnprintf(buf, size,
+			 "level=%d, nr_items=%d, free_space=%d rdkey ",
+			 B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
 }
 
-static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
+static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
 {
-	sprintf(buf,
-		"dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
-		bh->b_bdev, bh->b_size,
-		(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
-		bh->b_state, bh->b_page,
-		buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
-		buffer_dirty(bh) ? "DIRTY" : "CLEAN",
-		buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
+	return scnprintf(buf, size,
+			 "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+			 bh->b_bdev, bh->b_size,
+			 (unsigned long long)bh->b_blocknr,
+			 atomic_read(&(bh->b_count)),
+			 bh->b_state, bh->b_page,
+			 buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
+			 buffer_dirty(bh) ? "DIRTY" : "CLEAN",
+			 buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
 }
 
-static void sprintf_disk_child(char *buf, struct disk_child *dc)
+static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
 {
-	sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
-		dc_size(dc));
+	return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
+			 dc_block_number(dc), dc_size(dc));
 }
 
 static char *is_there_reiserfs_struct(char *fmt, int *what)
@@ -189,55 +205,60 @@
 	char *fmt1 = fmt_buf;
 	char *k;
 	char *p = error_buf;
+	char * const end = &error_buf[sizeof(error_buf)];
 	int what;
 
 	spin_lock(&error_lock);
 
-	strcpy(fmt1, fmt);
+	if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
+		strscpy(error_buf, "format string too long", end - error_buf);
+		goto out_unlock;
+	}
 
 	while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
 		*k = 0;
 
-		p += vsprintf(p, fmt1, args);
+		p += vscnprintf(p, end - p, fmt1, args);
 
 		switch (what) {
 		case 'k':
-			sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
+			p += scnprintf_le_key(p, end - p,
+					      va_arg(args, struct reiserfs_key *));
 			break;
 		case 'K':
-			sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
+			p += scnprintf_cpu_key(p, end - p,
+					       va_arg(args, struct cpu_key *));
 			break;
 		case 'h':
-			sprintf_item_head(p, va_arg(args, struct item_head *));
+			p += scnprintf_item_head(p, end - p,
+						 va_arg(args, struct item_head *));
 			break;
 		case 't':
-			sprintf_direntry(p,
-					 va_arg(args,
-						struct reiserfs_dir_entry *));
+			p += scnprintf_direntry(p, end - p,
+						va_arg(args, struct reiserfs_dir_entry *));
 			break;
 		case 'y':
-			sprintf_disk_child(p,
-					   va_arg(args, struct disk_child *));
+			p += scnprintf_disk_child(p, end - p,
+						  va_arg(args, struct disk_child *));
 			break;
 		case 'z':
-			sprintf_block_head(p,
-					   va_arg(args, struct buffer_head *));
+			p += scnprintf_block_head(p, end - p,
+						  va_arg(args, struct buffer_head *));
 			break;
 		case 'b':
-			sprintf_buffer_head(p,
-					    va_arg(args, struct buffer_head *));
+			p += scnprintf_buffer_head(p, end - p,
+						   va_arg(args, struct buffer_head *));
 			break;
 		case 'a':
-			sprintf_de_head(p,
-					va_arg(args,
-					       struct reiserfs_de_head *));
+			p += scnprintf_de_head(p, end - p,
+					       va_arg(args, struct reiserfs_de_head *));
 			break;
 		}
 
-		p += strlen(p);
 		fmt1 = k + 2;
 	}
-	vsprintf(p, fmt1, args);
+	p += vscnprintf(p, end - p, fmt1, args);
+out_unlock:
 	spin_unlock(&error_lock);
 
 }
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 6ca0047..d920a64 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -270,7 +270,7 @@
 
 	struct mutex j_commit_mutex;
 	unsigned int j_trans_id;
-	time_t j_timestamp;
+	time64_t j_timestamp; /* write-only but useful for crash dump analysis */
 	struct reiserfs_list_bitmap *j_list_bitmap;
 	struct buffer_head *j_commit_bh;	/* commit buffer head */
 	struct reiserfs_journal_cnode *j_realblock;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index e87aa21..06a9fae 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -791,8 +791,10 @@
 			return 0;
 		size = namelen + 1;
 		if (b->buf) {
-			if (size > b->size)
+			if (b->pos + size > b->size) {
+				b->pos = -ERANGE;
 				return -ERANGE;
+			}
 			memcpy(b->buf + b->pos, name, namelen);
 			b->buf[b->pos + namelen] = 0;
 		}
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 1461254..271c4c4 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -118,7 +118,11 @@
 		goto out;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+	saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+	if (!saved_cred) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (lower_file->f_op->unlocked_ioctl)
 		err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
@@ -127,7 +131,7 @@
 	if (!err)
 		sdcardfs_copy_and_fix_attrs(file_inode(file),
 				      file_inode(lower_file));
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out:
 	return err;
 }
@@ -149,12 +153,16 @@
 		goto out;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+	saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+	if (!saved_cred) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (lower_file->f_op->compat_ioctl)
 		err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
 
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out:
 	return err;
 }
@@ -241,7 +249,11 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
+	saved_cred = override_fsids(sbi, SDCARDFS_I(inode)->data);
+	if (!saved_cred) {
+		err = -ENOMEM;
+		goto out_err;
+	}
 
 	file->private_data =
 		kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
@@ -271,7 +283,7 @@
 		sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
 
 out_revert_cred:
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_err:
 	dput(parent);
 	return err;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 30d4db2..7c08ffe 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -22,7 +22,6 @@
 #include <linux/fs_struct.h>
 #include <linux/ratelimit.h>
 
-/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
 const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
 		struct sdcardfs_inode_data *data)
 {
@@ -50,7 +49,6 @@
 	return old_cred;
 }
 
-/* Do not directly use this function, use REVERT_CRED() instead. */
 void revert_fsids(const struct cred *old_cred)
 {
 	const struct cred *cur_cred;
@@ -78,7 +76,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+					SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -95,8 +96,11 @@
 		err = -ENOMEM;
 		goto out_unlock;
 	}
+	copied_fs->umask = 0;
+	task_lock(current);
 	current->fs = copied_fs;
-	current->fs->umask = 0;
+	task_unlock(current);
+
 	err = vfs_create2(lower_dentry_mnt, d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
 	if (err)
 		goto out;
@@ -110,58 +114,18 @@
 	fixup_lower_ownership(dentry, dentry->d_name.name);
 
 out:
+	task_lock(current);
 	current->fs = saved_fs;
+	task_unlock(current);
 	free_fs_struct(copied_fs);
 out_unlock:
 	unlock_dir(lower_parent_dentry);
 	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
 
-#if 0
-static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
-		       struct dentry *new_dentry)
-{
-	struct dentry *lower_old_dentry;
-	struct dentry *lower_new_dentry;
-	struct dentry *lower_dir_dentry;
-	u64 file_size_save;
-	int err;
-	struct path lower_old_path, lower_new_path;
-
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-	file_size_save = i_size_read(d_inode(old_dentry));
-	sdcardfs_get_lower_path(old_dentry, &lower_old_path);
-	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
-	lower_old_dentry = lower_old_path.dentry;
-	lower_new_dentry = lower_new_path.dentry;
-	lower_dir_dentry = lock_parent(lower_new_dentry);
-
-	err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
-		       lower_new_dentry, NULL);
-	if (err || !d_inode(lower_new_dentry))
-		goto out;
-
-	err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
-	if (err)
-		goto out;
-	fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
-	fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
-	set_nlink(d_inode(old_dentry),
-		  sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
-	i_size_write(d_inode(new_dentry), file_size_save);
-out:
-	unlock_dir(lower_dir_dentry);
-	sdcardfs_put_lower_path(old_dentry, &lower_old_path);
-	sdcardfs_put_lower_path(new_dentry, &lower_new_path);
-	REVERT_CRED();
-	return err;
-}
-#endif
-
 static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
 {
 	int err;
@@ -178,7 +142,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -209,43 +176,11 @@
 	unlock_dir(lower_dir_dentry);
 	dput(lower_dentry);
 	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
 
-#if 0
-static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
-			  const char *symname)
-{
-	int err;
-	struct dentry *lower_dentry;
-	struct dentry *lower_parent_dentry = NULL;
-	struct path lower_path;
-
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-	sdcardfs_get_lower_path(dentry, &lower_path);
-	lower_dentry = lower_path.dentry;
-	lower_parent_dentry = lock_parent(lower_dentry);
-
-	err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
-	if (err)
-		goto out;
-	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
-	if (err)
-		goto out;
-	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
-	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
-	unlock_dir(lower_parent_dentry);
-	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED();
-	return err;
-}
-#endif
-
 static int touch(char *abs_path, mode_t mode)
 {
 	struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
@@ -287,7 +222,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	/* check disk space */
 	parent_dentry = dget_parent(dentry);
@@ -316,8 +254,11 @@
 		unlock_dir(lower_parent_dentry);
 		goto out_unlock;
 	}
+	copied_fs->umask = 0;
+	task_lock(current);
 	current->fs = copied_fs;
-	current->fs->umask = 0;
+	task_unlock(current);
+
 	err = vfs_mkdir2(lower_mnt, d_inode(lower_parent_dentry), lower_dentry, mode);
 
 	if (err) {
@@ -366,23 +307,34 @@
 	if (make_nomedia_in_obb ||
 		((pd->perm == PERM_ANDROID)
 				&& (qstr_case_eq(&dentry->d_name, &q_data)))) {
-		REVERT_CRED(saved_cred);
-		OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
+		revert_fsids(saved_cred);
+		saved_cred = override_fsids(sbi,
+					SDCARDFS_I(d_inode(dentry))->data);
+		if (!saved_cred) {
+			pr_err("sdcardfs: failed to set up .nomedia in %s: %d\n",
+						lower_path.dentry->d_name.name,
+						-ENOMEM);
+			goto out;
+		}
 		set_fs_pwd(current->fs, &lower_path);
 		touch_err = touch(".nomedia", 0664);
 		if (touch_err) {
 			pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
-							lower_path.dentry->d_name.name, touch_err);
+						lower_path.dentry->d_name.name,
+						touch_err);
 			goto out;
 		}
 	}
 out:
+	task_lock(current);
 	current->fs = saved_fs;
+	task_unlock(current);
+
 	free_fs_struct(copied_fs);
 out_unlock:
 	sdcardfs_put_lower_path(dentry, &lower_path);
 out_revert:
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
@@ -402,7 +354,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
 	 * the dentry on the original path should be deleted.
@@ -427,44 +382,11 @@
 out:
 	unlock_dir(lower_dir_dentry);
 	sdcardfs_put_real_lower(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
 
-#if 0
-static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-			dev_t dev)
-{
-	int err;
-	struct dentry *lower_dentry;
-	struct dentry *lower_parent_dentry = NULL;
-	struct path lower_path;
-
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
-	sdcardfs_get_lower_path(dentry, &lower_path);
-	lower_dentry = lower_path.dentry;
-	lower_parent_dentry = lock_parent(lower_dentry);
-
-	err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
-	if (err)
-		goto out;
-
-	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
-	if (err)
-		goto out;
-	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
-	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
-	unlock_dir(lower_parent_dentry);
-	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED();
-	return err;
-}
-#endif
-
 /*
  * The locking rules in sdcardfs_rename are complex.  We could use a simpler
  * superblock-level name-space lock for renames and copy-ups.
@@ -493,7 +415,10 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
+	saved_cred = override_fsids(SDCARDFS_SB(old_dir->i_sb),
+						SDCARDFS_I(new_dir)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_real_lower(old_dentry, &lower_old_path);
 	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
@@ -540,7 +465,7 @@
 	dput(lower_new_dir_dentry);
 	sdcardfs_put_real_lower(old_dentry, &lower_old_path);
 	sdcardfs_put_lower_path(new_dentry, &lower_new_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_eacces:
 	return err;
 }
@@ -659,33 +584,7 @@
 	if (IS_POSIXACL(inode))
 		pr_warn("%s: This may be undefined behavior...\n", __func__);
 	err = generic_permission(&tmp, mask);
-	/* XXX
-	 * Original sdcardfs code calls inode_permission(lower_inode,.. )
-	 * for checking inode permission. But doing such things here seems
-	 * duplicated work, because the functions called after this func,
-	 * such as vfs_create, vfs_unlink, vfs_rename, and etc,
-	 * does exactly same thing, i.e., they calls inode_permission().
-	 * So we just let they do the things.
-	 * If there are any security hole, just uncomment following if block.
-	 */
-#if 0
-	if (!err) {
-		/*
-		 * Permission check on lower_inode(=EXT4).
-		 * we check it with AID_MEDIA_RW permission
-		 */
-		struct inode *lower_inode;
-
-		OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
-
-		lower_inode = sdcardfs_lower_inode(inode);
-		err = inode_permission(lower_inode, mask);
-
-		REVERT_CRED();
-	}
-#endif
 	return err;
-
 }
 
 static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
@@ -763,7 +662,10 @@
 		goto out_err;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
+	saved_cred = override_fsids(SDCARDFS_SB(dentry->d_sb),
+						SDCARDFS_I(inode)->data);
+	if (!saved_cred)
+		return -ENOMEM;
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -822,7 +724,7 @@
 
 out:
 	sdcardfs_put_lower_path(dentry, &lower_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_err:
 	return err;
 }
@@ -905,13 +807,6 @@
 	.setattr	= sdcardfs_setattr_wrn,
 	.setattr2	= sdcardfs_setattr,
 	.getattr	= sdcardfs_getattr,
-	/* XXX Following operations are implemented,
-	 *     but FUSE(sdcard) or FAT does not support them
-	 *     These methods are *NOT* perfectly tested.
-	.symlink	= sdcardfs_symlink,
-	.link		= sdcardfs_link,
-	.mknod		= sdcardfs_mknod,
-	 */
 };
 
 const struct inode_operations sdcardfs_main_iops = {
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 98051996..beec63b 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -426,7 +426,12 @@
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+						SDCARDFS_I(dir)->data);
+	if (!saved_cred) {
+		ret = ERR_PTR(-ENOMEM);
+		goto out_err;
+	}
 
 	sdcardfs_get_lower_path(parent, &lower_parent_path);
 
@@ -457,7 +462,7 @@
 
 out:
 	sdcardfs_put_lower_path(parent, &lower_parent_path);
-	REVERT_CRED(saved_cred);
+	revert_fsids(saved_cred);
 out_err:
 	dput(parent);
 	return ret;
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index d5701dd..1ad7718 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -295,6 +295,13 @@
 	atomic_inc(&lower_sb->s_active);
 	sdcardfs_set_lower_super(sb, lower_sb);
 
+	sb->s_stack_depth = lower_sb->s_stack_depth + 1;
+	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+		pr_err("sdcardfs: maximum fs stacking depth exceeded\n");
+		err = -EINVAL;
+		goto out_sput;
+	}
+
 	/* inherit maxbytes from lower file system */
 	sb->s_maxbytes = lower_sb->s_maxbytes;
 
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 826afb5..ec2290a 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -88,31 +88,6 @@
 		(x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
 	} while (0)
 
-/* OVERRIDE_CRED() and REVERT_CRED()
- *	OVERRIDE_CRED()
- *		backup original task->cred
- *		and modifies task->cred->fsuid/fsgid to specified value.
- *	REVERT_CRED()
- *		restore original task->cred->fsuid/fsgid.
- * These two macro should be used in pair, and OVERRIDE_CRED() should be
- * placed at the beginning of a function, right after variable declaration.
- */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info)		\
-	do {	\
-		saved_cred = override_fsids(sdcardfs_sbi, info->data);	\
-		if (!saved_cred)	\
-			return -ENOMEM;	\
-	} while (0)
-
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info)	\
-	do {	\
-		saved_cred = override_fsids(sdcardfs_sbi, info->data);	\
-		if (!saved_cred)	\
-			return ERR_PTR(-ENOMEM);	\
-	} while (0)
-
-#define REVERT_CRED(saved_cred)	revert_fsids(saved_cred)
-
 /* Android 5.0 support */
 
 /* Permission mode for a specific node. Controls how file permissions
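The hunks above replace the OVERRIDE_CRED()/REVERT_CRED() macros (removed from sdcardfs.h) with direct override_fsids()/revert_fsids() calls, so the hidden early return no longer lives inside a macro. A minimal sketch of the resulting pattern in an sdcardfs inode operation, assuming only the two helper signatures visible in this diff (the function name is hypothetical):

static int sdcardfs_do_lower_op(struct inode *dir, struct dentry *dentry)
{
	const struct cred *saved_cred;
	int err = 0;

	/* Save current fsuid/fsgid and switch to the owning package's ids. */
	saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
				    SDCARDFS_I(dir)->data);
	if (!saved_cred)
		return -ENOMEM;	/* allocation failure is visible at the call site */

	/* ... work against the lower filesystem goes here ... */

	revert_fsids(saved_cred);
	return err;
}
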
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 05e4244..9d9d4aa 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -340,6 +340,9 @@
 
 	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
 
+	if (unlikely(length < 0))
+		return -EIO;
+
 	while (length) {
 		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
 		if (entry->error) {
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index bb2e77e..9236e7d 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -195,7 +195,11 @@
 		}
 
 		for (i = 0; i < blocks; i++) {
-			int size = le32_to_cpu(blist[i]);
+			int size = squashfs_block_size(blist[i]);
+			if (size < 0) {
+				err = size;
+				goto failure;
+			}
 			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
 		}
 		n -= blocks;
@@ -368,7 +372,24 @@
 			sizeof(size));
 	if (res < 0)
 		return res;
-	return le32_to_cpu(size);
+	return squashfs_block_size(size);
+}
+
+void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+{
+	int copied;
+	void *pageaddr;
+
+	pageaddr = kmap_atomic(page);
+	copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
+	memset(pageaddr + copied, 0, PAGE_SIZE - copied);
+	kunmap_atomic(pageaddr);
+
+	flush_dcache_page(page);
+	if (copied == avail)
+		SetPageUptodate(page);
+	else
+		SetPageError(page);
 }
 
 /* Copy data into page cache  */
@@ -377,7 +398,6 @@
 {
 	struct inode *inode = page->mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-	void *pageaddr;
 	int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
 	int start_index = page->index & ~mask, end_index = start_index | mask;
 
@@ -403,12 +423,7 @@
 		if (PageUptodate(push_page))
 			goto skip_page;
 
-		pageaddr = kmap_atomic(push_page);
-		squashfs_copy_data(pageaddr, buffer, offset, avail);
-		memset(pageaddr + avail, 0, PAGE_SIZE - avail);
-		kunmap_atomic(pageaddr);
-		flush_dcache_page(push_page);
-		SetPageUptodate(push_page);
+		squashfs_fill_page(push_page, buffer, offset, avail);
 skip_page:
 		unlock_page(push_page);
 		if (i != page->index)
@@ -417,10 +432,9 @@
 }
 
 /* Read datablock stored packed inside a fragment (tail-end packed block) */
-static int squashfs_readpage_fragment(struct page *page)
+static int squashfs_readpage_fragment(struct page *page, int expected)
 {
 	struct inode *inode = page->mapping->host;
-	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
 		squashfs_i(inode)->fragment_block,
 		squashfs_i(inode)->fragment_size);
@@ -431,8 +445,7 @@
 			squashfs_i(inode)->fragment_block,
 			squashfs_i(inode)->fragment_size);
 	else
-		squashfs_copy_cache(page, buffer, i_size_read(inode) &
-			(msblk->block_size - 1),
+		squashfs_copy_cache(page, buffer, expected,
 			squashfs_i(inode)->fragment_offset);
 
 	squashfs_cache_put(buffer);
@@ -440,7 +453,8 @@
 }
 
 static int squashfs_readpages_fragment(struct page *page,
-	struct list_head *readahead_pages, struct address_space *mapping)
+	struct list_head *readahead_pages, struct address_space *mapping,
+	int expected)
 {
 	if (!page) {
 		page = lru_to_page(readahead_pages);
@@ -451,24 +465,18 @@
 			return 0;
 		}
 	}
-	return squashfs_readpage_fragment(page);
+	return squashfs_readpage_fragment(page, expected);
 }
 
-static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
+static int squashfs_readpage_sparse(struct page *page, int expected)
 {
-	struct inode *inode = page->mapping->host;
-	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-	int bytes = index == file_end ?
-			(i_size_read(inode) & (msblk->block_size - 1)) :
-			 msblk->block_size;
-
-	squashfs_copy_cache(page, NULL, bytes, 0);
+	squashfs_copy_cache(page, NULL, expected, 0);
 	return 0;
 }
 
 static int squashfs_readpages_sparse(struct page *page,
-	struct list_head *readahead_pages, int index, int file_end,
-	struct address_space *mapping)
+	struct list_head *readahead_pages, struct address_space *mapping,
+	int expected)
 {
 	if (!page) {
 		page = lru_to_page(readahead_pages);
@@ -479,7 +487,7 @@
 			return 0;
 		}
 	}
-	return squashfs_readpage_sparse(page, index, file_end);
+	return squashfs_readpage_sparse(page, expected);
 }
 
 static int __squashfs_readpages(struct file *file, struct page *page,
@@ -496,6 +504,9 @@
 					     : lru_to_page(readahead_pages);
 		int page_index = cur_page->index;
 		int index = page_index >> (msblk->block_log - PAGE_SHIFT);
+		int expected = index == file_end ?
+			(i_size_read(inode) & (msblk->block_size - 1)) :
+			 msblk->block_size;
 
 		if (page_index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
 						PAGE_SHIFT))
@@ -511,8 +522,7 @@
 
 			if (bsize == 0) {
 				res = squashfs_readpages_sparse(page,
-					readahead_pages, index, file_end,
-					mapping);
+					readahead_pages, mapping, expected);
 			} else {
 				res = squashfs_readpages_block(page,
 					readahead_pages, &nr_pages, mapping,
@@ -520,7 +530,7 @@
 			}
 		} else {
 			res = squashfs_readpages_fragment(page,
-				readahead_pages, mapping);
+				readahead_pages, mapping, expected);
 		}
 		if (res)
 			return 0;
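The file.c changes above compute the number of bytes expected for the current block ("expected") once per iteration and funnel all page copies through the new squashfs_fill_page() helper, which zero-fills the tail of the page and only sets PageUptodate when the full amount was copied. A rough sketch of a caller, with hypothetical naming and the offset/error handling simplified relative to the in-tree call sites:

/* Fill one locked page-cache page from a squashfs cache entry. */
static void fill_one_page(struct page *page,
			  struct squashfs_cache_entry *buffer,
			  int offset, int expected)
{
	/* The real callers also account for the page's position inside the
	 * block; here the copy is simply capped at one page.
	 */
	int avail = buffer->error ? 0 : min_t(int, expected, PAGE_SIZE);

	squashfs_fill_page(page, buffer, offset, avail);
	unlock_page(page);
}
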
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 0ed6edb..0681fea 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -49,11 +49,16 @@
 				u64 *fragment_block)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	int block = SQUASHFS_FRAGMENT_INDEX(fragment);
-	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
-	u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
+	int block, offset, size;
 	struct squashfs_fragment_entry fragment_entry;
-	int size;
+	u64 start_block;
+
+	if (fragment >= msblk->fragments)
+		return -EIO;
+	block = SQUASHFS_FRAGMENT_INDEX(fragment);
+	offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+
+	start_block = le64_to_cpu(msblk->fragment_index[block]);
 
 	size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
 					&offset, sizeof(fragment_entry));
@@ -61,9 +66,7 @@
 		return size;
 
 	*fragment_block = le64_to_cpu(fragment_entry.start_block);
-	size = le32_to_cpu(fragment_entry.size);
-
-	return size;
+	return squashfs_block_size(fragment_entry.size);
 }
 
 
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index f4faab5..a9ea6ee 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -71,6 +71,7 @@
 				u64, u64, unsigned int);
 
 /* file.c */
+void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
 				int);
 
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 506f4ba..e664863 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -129,6 +129,12 @@
 
 #define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
 
+static inline int squashfs_block_size(__le32 raw)
+{
+	u32 size = le32_to_cpu(raw);
+	return (size >> 25) ? -EIO : size;
+}
+
 /*
  * Inode number ops.  Inodes consist of a compressed block number, and an
  * uncompressed offset within that block
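squashfs_block_size() above replaces bare le32_to_cpu() wherever an on-disk block or fragment size is read. A stored size carries the length plus, at most, the SQUASHFS_COMPRESSED_BIT_BLOCK flag, so any value with bits 25 and up set cannot come from a well-formed image and is turned into -EIO instead of being used as a length. A hypothetical caller showing how the result is meant to be consumed:

static int read_block_len(__le32 raw_on_disk)
{
	int size = squashfs_block_size(raw_on_disk);

	if (size < 0)
		return size;	/* -EIO: corrupt size field */

	/* size may still carry the compressed-bit flag; strip it when only
	 * the byte count is wanted.
	 */
	return SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
}
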
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 8a6995d..3b767ce 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -75,6 +75,7 @@
 	unsigned short				block_log;
 	long long				bytes_used;
 	unsigned int				inodes;
+	unsigned int				fragments;
 	int					xattr_ids;
 };
 #endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index e2a0a73..445ce58 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -175,6 +175,7 @@
 	msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
 	msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
 	msblk->inodes = le32_to_cpu(sblk->inodes);
+	msblk->fragments = le32_to_cpu(sblk->fragments);
 	flags = le16_to_cpu(sblk->flags);
 
 	TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -185,7 +186,7 @@
 	TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
 	TRACE("Block size %d\n", msblk->block_size);
 	TRACE("Number of inodes %d\n", msblk->inodes);
-	TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
+	TRACE("Number of fragments %d\n", msblk->fragments);
 	TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
 	TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
 	TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
@@ -272,7 +273,7 @@
 	sb->s_export_op = &squashfs_export_ops;
 
 handle_fragments:
-	fragments = le32_to_cpu(sblk->fragments);
+	fragments = msblk->fragments;
 	if (fragments == 0)
 		goto check_directory_table;
 
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 39c75a8..666986b 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -408,6 +408,50 @@
 EXPORT_SYMBOL_GPL(sysfs_chmod_file);
 
 /**
+ * sysfs_break_active_protection - break "active" protection
+ * @kobj: The kernel object @attr is associated with.
+ * @attr: The attribute to break the "active" protection for.
+ *
+ * With sysfs, just like kernfs, deletion of an attribute is postponed until
+ * all active .show() and .store() callbacks have finished, unless this
+ * function is called. Hence this function is useful in methods that implement
+ * self-deletion.
+ */
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+						  const struct attribute *attr)
+{
+	struct kernfs_node *kn;
+
+	kobject_get(kobj);
+	kn = kernfs_find_and_get(kobj->sd, attr->name);
+	if (kn)
+		kernfs_break_active_protection(kn);
+	return kn;
+}
+EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+
+/**
+ * sysfs_unbreak_active_protection - restore "active" protection
+ * @kn: Pointer returned by sysfs_break_active_protection().
+ *
+ * Undo the effects of sysfs_break_active_protection(). Since this function
+ * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
+ * argument passed to sysfs_break_active_protection(), and since that attribute
+ * may have been removed between the sysfs_break_active_protection() and
+ * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after
+ * this function has returned.
+ */
+void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+	struct kobject *kobj = kn->parent->priv;
+
+	kernfs_unbreak_active_protection(kn);
+	kernfs_put(kn);
+	kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
+
+/**
  * sysfs_remove_file_ns - remove an object attribute with a custom ns tag
  * @kobj: object we're acting for
  * @attr: attribute descriptor
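The two helpers added above target attributes whose ->store() method ends up removing the attribute's own kobject (a "delete"-style control file): without breaking the active protection, that removal would wait for the very ->store() call that triggered it. A sketch of the intended call pattern; remove_store() and do_self_delete() are hypothetical:

static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/* Pin the kobject and lift the "active" protection so the object
	 * behind this attribute can be torn down from its own store() call.
	 */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	if (kn) {
		do_self_delete(dev);	/* hypothetical teardown helper */
		/* Drops the kn and kobj references taken above; do not touch
		 * kn afterwards.
		 */
		sysfs_unbreak_active_protection(kn);
	}
	return count;
}
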
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 504658f..f8ce849 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -661,6 +661,11 @@
 	spin_lock(&ui->ui_lock);
 	ui->synced_i_size = ui->ui_size;
 	spin_unlock(&ui->ui_lock);
+	if (xent) {
+		spin_lock(&host_ui->ui_lock);
+		host_ui->synced_i_size = host_ui->ui_size;
+		spin_unlock(&host_ui->ui_lock);
+	}
 	mark_inode_clean(c, ui);
 	mark_inode_clean(c, host_ui);
 	return 0;
@@ -1265,7 +1270,7 @@
 	int err, len, compr_type, out_len;
 
 	out_len = le32_to_cpu(dn->size);
-	buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
+	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
 	if (!buf)
 		return -ENOMEM;
 
@@ -1344,7 +1349,16 @@
 		else if (err)
 			goto out_free;
 		else {
-			if (le32_to_cpu(dn->size) <= dlen)
+			int dn_len = le32_to_cpu(dn->size);
+
+			if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
+				ubifs_err(c, "bad data node (block %u, inode %lu)",
+					  blk, inode->i_ino);
+				ubifs_dump_node(c, dn);
+				goto out_free;
+			}
+
+			if (dn_len <= dlen)
 				dlen = 0; /* Nothing to do */
 			else {
 				int compr_type = le16_to_cpu(dn->compr_type);
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index 6c3a1ab..780a436 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -1091,10 +1091,6 @@
 		}
 	}
 
-	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
 	/*
 	 * After an unclean unmount, empty and freeable LEBs
 	 * may contain garbage - do not scan them.
@@ -1113,6 +1109,10 @@
 		return LPT_SCAN_CONTINUE;
 	}
 
+	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
 	sleb = ubifs_scan(c, lnum, 0, buf, 0);
 	if (IS_ERR(sleb)) {
 		ret = PTR_ERR(sleb);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 03dda1c..727a9e3 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1918,6 +1918,9 @@
 	int dev, vol;
 	char *endptr;
 
+	if (!name || !*name)
+		return ERR_PTR(-EINVAL);
+
 	/* First, try to open using the device node path method */
 	ubi = ubi_open_volume_path(name, mode);
 	if (!IS_ERR(ubi))
diff --git a/fs/xattr.c b/fs/xattr.c
index 1b00bab..1c91835 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -540,7 +540,7 @@
 	if (error > 0) {
 		if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
 		    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-			posix_acl_fix_xattr_to_user(kvalue, size);
+			posix_acl_fix_xattr_to_user(kvalue, error);
 		if (size && copy_to_user(value, kvalue, error))
 			error = -EFAULT;
 	} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
@@ -953,17 +953,19 @@
 	int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
-	if (inode->i_acl) {
-		err = xattr_list_one(&buffer, &remaining_size,
-				     XATTR_NAME_POSIX_ACL_ACCESS);
-		if (err)
-			return err;
-	}
-	if (inode->i_default_acl) {
-		err = xattr_list_one(&buffer, &remaining_size,
-				     XATTR_NAME_POSIX_ACL_DEFAULT);
-		if (err)
-			return err;
+	if (IS_POSIXACL(inode)) {
+		if (inode->i_acl) {
+			err = xattr_list_one(&buffer, &remaining_size,
+					     XATTR_NAME_POSIX_ACL_ACCESS);
+			if (err)
+				return err;
+		}
+		if (inode->i_default_acl) {
+			err = xattr_list_one(&buffer, &remaining_size,
+					     XATTR_NAME_POSIX_ACL_DEFAULT);
+			if (err)
+				return err;
+		}
 	}
 #endif
 
diff --git a/include/Kbuild b/include/Kbuild
index bab1145..9205b04 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1,2 +1,6 @@
 # Top-level Makefile calls into asm-$(ARCH)
 # List only non-arch directories below
+
+ifneq ($(VSERVICES_SUPPORT), "")
+header-y += vservices/
+endif
diff --git a/include/asm-generic/okl4_virq.h b/include/asm-generic/okl4_virq.h
new file mode 100644
index 0000000..2eca110
--- /dev/null
+++ b/include/asm-generic/okl4_virq.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-generic/okl4_virq.h
+ *
+ * Copyright (c) 2017 General Dynamics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OKL4_VIRQ_H__
+#define __OKL4_VIRQ_H__
+
+#include <linux/irq.h>
+#include <microvisor/microvisor.h>
+
+static inline okl4_virq_flags_t okl4_get_virq_payload(unsigned int irq)
+{
+	struct irq_data *irqd = irq_get_irq_data(irq);
+
+	if (WARN_ON_ONCE(!irqd))
+		return 0;
+
+	return _okl4_sys_interrupt_get_payload(irqd_to_hwirq(irqd)).payload;
+}
+
+#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 4e8551c..0a4c2d4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -779,8 +779,8 @@
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
 int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
 #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
 static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 {
@@ -798,11 +798,11 @@
 {
 	return 0;
 }
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
 	return 0;
 }
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	return 0;
 }
@@ -828,6 +828,19 @@
 struct file;
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 			unsigned long size, pgprot_t *vma_prot);
+
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+	return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+	return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7..0000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Modified to interface to the Linux kernel
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __CRYPTO_VMAC_H
-#define __CRYPTO_VMAC_H
-
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN	64
-#define VMAC_KEY_SIZE	128/* Must be 128, 192 or 256			*/
-#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES	128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-
-/*
- * This implementation uses u32 and u64 as names for unsigned 32-
- * and 64-bit integer types. These are defined in C99 stdint.h. The
- * following may need adaptation if you are not running a C99 or
- * Microsoft C environment.
- */
-struct vmac_ctx {
-	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
-	u64 polykey[2*VMAC_TAG_LEN/64];
-	u64 l3key[2*VMAC_TAG_LEN/64];
-	u64 polytmp[2*VMAC_TAG_LEN/64];
-	u64 cached_nonce[2];
-	u64 cached_aes[2];
-	int first_block_processed;
-};
-
-typedef u64 vmac_t;
-
-struct vmac_ctx_t {
-	struct crypto_cipher *child;
-	struct vmac_ctx __vmac_ctx;
-	u8 partial[VMAC_NHBYTES];	/* partial block */
-	int partial_size;		/* size of the partial block */
-};
-
-#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 50810be..1daeacb 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -346,6 +346,7 @@
 # define DP_PSR_FRAME_CAPTURE		    (1 << 3)
 # define DP_PSR_SELECTIVE_UPDATE	    (1 << 4)
 # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS     (1 << 5)
+# define DP_PSR_ENABLE_PSR2		    (1 << 6) /* eDP 1.4a */
 
 #define DP_ADAPTER_CTRL			    0x1a0
 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE   (1 << 0)
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index 8108c98..f9781b5 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,16 +26,32 @@
 #define PCLK_SRC_MUX_0_CLK	7
 #define PCLK_SRC_0_CLK		8
 #define PCLK_MUX_0_CLK		9
-#define VCO_CLK_1		10
-#define PLL_OUT_DIV_1_CLK	11
-#define BITCLK_SRC_1_CLK	12
-#define BYTECLK_SRC_1_CLK	13
-#define POST_BIT_DIV_1_CLK	14
-#define POST_VCO_DIV_1_CLK	15
-#define BYTECLK_MUX_1_CLK	16
-#define PCLK_SRC_MUX_1_CLK	17
-#define PCLK_SRC_1_CLK		18
-#define PCLK_MUX_1_CLK		19
+#define SHADOW_VCO_CLK_0		10
+#define SHADOW_PLL_OUT_DIV_0_CLK	11
+#define SHADOW_BITCLK_SRC_0_CLK		12
+#define SHADOW_BYTECLK_SRC_0_CLK	13
+#define SHADOW_POST_BIT_DIV_0_CLK	14
+#define SHADOW_POST_VCO_DIV_0_CLK	15
+#define SHADOW_PCLK_SRC_MUX_0_CLK	16
+#define SHADOW_PCLK_SRC_0_CLK		17
+#define VCO_CLK_1		18
+#define PLL_OUT_DIV_1_CLK	19
+#define BITCLK_SRC_1_CLK	20
+#define BYTECLK_SRC_1_CLK	21
+#define POST_BIT_DIV_1_CLK	22
+#define POST_VCO_DIV_1_CLK	23
+#define BYTECLK_MUX_1_CLK	24
+#define PCLK_SRC_MUX_1_CLK	25
+#define PCLK_SRC_1_CLK		26
+#define PCLK_MUX_1_CLK		27
+#define SHADOW_VCO_CLK_1		28
+#define SHADOW_PLL_OUT_DIV_1_CLK	29
+#define SHADOW_BITCLK_SRC_1_CLK		30
+#define SHADOW_BYTECLK_SRC_1_CLK	31
+#define SHADOW_POST_BIT_DIV_1_CLK	32
+#define SHADOW_POST_VCO_DIV_1_CLK	33
+#define SHADOW_PCLK_SRC_MUX_1_CLK	34
+#define SHADOW_PCLK_SRC_1_CLK		35
 
 /* DP PLL clocks */
 #define	DP_VCO_CLK	0
diff --git a/include/linux/Kbuild.vservices b/include/linux/Kbuild.vservices
new file mode 100644
index 0000000..392f559
--- /dev/null
+++ b/include/linux/Kbuild.vservices
@@ -0,0 +1,3 @@
+#
+# Virtual Services headers which need to be exported for user-space
+#
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 43690f5..9f721ee 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -80,6 +80,11 @@
 			   ARM_SMCCC_SMC_32,				\
 			   0, 0x8000)
 
+#define ARM_SMCCC_ARCH_WORKAROUND_2					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 0x7fff)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
@@ -196,47 +201,57 @@
 
 #define __declare_arg_0(a0, res)					\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
 	register unsigned long r1 SMCCC_REG(1);				\
 	register unsigned long r2 SMCCC_REG(2);				\
 	register unsigned long r3 SMCCC_REG(3)
 
 #define __declare_arg_1(a0, a1, res)					\
+	typeof(a1) __a1 = a1;						\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
-	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
+	register unsigned long r1 SMCCC_REG(1) = __a1;			\
 	register unsigned long r2 SMCCC_REG(2);				\
 	register unsigned long r3 SMCCC_REG(3)
 
 #define __declare_arg_2(a0, a1, a2, res)				\
+	typeof(a1) __a1 = a1;						\
+	typeof(a2) __a2 = a2;						\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
-	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
-	register typeof(a2)    r2 SMCCC_REG(2) = a2;			\
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
+	register unsigned long r1 SMCCC_REG(1) = __a1;			\
+	register unsigned long r2 SMCCC_REG(2) = __a2;			\
 	register unsigned long r3 SMCCC_REG(3)
 
 #define __declare_arg_3(a0, a1, a2, a3, res)				\
+	typeof(a1) __a1 = a1;						\
+	typeof(a2) __a2 = a2;						\
+	typeof(a3) __a3 = a3;						\
 	struct arm_smccc_res   *___res = res;				\
-	register u32           r0 SMCCC_REG(0) = a0;			\
-	register typeof(a1)    r1 SMCCC_REG(1) = a1;			\
-	register typeof(a2)    r2 SMCCC_REG(2) = a2;			\
-	register typeof(a3)    r3 SMCCC_REG(3) = a3
+	register unsigned long r0 SMCCC_REG(0) = (u32)a0;		\
+	register unsigned long r1 SMCCC_REG(1) = __a1;			\
+	register unsigned long r2 SMCCC_REG(2) = __a2;			\
+	register unsigned long r3 SMCCC_REG(3) = __a3
 
 #define __declare_arg_4(a0, a1, a2, a3, a4, res)			\
+	typeof(a4) __a4 = a4;						\
 	__declare_arg_3(a0, a1, a2, a3, res);				\
-	register typeof(a4) r4 SMCCC_REG(4) = a4
+	register unsigned long r4 SMCCC_REG(4) = __a4
 
 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)			\
+	typeof(a5) __a5 = a5;						\
 	__declare_arg_4(a0, a1, a2, a3, a4, res);			\
-	register typeof(a5) r5 SMCCC_REG(5) = a5
+	register unsigned long r5 SMCCC_REG(5) = __a5
 
 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)		\
+	typeof(a6) __a6 = a6;						\
 	__declare_arg_5(a0, a1, a2, a3, a4, a5, res);			\
-	register typeof(a6) r6 SMCCC_REG(6) = a6
+	register unsigned long r6 SMCCC_REG(6) = __a6
 
 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)		\
+	typeof(a7) __a7 = a7;						\
 	__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);		\
-	register typeof(a7) r7 SMCCC_REG(7) = a7
+	register unsigned long r7 SMCCC_REG(7) = __a7
 
 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
 #define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
@@ -293,5 +308,10 @@
  */
 #define arm_smccc_1_1_hvc(...)	__arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
 
+/* Return codes defined in ARM DEN 0070A */
+#define SMCCC_RET_SUCCESS			0
+#define SMCCC_RET_NOT_SUPPORTED			-1
+#define SMCCC_RET_NOT_REQUIRED			-2
+
 #endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/
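The __declare_arg_*() rework above evaluates each argument once and pins it into a full-width unsigned long register (a0 is truncated to u32, matching the 32-bit SMCCC function ID), instead of sizing the register variable after the caller's type. Combined with the new ARM_SMCCC_ARCH_WORKAROUND_2 ID and SMCCC_RET_* codes, a firmware probe could look roughly like the sketch below; it assumes ARM_SMCCC_ARCH_FEATURES_FUNC_ID is available as in mainline and is not the in-tree SSBD detection logic:

#include <linux/arm-smccc.h>

/* Ask firmware whether the ARCH_WORKAROUND_2 (SSBD) call is implemented. */
static bool ssbd_call_available(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	/* NOT_REQUIRED means this CPU does not need the mitigation at all. */
	return (int)res.a0 == SMCCC_RET_SUCCESS ||
	       (int)res.a0 == SMCCC_RET_NOT_REQUIRED;
}
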
diff --git a/include/linux/batterydata-lib.h b/include/linux/batterydata-lib.h
index 39517f8..d4cbff4 100644
--- a/include/linux/batterydata-lib.h
+++ b/include/linux/batterydata-lib.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -160,55 +160,55 @@
 extern struct bms_battery_data QRD_4v35_2000mAh_data;
 extern struct bms_battery_data  qrd_4v2_1300mah_data;
 
-int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp);
-int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc);
-int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+int interpolate_fcc_bms(struct single_row_lut *fcc_temp_lut, int batt_temp);
+int interpolate_scalingfactor_bms(struct sf_lut *sf_lut, int row_entry, int pc);
+int interpolate_scalingfactor_fcc_bms(struct single_row_lut *fcc_sf_lut,
 				int cycles);
-int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+int interpolate_pc_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 				int batt_temp_degc, int ocv);
-int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+int interpolate_ocv_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 				int batt_temp_degc, int pc);
-int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+int interpolate_slope_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 					int batt_temp, int pc);
-int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+int interpolate_acc_bms(struct ibat_temp_acc_lut *ibat_acc_lut,
 					int batt_temp, int ibat);
-int linear_interpolate(int y0, int x0, int y1, int x1, int x);
+int linear_interpolate_bms(int y0, int x0, int y1, int x1, int x);
 #else
-static inline int interpolate_fcc(struct single_row_lut *fcc_temp_lut,
+static inline int interpolate_fcc_bms(struct single_row_lut *fcc_temp_lut,
 			int batt_temp)
 {
 	return -EINVAL;
 }
-static inline int interpolate_scalingfactor(struct sf_lut *sf_lut,
+static inline int interpolate_scalingfactor_bms(struct sf_lut *sf_lut,
 			int row_entry, int pc)
 {
 	return -EINVAL;
 }
-static inline int interpolate_scalingfactor_fcc(
+static inline int interpolate_scalingfactor_fcc_bms(
 			struct single_row_lut *fcc_sf_lut, int cycles)
 {
 	return -EINVAL;
 }
-static inline int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+static inline int interpolate_pc_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 			int batt_temp_degc, int ocv)
 {
 	return -EINVAL;
 }
-static inline int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+static inline int interpolate_ocv_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 			int batt_temp_degc, int pc)
 {
 	return -EINVAL;
 }
-static inline int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+static inline int interpolate_slope_bms(struct pc_temp_ocv_lut *pc_temp_ocv,
 					int batt_temp, int pc)
 {
 	return -EINVAL;
 }
-static inline int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+static inline int linear_interpolate_bms(int y0, int x0, int y1, int x1, int x)
 {
 	return -EINVAL;
 }
-static inline int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+static inline int interpolate_acc_bms(struct ibat_temp_acc_lut *ibat_acc_lut,
 						int batt_temp, int ibat)
 {
 	return -EINVAL;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 42ad6b5..a149671 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -78,6 +78,9 @@
 	*/
 	struct inode            *bi_dio_inode;
 #endif
+#ifdef CONFIG_DM_DEFAULT_KEY
+	int bi_crypt_skip;
+#endif
 
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 6f3da08..a883715 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -127,4 +127,22 @@
 #else
 #define PANIC_CORRUPTION 0
 #endif  /* CONFIG_PANIC_ON_DATA_CORRUPTION */
+/*
+ * Detected data corruption should stop operation on the affected structures.
+ * The return value must be checked and sanely acted on by the caller.
+ */
+static inline __must_check bool check_data_corruption(bool v) { return v; }
+#define CHECK_DATA_CORRUPTION(condition, fmt, ...)			 \
+	check_data_corruption(({					 \
+		bool corruption = unlikely(condition);			 \
+		if (corruption) {					 \
+			if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
+				pr_err(fmt, ##__VA_ARGS__);		 \
+				BUG();					 \
+			} else						 \
+				WARN(1, fmt, ##__VA_ARGS__);		 \
+		}							 \
+		corruption;						 \
+	}))
+
 #endif	/* _LINUX_BUG_H */
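CHECK_DATA_CORRUPTION() centralizes the "WARN or BUG" policy behind CONFIG_BUG_ON_DATA_CORRUPTION, and the check_data_corruption() wrapper makes the result __must_check so callers cannot ignore a detected problem. It is the building block for the hardened list validation added to include/linux/list.h later in this diff; a minimal sketch of a caller:

static bool list_node_ok(struct list_head *entry)
{
	/* Either WARN()s or BUG()s depending on the config; on WARN the
	 * caller is expected to skip the operation.
	 */
	if (CHECK_DATA_CORRUPTION(entry->next == NULL,
				  "list corruption: entry->next is NULL (entry=%p)\n",
				  entry))
		return false;

	return true;
}
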
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index e4f142c..eeef7d6 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -16,6 +16,14 @@
  */
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
+#undef __no_sanitize_address
+#define __no_sanitize_address __attribute__((no_sanitize("address")))
+
+/* Clang doesn't have a way to turn it off per-function, yet. */
+#ifdef __noretpoline
+#undef __noretpoline
+#endif
+
 #ifdef CONFIG_LTO_CLANG
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 #define __norecordmcount \
@@ -32,8 +40,3 @@
 #if __has_feature(address_sanitizer)
 #define __SANITIZE_ADDRESS__
 #endif
-
-/* Clang doesn't have a way to turn it off per-function, yet. */
-#ifdef __noretpoline
-#undef __noretpoline
-#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a6d1bf2..8e82e33 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -65,25 +65,40 @@
 #endif
 
 /*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline	__attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
+/*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old.
  * GCC does not warn about unused static inline functions for
  * -Wunused-function.  This turns out to avoid the need for complex #ifdef
  * directives.  Suppress the warning in clang as well by using "unused"
  * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline		__attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__	__attribute__((always_inline,unused)) notrace
-#define __inline __inline	__attribute__((always_inline,unused)) notrace
+#define inline \
+	inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline		__attribute__((unused)) notrace
-#define __inline__ __inline__	__attribute__((unused)) notrace
-#define __inline __inline	__attribute__((unused)) notrace
+#define inline inline		__attribute__((unused)) notrace __gnu_inline
 #endif
 
+#define __inline__ inline
+#define __inline inline
 #define __always_inline	inline __attribute__((always_inline))
 #define  noinline	__attribute__((noinline))
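With the change above, the kernel's "inline" always carries __gnu_inline, so "extern inline" keeps its gnu89 meaning: the body is only used for inlining and no out-of-line symbol is emitted from the translation unit. Under C99 semantics, each .c file including such a header would emit its own external definition and the link would fail with multiple-definition errors. A stand-alone illustration of the attribute (not kernel code):

/* Inlined where possible; this TU emits no external definition of foo(),
 * so the header can be included from many .c files without link errors.
 * If a call is not inlined, an out-of-line definition must exist elsewhere.
 */
extern inline __attribute__((gnu_inline)) int foo(int x)
{
	return x * 2;
}
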
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 912d945..fc3192b 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -29,7 +29,7 @@
 };
 
 extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
 
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct device *get_cpu_device(unsigned cpu);
@@ -52,6 +52,8 @@
 				   struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
 					  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -257,6 +259,25 @@
 static inline void cpuhp_report_idle_dead(void) { }
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
+enum cpuhp_smt_control {
+	CPU_SMT_ENABLED,
+	CPU_SMT_DISABLED,
+	CPU_SMT_FORCE_DISABLED,
+	CPU_SMT_NOT_SUPPORTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_check_topology_early(void);
+extern void cpu_smt_check_topology(void);
+#else
+# define cpu_smt_control		(CPU_SMT_ENABLED)
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_check_topology_early(void) { }
+static inline void cpu_smt_check_topology(void) { }
+#endif
+
 #define IDLE_START 1
 #define IDLE_END 2
 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 0fbce32..48bc2b7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -425,6 +425,7 @@
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
 #define CPUFREQ_STOP			(5)
+#define CPUFREQ_INCOMPATIBLE		(6)
 
 /* Govinfo Notifiers */
 #define CPUFREQ_LOAD_CHANGE		(0)
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
index 3fb3875..356a3fa 100644
--- a/include/linux/cpufreq_times.h
+++ b/include/linux/cpufreq_times.h
@@ -22,6 +22,7 @@
 
 #ifdef CONFIG_CPU_FREQ_TIMES
 void cpufreq_task_times_init(struct task_struct *p);
+void cpufreq_task_times_alloc(struct task_struct *p);
 void cpufreq_task_times_exit(struct task_struct *p);
 int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
 			    struct pid *pid, struct task_struct *p);
@@ -31,6 +32,11 @@
 void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
 int single_uid_time_in_state_open(struct inode *inode, struct file *file);
 #else
+static inline void cpufreq_task_times_init(struct task_struct *p) {}
+static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
+static inline void cpufreq_task_times_exit(struct task_struct *p) {}
+static inline void cpufreq_acct_update_power(struct task_struct *p,
+					     u64 cputime) {}
 static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
 static inline void cpufreq_times_record_transition(
 	struct cpufreq_freqs *freq) {}
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6666ea0..81339e1 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -110,6 +110,11 @@
 #define CRYPTO_ALG_OPTIONAL_KEY		0x00004000
 
 /*
+ * Don't trigger module loading
+ */
+#define CRYPTO_NOLOAD			0x00008000
+
+/*
  * Transform masks and values (for crt_flags).
  */
 #define CRYPTO_TFM_NEED_KEY		0x00000001
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 1f6d0ac..83a36f8 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -32,6 +32,7 @@
 #define UART_MODE			4
 #define SOCKET_MODE			5
 #define CALLBACK_MODE			6
+#define PCIE_MODE			7
 
 /* different values that go in for diag_data_type */
 #define DATA_TYPE_EVENT			0
@@ -146,7 +147,7 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0xC85
+#define APPS_EVENT_LAST_ID		0xC92
 
 #define MSG_SSID_0			0
 #define MSG_SSID_0_LAST			129
@@ -181,7 +182,7 @@
 #define MSG_SSID_15			8000
 #define MSG_SSID_15_LAST		8000
 #define MSG_SSID_16			8500
-#define MSG_SSID_16_LAST		8529
+#define MSG_SSID_16_LAST		8531
 #define MSG_SSID_17			9000
 #define MSG_SSID_17_LAST		9008
 #define MSG_SSID_18			9500
@@ -777,7 +778,9 @@
 	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
 		MSG_LVL_FATAL,
 	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
-		MSG_LVL_FATAL
+		MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_MED
 };
 
 static const uint32_t msg_bld_masks_17[] =  {
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 8dce6fd..f17e1f2 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -17,6 +17,7 @@
 #define __DMA_IOMMU_H
 
 #ifdef __KERNEL__
+#include <linux/types.h>
 #include <asm/errno.h>
 
 #ifdef CONFIG_IOMMU_DMA
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index aa5db8b..f70f8ac 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -304,11 +304,6 @@
  * For NAT entries
  */
 #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED				\
-	((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) /		\
-	BITS_PER_LONG * BITS_PER_LONG)
-
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 1e03614..f5371c2 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -257,7 +257,9 @@
 extern int fscrypt_using_hardware_encryption(const struct inode *inode);
 extern void fscrypt_set_ice_dun(const struct inode *inode,
 	struct bio *bio, u64 dun);
-extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted);
+extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted,
+		int bi_crypt_skip);
+extern void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip);
 #else
 static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
 {
@@ -270,8 +272,12 @@
 	return;
 }
 
+static inline void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip)
+{
+}
+
 static inline bool fscrypt_mergeable_bio(struct bio *bio,
-	u64 dun, bool bio_encrypted)
+	u64 dun, bool bio_encrypted, int bi_crypt_skip)
 {
 	return true;
 }
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 172d418..5017122 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -67,16 +67,6 @@
 	return;
 }
 
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
-	return;
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
-	return;
-}
-
 /* policy.c */
 static inline int fscrypt_ioctl_set_policy(struct file *filp,
 					   const void __user *arg)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 1ed79ee..99b6c52 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -28,7 +28,7 @@
 	int (*set_context)(struct inode *, const void *, size_t, void *);
 	bool (*dummy_context)(struct inode *);
 	bool (*empty_dir)(struct inode *);
-	unsigned (*max_namelen)(struct inode *);
+	unsigned int max_namelen;
 };
 
 struct fscrypt_ctx {
@@ -74,20 +74,6 @@
 
 extern void fscrypt_restore_control_page(struct page *);
 
-extern const struct dentry_operations fscrypt_d_ops;
-
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
-	d_set_d_op(dentry, &fscrypt_d_ops);
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
-	spin_lock(&dentry->d_lock);
-	dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
-	spin_unlock(&dentry->d_lock);
-}
-
 /* policy.c */
 extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
 extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 649e917..270eef7 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -16,6 +16,7 @@
 #define __FSL_GUTS_H__
 
 #include <linux/types.h>
+#include <linux/io.h>
 
 /**
  * Global Utility Registers.
diff --git a/include/linux/iio/imu/mpu.h b/include/linux/iio/imu/mpu.h
new file mode 100644
index 0000000..4dbb86c
--- /dev/null
+++ b/include/linux/iio/imu/mpu.h
@@ -0,0 +1,124 @@
+/*
+* Copyright (C) 2012-2017 InvenSense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*/
+
+#ifndef __MPU_H_
+#define __MPU_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#endif
+
+enum secondary_slave_type {
+	SECONDARY_SLAVE_TYPE_NONE,
+	SECONDARY_SLAVE_TYPE_ACCEL,
+	SECONDARY_SLAVE_TYPE_COMPASS,
+	SECONDARY_SLAVE_TYPE_PRESSURE,
+	SECONDARY_SLAVE_TYPE_ALS,
+
+	SECONDARY_SLAVE_TYPE_TYPES
+};
+
+enum ext_slave_id {
+	ID_INVALID = 0,
+	GYRO_ID_MPU3050,
+	GYRO_ID_MPU6050A2,
+	GYRO_ID_MPU6050B1,
+	GYRO_ID_MPU6050B1_NO_ACCEL,
+	GYRO_ID_ITG3500,
+
+	ACCEL_ID_LIS331,
+	ACCEL_ID_LSM303DLX,
+	ACCEL_ID_LIS3DH,
+	ACCEL_ID_KXSD9,
+	ACCEL_ID_KXTF9,
+	ACCEL_ID_BMA150,
+	ACCEL_ID_BMA222,
+	ACCEL_ID_BMA250,
+	ACCEL_ID_ADXL34X,
+	ACCEL_ID_MMA8450,
+	ACCEL_ID_MMA845X,
+	ACCEL_ID_MPU6050,
+
+	COMPASS_ID_AK8963,
+	COMPASS_ID_AK8975,
+	COMPASS_ID_AK8972,
+	COMPASS_ID_AMI30X,
+	COMPASS_ID_AMI306,
+	COMPASS_ID_YAS529,
+	COMPASS_ID_YAS530,
+	COMPASS_ID_HMC5883,
+	COMPASS_ID_LSM303DLH,
+	COMPASS_ID_LSM303DLM,
+	COMPASS_ID_MMC314X,
+	COMPASS_ID_HSCDTD002B,
+	COMPASS_ID_HSCDTD004A,
+	COMPASS_ID_MLX90399,
+	COMPASS_ID_AK09911,
+	COMPASS_ID_AK09912,
+	COMPASS_ID_AK09916,
+
+	PRESSURE_ID_BMP085,
+	PRESSURE_ID_BMP280,
+
+	ALS_ID_APDS_9900,
+	ALS_ID_APDS_9930,
+	ALS_ID_TSL_2772,
+};
+
+#define INV_PROD_KEY(ver, rev) (ver * 100 + rev)
+/**
+ * struct mpu_platform_data - Platform data for the mpu driver
+ * @int_config:		Bits [7:3] of the int config register.
+ * @level_shifter:	0: VLogic, 1: VDD
+ * @orientation:	Orientation matrix of the gyroscope
+ * @sec_slave_type:     secondary slave device type, can be compass, accel, etc
+ * @sec_slave_id:       id of the secondary slave device
+ * @secondary_i2c_addr: secondary device's i2c address
+ * @secondary_orientation: secondary device's orientation matrix
+ * @aux_slave_type: auxiliary slave. Another slave device type
+ * @aux_slave_id: auxiliary slave ID.
+ * @aux_i2c_addr: auxiliary device I2C address.
+ * @read_only_slave_type: read only slave type.
+ * @read_only_slave_id: read only slave device ID.
+ * @read_only_i2c_addr: read only slave device address.
+ *
+ * Contains platform-specific information on how to configure the MPU3050 to
+ * work on this platform.  The orientation matrices are 3x3 rotation matrices
+ * that are applied to the data to rotate from the mounting orientation to the
+ * platform orientation.  The values must be one of 0, 1, or -1, and each row
+ * and column should have exactly one non-zero value.
+ */
+struct mpu_platform_data {
+	__u8 int_config;
+	__u8 level_shifter;
+	__s8 orientation[9];
+	enum secondary_slave_type sec_slave_type;
+	enum ext_slave_id sec_slave_id;
+	__u16 secondary_i2c_addr;
+	__s8 secondary_orientation[9];
+	enum secondary_slave_type aux_slave_type;
+	enum ext_slave_id aux_slave_id;
+	__u16 aux_i2c_addr;
+	enum secondary_slave_type read_only_slave_type;
+	enum ext_slave_id read_only_slave_id;
+	__u16 read_only_i2c_addr;
+#ifdef CONFIG_OF
+	int (*power_on)(struct mpu_platform_data *);
+	int (*power_off)(struct mpu_platform_data *);
+	struct regulator *vdd_ana;
+	struct regulator *vdd_i2c;
+#endif
+};
+
+#endif	/* __MPU_H_ */
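The kernel-doc above describes each orientation field as a 3x3 rotation matrix whose entries are 0, 1 or -1 with exactly one non-zero entry per row and column. A purely illustrative platform-data initializer (all values hypothetical, not taken from this patch), with an identity mounting for the MPU and a compass rotated 90 degrees around Z:

static struct mpu_platform_data example_mpu_pdata = {
	.int_config		= 0x10,	/* hypothetical INT config bits */
	.level_shifter		= 0,	/* VLogic */
	/* sensor mounted flush with the platform: identity matrix */
	.orientation		= {  1,  0,  0,
				     0,  1,  0,
				     0,  0,  1 },
	.sec_slave_type		= SECONDARY_SLAVE_TYPE_COMPASS,
	.sec_slave_id		= COMPASS_ID_AK09916,
	.secondary_i2c_addr	= 0x0c,	/* hypothetical address */
	/* compass rotated 90 degrees around Z relative to the platform */
	.secondary_orientation	= {  0, -1,  0,
				     1,  0,  0,
				     0,  0,  1 },
};
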
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 23e129e..e353f66 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -125,6 +125,7 @@
  * Extended Capability Register
  */
 
+#define ecap_dit(e)		((e >> 41) & 0x1)
 #define ecap_pasid(e)		((e >> 40) & 0x1)
 #define ecap_pss(e)		((e >> 35) & 0x1f)
 #define ecap_eafs(e)		((e >> 34) & 0x1)
@@ -294,6 +295,7 @@
 #define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
 #define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
 #define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
 #define QI_DEV_IOTLB_SIZE	1
 #define QI_DEV_IOTLB_MAX_INVS	32
 
@@ -318,6 +320,7 @@
 #define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
 #define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS	32
 
 #define QI_PGRP_IDX(idx)	(((u64)(idx)) << 55)
@@ -463,9 +466,8 @@
 			     u8 fm, u64 type);
 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
-			       u64 addr, unsigned mask);
-
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+			u16 qdep, u64 addr, unsigned mask);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c2a0f00..734377a 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -292,6 +292,8 @@
 	return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
+extern u64 jiffies64_to_nsecs(u64 j);
+
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 /*
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 616eef4..df58b01 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -208,6 +208,7 @@
 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
 					    /* (doesn't imply presence) */
 	ATA_FLAG_SATA		= (1 << 1),
+	ATA_FLAG_NO_LPM		= (1 << 2), /* host not happy with LPM */
 	ATA_FLAG_NO_LOG_PAGE	= (1 << 5), /* do not issue log page read */
 	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
 	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
diff --git a/include/linux/list.h b/include/linux/list.h
index 5809e9a2..d1039ec 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -28,27 +28,42 @@
 	list->prev = list;
 }
 
+#ifdef CONFIG_DEBUG_LIST
+extern bool __list_add_valid(struct list_head *new,
+			      struct list_head *prev,
+			      struct list_head *next);
+extern bool __list_del_entry_valid(struct list_head *entry);
+#else
+static inline bool __list_add_valid(struct list_head *new,
+				struct list_head *prev,
+				struct list_head *next)
+{
+	return true;
+}
+static inline bool __list_del_entry_valid(struct list_head *entry)
+{
+	return true;
+}
+#endif
+
 /*
  * Insert a new entry between two known consecutive entries.
  *
  * This is only for internal list manipulation where we know
  * the prev/next entries already!
  */
-#ifndef CONFIG_DEBUG_LIST
 static inline void __list_add(struct list_head *new,
 			      struct list_head *prev,
 			      struct list_head *next)
 {
+	if (!__list_add_valid(new, prev, next))
+		return;
+
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
 	WRITE_ONCE(prev->next, new);
 }
-#else
-extern void __list_add(struct list_head *new,
-			      struct list_head *prev,
-			      struct list_head *next);
-#endif
 
 /**
  * list_add - add a new entry
@@ -96,22 +111,20 @@
  * Note: list_empty() on entry does not return true after this, the entry is
  * in an undefined state.
  */
-#ifndef CONFIG_DEBUG_LIST
 static inline void __list_del_entry(struct list_head *entry)
 {
+	if (!__list_del_entry_valid(entry))
+		return;
+
 	__list_del(entry->prev, entry->next);
 }
 
 static inline void list_del(struct list_head *entry)
 {
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
 }
-#else
-extern void __list_del_entry(struct list_head *entry);
-extern void list_del(struct list_head *entry);
-#endif
 
 /**
  * list_replace - replace old entry by new one
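After the hunk above, __list_add() and __list_del_entry() are always inline and simply consult __list_add_valid()/__list_del_entry_valid(); a CONFIG_DEBUG_LIST build supplies out-of-line checkers that report corruption and return false so the operation is skipped. A sketch of such a checker built on the CHECK_DATA_CORRUPTION() helper added to linux/bug.h earlier in this diff (simplified relative to lib/list_debug.c):

bool __list_add_valid(struct list_head *new, struct list_head *prev,
		      struct list_head *next)
{
	/* Returning false makes the inline __list_add() skip the insertion. */
	if (CHECK_DATA_CORRUPTION(next->prev != prev,
			"list_add corruption: next->prev should be prev (%p)\n",
			prev) ||
	    CHECK_DATA_CORRUPTION(prev->next != next,
			"list_add corruption: prev->next should be next (%p)\n",
			next) ||
	    CHECK_DATA_CORRUPTION(new == prev || new == next,
			"list_add double add: new=%p, prev=%p, next=%p\n",
			new, prev, next))
		return false;

	return true;
}
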
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 7751d72..859fd20 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -786,7 +786,7 @@
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cb2cc30..91db632 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -410,7 +410,7 @@
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 	rwlock_t mm_rb_lock;
 #endif
-	u32 vmacache_seqnum;                   /* per-thread vmacache */
+	u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 48b7281..bc771bf 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -148,7 +148,8 @@
 
 extern int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks);
 extern int mmc_cmdq_halt(struct mmc_host *host, bool enable);
-extern int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host);
+extern int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host,
+				unsigned long timeout);
 extern void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err);
 extern int mmc_cmdq_start_req(struct mmc_host *host,
 			      struct mmc_cmdq_req *cmdq_req);
@@ -231,12 +232,14 @@
 
 extern void mmc_blk_init_bkops_statistics(struct mmc_card *card);
 
-extern void mmc_deferred_scaling(struct mmc_host *host);
+extern void mmc_deferred_scaling(struct mmc_host *host, unsigned long timeout);
 extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
 	bool lock_needed);
 extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
 	bool lock_needed, bool is_cmdq_dcmd);
 extern int mmc_recovery_fallback_lower_speed(struct mmc_host *host);
+extern void mmc_cmdq_up_rwsem(struct mmc_host *host);
+extern int mmc_cmdq_down_rwsem(struct mmc_host *host, struct request *rq);
 
 /**
  *	mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4cbe862..5b79ba2 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -286,6 +286,7 @@
  * @wait		waiting for all conditions described in
 *			mmc_cmdq_ready_wait to be satisfied before
  *			issuing the new request to LLD.
+ * @err_rwsem		synchronizes issue/completion/error-handler ctx
  */
 struct mmc_cmdq_context_info {
 	unsigned long	active_reqs; /* in-flight requests */
@@ -299,6 +300,7 @@
 	wait_queue_head_t	queue_empty_wq;
 	wait_queue_head_t	wait;
 	int active_small_sector_read_reqs;
+	struct rw_semaphore err_rwsem;
 };
 
 /**
@@ -582,6 +584,8 @@
 
 	bool			err_occurred;
 	u32			err_stats[MMC_ERR_MAX];
+	ktime_t			last_failed_rq_time;
+	ktime_t			last_completed_rq_time;
 
 	struct mmc_async_req	*areq;		/* active async req */
 	struct mmc_context_info	context_info;	/* async synchronization info */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index d43ef96..3e4d4f4 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -34,6 +34,7 @@
 #define SDIO_DEVICE_ID_BROADCOM_4335_4339	0x4335
 #define SDIO_DEVICE_ID_BROADCOM_4339		0x4339
 #define SDIO_DEVICE_ID_BROADCOM_43362		0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43364		0xa9a4
 #define SDIO_DEVICE_ID_BROADCOM_43430		0xa9a6
 #define SDIO_DEVICE_ID_BROADCOM_4345		0x4345
 #define SDIO_DEVICE_ID_BROADCOM_4354		0x4354
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 815d0f4..57203c7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -178,6 +178,7 @@
 	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
+	NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
 	NR_VM_NODE_STAT_ITEMS
 };
 
diff --git a/include/linux/msm_mhi_dev.h b/include/linux/msm_mhi_dev.h
index b96591b..54ec9f3 100644
--- a/include/linux/msm_mhi_dev.h
+++ b/include/linux/msm_mhi_dev.h
@@ -67,7 +67,7 @@
 	u32                             snd_cmpl;
 	void                            *context;
 	size_t                          len;
-	size_t                          actual_len;
+	size_t                          transfer_len;
 	uint32_t                        rd_offset;
 	struct mhi_dev_client           *client;
 	struct list_head                list;
@@ -132,6 +132,7 @@
 	MHI_CLIENT_RESERVED_1_UPPER = 99,
 	MHI_CLIENT_IP_HW_0_OUT = 100,
 	MHI_CLIENT_IP_HW_0_IN = 101,
+	MHI_CLIENT_ADPL_IN = 102,
 	MHI_CLIENT_RESERVED_2_LOWER = 102,
 	MHI_CLIENT_RESERVED_2_UPPER = 127,
 	MHI_MAX_CHANNELS = 102,
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 1d6a935..8793f5a 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -65,8 +65,14 @@
 static inline u32
 ip_set_timeout_get(unsigned long *timeout)
 {
-	return *timeout == IPSET_ELEM_PERMANENT ? 0 :
-		jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+	u32 t;
+
+	if (*timeout == IPSET_ELEM_PERMANENT)
+		return 0;
+
+	t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+	/* Zero value in userspace means no timeout */
+	return t == 0 ? 1 : t;
 }
 
 #endif	/* __KERNEL__ */
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 984b211..ea8a977 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -123,4 +123,9 @@
 /* True if the target is not a standard target */
 #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
 
+static inline bool ebt_invalid_target(int target)
+{
+	return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
 #endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 34ed577..e69445a 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -97,4 +97,8 @@
 extern int sysctl_oom_dump_tasks;
 extern int sysctl_oom_kill_allocating_task;
 extern int sysctl_panic_on_oom;
+extern int sysctl_reap_mem_on_sigkill;
+
+/* calls for LMK reaper */
+extern void add_to_oom_reaper(struct task_struct *p);
 #endif /* _INCLUDE_LINUX_OOM_H */
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
new file mode 100644
index 0000000..8712ff7
--- /dev/null
+++ b/include/linux/overflow.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define is_signed_type(type)       (((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	__builtin_add_overflow(__a, __b, __d);	\
+})
+
+#define check_sub_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	__builtin_sub_overflow(__a, __b, __d);	\
+})
+
+#define check_mul_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	__builtin_mul_overflow(__a, __b, __d);	\
+})
+
+#else
+
+
+/* Checking for unsigned overflow is relatively easy without causing UB. */
+#define __unsigned_add_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = __a + __b;			\
+	*__d < __a;				\
+})
+#define __unsigned_sub_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = __a - __b;			\
+	__a < __b;				\
+})
+/*
+ * If one of a or b is a compile-time constant, this avoids a division.
+ */
+#define __unsigned_mul_overflow(a, b, d) ({		\
+	typeof(a) __a = (a);				\
+	typeof(b) __b = (b);				\
+	typeof(d) __d = (d);				\
+	(void) (&__a == &__b);				\
+	(void) (&__a == __d);				\
+	*__d = __a * __b;				\
+	__builtin_constant_p(__b) ?			\
+	  __b > 0 && __a > type_max(typeof(__a)) / __b : \
+	  __a > 0 && __b > type_max(typeof(__b)) / __a;	 \
+})
+
+/*
+ * For signed types, detecting overflow is much harder, especially if
+ * we want to avoid UB. But the interface of these macros is such that
+ * we must provide a result in *d, and in fact we must produce the
+ * result promised by gcc's builtins, which is simply the possibly
+ * wrapped-around value. Fortunately, we can just formally do the
+ * operations in the widest relevant unsigned type (u64) and then
+ * truncate the result - gcc is smart enough to generate the same code
+ * with and without the (u64) casts.
+ */
+
+/*
+ * Adding two signed integers can overflow only if they have the same
+ * sign, and overflow has happened iff the result has the opposite
+ * sign.
+ */
+#define __signed_add_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = (u64)__a + (u64)__b;		\
+	(((~(__a ^ __b)) & (*__d ^ __a))	\
+		& type_min(typeof(__a))) != 0;	\
+})
+
+/*
+ * Subtraction is similar, except that overflow can now happen only
+ * when the signs are opposite. In this case, overflow has happened if
+ * the result has the opposite sign of a.
+ */
+#define __signed_sub_overflow(a, b, d) ({	\
+	typeof(a) __a = (a);			\
+	typeof(b) __b = (b);			\
+	typeof(d) __d = (d);			\
+	(void) (&__a == &__b);			\
+	(void) (&__a == __d);			\
+	*__d = (u64)__a - (u64)__b;		\
+	((((__a ^ __b)) & (*__d ^ __a))		\
+		& type_min(typeof(__a))) != 0;	\
+})
+
+/*
+ * Signed multiplication is rather hard. gcc always follows C99, so
+ * division is truncated towards 0. This means that we can write the
+ * overflow check like this:
+ *
+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
+ * (a < -1 && (b > MIN/a || b < MAX/a) ||
+ * (a == -1 && b == MIN)
+ *
+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
+ * (included in -Wextra) warning: When the type is u8 or u16, the
+ * __b_c_e in check_mul_overflow obviously selects
+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
+ * code and warns about the limited range of __b.
+ */
+
+#define __signed_mul_overflow(a, b, d) ({				\
+	typeof(a) __a = (a);						\
+	typeof(b) __b = (b);						\
+	typeof(d) __d = (d);						\
+	typeof(a) __tmax = type_max(typeof(a));				\
+	typeof(a) __tmin = type_min(typeof(a));				\
+	(void) (&__a == &__b);						\
+	(void) (&__a == __d);						\
+	*__d = (u64)__a * (u64)__b;					\
+	(__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||	\
+	(__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
+	(__b == (typeof(__b))-1 && __a == __tmin);			\
+})
+
+
+#define check_add_overflow(a, b, d)					\
+	__builtin_choose_expr(is_signed_type(typeof(a)),		\
+			__signed_add_overflow(a, b, d),			\
+			__unsigned_add_overflow(a, b, d))
+
+#define check_sub_overflow(a, b, d)					\
+	__builtin_choose_expr(is_signed_type(typeof(a)),		\
+			__signed_sub_overflow(a, b, d),			\
+			__unsigned_sub_overflow(a, b, d))
+
+#define check_mul_overflow(a, b, d)					\
+	__builtin_choose_expr(is_signed_type(typeof(a)),		\
+			__signed_mul_overflow(a, b, d),			\
+			__unsigned_mul_overflow(a, b, d))
+
+
+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(a, b, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(a, b, &bytes))
+		return SIZE_MAX;
+	if (check_mul_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(n, size, &bytes))
+		return SIZE_MAX;
+	if (check_add_overflow(bytes, c, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n)					\
+	__ab_c_size(n,							\
+		    sizeof(*(p)->member) + __must_be_array((p)->member),\
+		    sizeof(*(p)))
+
+#endif /* __LINUX_OVERFLOW_H */
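
A minimal usage sketch of the new helpers (not part of the patch; the structure name is hypothetical): the intended pattern is a saturating size calculation feeding an allocator, so an overflow turns into an allocation failure rather than an undersized buffer.

#include <linux/types.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical structure used only to illustrate struct_size(). */
struct foo_hdr {
	u32 count;
	u32 data[];		/* trailing array sized at allocation time */
};

static struct foo_hdr *alloc_foo_hdr(size_t n)
{
	struct foo_hdr *f;

	/*
	 * struct_size() saturates to SIZE_MAX on overflow, so a huge @n
	 * makes kzalloc() fail instead of returning a short buffer.
	 */
	f = kzalloc(struct_size(f, data, n), GFP_KERNEL);
	return f;
}
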
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9dbf9c3..c890724a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -341,8 +341,16 @@
 			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
 			       unsigned int nr_pages, struct page **pages);
-unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
-			int tag, unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
+			pgoff_t end, int tag, unsigned int nr_pages,
+			struct page **pages);
+static inline unsigned find_get_pages_tag(struct address_space *mapping,
+			pgoff_t *index, int tag, unsigned int nr_pages,
+			struct page **pages)
+{
+	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
+					nr_pages, pages);
+}
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
 			int tag, unsigned int nr_entries,
 			struct page **entries, pgoff_t *indices);
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index b45d391..cead441 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -29,9 +29,17 @@
 void pagevec_remove_exceptionals(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
-unsigned pagevec_lookup_tag(struct pagevec *pvec,
-		struct address_space *mapping, pgoff_t *index, int tag,
-		unsigned nr_pages);
+unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
+		struct address_space *mapping, pgoff_t *index, pgoff_t end,
+		int tag);
+unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
+		struct address_space *mapping, pgoff_t *index, pgoff_t end,
+		int tag, unsigned max_pages);
+static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
+		struct address_space *mapping, pgoff_t *index, int tag)
+{
+	return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
+}
 
 static inline void pagevec_init(struct pagevec *pvec, int cold)
 {
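
For context (not part of the patch): the range-limited tag lookups are meant to replace open-coded end checks in writeback-style loops. A sketch of the expected usage, with mapping/index/end assumed to come from the caller; find_get_pages_range_tag() is used the same way at the page-array level.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void sketch_walk_dirty_range(struct address_space *mapping,
				    pgoff_t index, pgoff_t end)
{
	struct pagevec pvec;
	unsigned int i, nr;

	pagevec_init(&pvec, 0);
	/*
	 * The _range_ variant stops at @end, so the caller no longer has to
	 * re-check page->index against the end of the range after each batch.
	 */
	while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
					      PAGECACHE_TAG_DIRTY))) {
		for (i = 0; i < nr; i++) {
			struct page *page = pvec.pages[i];

			/* ... lock and write back @page here ... */
			(void)page;
		}
		pagevec_release(&pvec);
	}
}
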
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3652290..534cb43 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1190,6 +1190,8 @@
 unsigned long pci_address_to_pio(phys_addr_t addr);
 phys_addr_t pci_pio_to_address(unsigned long pio);
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+			   phys_addr_t phys_addr);
 void pci_unmap_iospace(struct resource *res);
 
 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
@@ -2149,4 +2151,16 @@
 /* provide the legacy pci_dma_* API */
 #include <linux/pci-dma-compat.h>
 
+#define pci_printk(level, pdev, fmt, arg...) \
+	dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
+#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
+
 #endif /* LINUX_PCI_H */
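
For context (not part of the patch): these wrappers only shorten the usual dev_* logging on the embedded struct device. A hypothetical probe message:

#include <linux/pci.h>

static void example_log_probe(struct pci_dev *pdev)
{
	/* Same output as dev_info(&pdev->dev, ...), just less typing. */
	pci_info(pdev, "probed device %04x:%04x\n",
		 pdev->vendor, pdev->device);
}
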
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4308204..bba5604 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3054,4 +3054,6 @@
 
 #define PCI_VENDOR_ID_OCZ		0x1b85
 
+#define PCI_VENDOR_ID_NCUBE		0x10ff
+
 #endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 47c5b39..d32e7b8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -266,7 +266,7 @@
 	int				capabilities;
 
 	int * __percpu			pmu_disable_count;
-	struct perf_cpu_context * __percpu pmu_cpu_context;
+	struct perf_cpu_context __percpu *pmu_cpu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca..9f0aa1b 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
 /*
  * Driver for Texas Instruments INA219, INA226 power monitor chips
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 4c07788..731dcef 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -443,6 +443,7 @@
 	spinlock_t changed_lock;
 	bool changed;
 	bool initialized;
+	bool removing;
 	atomic_t use_cnt;
 #ifdef CONFIG_THERMAL
 	struct thermal_zone_device *tzd;
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 48fe2e9..770cd64 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -455,6 +455,7 @@
 	SCALE_R_ABSOLUTE,
 	SCALE_QRD_SKUH_RBATT_THERM,
 	SCALE_QRD_SKUT1_RBATT_THERM,
+	SCALE_QRD_215_RBATT_THERM,
 	SCALE_RSCALE_NONE,
 };
 
@@ -1888,6 +1889,22 @@
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold);
 /**
+ * qpnp_adc_qrd_215_btm_scaler() - Performs reverse calibration on the
+ *		low/high temperature threshold values passed by the client.
+ *		The function maps the temperature to voltage and applies
+ *		ratiometric calibration on the voltage values for the QRD 215 board.
+ * @dev:	Structure device for qpnp vadc
+ * @param:	The input parameters that contain the low/high temperature
+ *		values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ *		the above calibrated voltage value.
+ */
+int32_t qpnp_adc_qrd_215_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold);
+/**
  * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration
  *		and convert given temperature to voltage on supported
  *		thermistor channels using 100k pull-up.
@@ -2279,6 +2296,10 @@
 		struct qpnp_adc_tm_btm_param *param,
 		uint32_t *low_threshold, uint32_t *high_threshold)
 { return -ENXIO; }
+static inline int32_t qpnp_adc_qrd_215_btm_scaler(struct qpnp_vadc_chip *dev,
+		struct qpnp_adc_tm_btm_param *param,
+		uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
 static inline int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(
 		struct qpnp_vadc_chip *dev,
 		struct qpnp_adc_tm_btm_param *param,
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8beb98d..4f7a956 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -45,19 +45,17 @@
  * This is only for internal list manipulation where we know
  * the prev/next entries already!
  */
-#ifndef CONFIG_DEBUG_LIST
 static inline void __list_add_rcu(struct list_head *new,
 		struct list_head *prev, struct list_head *next)
 {
+	if (!__list_add_valid(new, prev, next))
+		return;
+
 	new->next = next;
 	new->prev = prev;
 	rcu_assign_pointer(list_next_rcu(prev), new);
 	next->prev = new;
 }
-#else
-void __list_add_rcu(struct list_head *new,
-		    struct list_head *prev, struct list_head *next);
-#endif
 
 /**
  * list_add_rcu - add a new entry to rcu-protected list
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 4acc552..19d0778 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -162,6 +162,7 @@
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 009ed0a..a0ee4b6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1829,7 +1829,7 @@
 
 	struct mm_struct *mm, *active_mm;
 	/* per-thread vma caching */
-	u32 vmacache_seqnum;
+	u64 vmacache_seqnum;
 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
 #if defined(SPLIT_RSS_COUNTING)
 	struct task_rss_stat	rss_stat;
@@ -3349,7 +3349,11 @@
 {
 	__set_task_comm(tsk, from, false);
 }
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({			\
+	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
+	__get_task_comm(buf, sizeof(buf), tsk);		\
+})
 
 #ifdef CONFIG_SMP
 void scheduler_ipi(void);
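
A note on the get_task_comm() change above (sketch, not part of the patch): because the macro takes sizeof(buf), only a real TASK_COMM_LEN array compiles; passing a pointer now trips the BUILD_BUG_ON().

#include <linux/printk.h>
#include <linux/sched.h>

static void example_report_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	/* OK: sizeof(comm) == TASK_COMM_LEN, so the BUILD_BUG_ON() passes. */
	get_task_comm(comm, tsk);

	/*
	 * Would no longer build: with "char *p", sizeof(p) is the pointer
	 * size, not TASK_COMM_LEN.
	 *
	 *	char *p = comm;
	 *	get_task_comm(p, tsk);
	 */
	pr_info("task comm: %s\n", comm);
}
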
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 42e3f06..c7dff69 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -345,7 +345,8 @@
 };
 
 struct earlycon_id {
-	char	name[16];
+	char	name[15];
+	char	name_term;	/* In case compiler didn't '\0' term name */
 	char	compatible[128];
 	int	(*setup)(struct earlycon_device *, const char *options);
 };
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6b20f91..9b8e0ed 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -602,6 +602,7 @@
  *	@hash: the packet hash
  *	@queue_mapping: Queue mapping for multiqueue devices
  *	@xmit_more: More SKBs are pending for this queue
+ *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
  *	@ndisc_nodetype: router type (from link layer)
  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
  *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -692,7 +693,7 @@
 				peeked:1,
 				head_frag:1,
 				xmit_more:1,
-				__unused:1; /* one bit hole */
+				pfmemalloc:1;
 	kmemcheck_bitfield_end(flags1);
 
 	/* fields enclosed in headers_start/headers_end are copied
@@ -712,19 +713,18 @@
 
 	__u8			__pkt_type_offset[0];
 	__u8			pkt_type:3;
-	__u8			pfmemalloc:1;
 	__u8			ignore_df:1;
 	__u8			nfctinfo:3;
-
 	__u8			nf_trace:1;
+
 	__u8			ip_summed:2;
 	__u8			ooo_okay:1;
 	__u8			l4_hash:1;
 	__u8			sw_hash:1;
 	__u8			wifi_acked_valid:1;
 	__u8			wifi_acked:1;
-
 	__u8			no_fcs:1;
+
 	/* Indicates the inner headers are valid in the skbuff. */
 	__u8			encapsulation:1;
 	__u8			encap_hdr_csum:1;
@@ -732,11 +732,11 @@
 	__u8			csum_complete_sw:1;
 	__u8			csum_level:2;
 	__u8			csum_bad:1;
-
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
 	__u8			ipvs_property:1;
+
 	__u8			inner_protocol_type:1;
 	__u8			fast_forwarded:1;
 	__u8			remcsum_offload:1;
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 75f56c2..b6a59e8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -67,7 +67,8 @@
 	int size;		/* The size of an object including meta data */
 	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	int cpu_partial;	/* Number of per cpu partial objects to keep around */
+	/* Number of per cpu partial objects to keep around */
+	unsigned int cpu_partial;
 	struct kmem_cache_order_objects oo;
 
 	/* Allocation and freeing of slabs */
diff --git a/include/linux/string.h b/include/linux/string.h
index b7e8f42..d55f6ee 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -27,7 +27,7 @@
 size_t strlcpy(char *, const char *, size_t);
 #endif
 #ifndef __HAVE_ARCH_STRSCPY
-ssize_t __must_check strscpy(char *, const char *, size_t);
+ssize_t strscpy(char *, const char *, size_t);
 #endif
 #ifndef __HAVE_ARCH_STRCAT
 extern char * strcat(char *, const char *);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 333ad11..44161a4 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -155,6 +155,7 @@
 
 void		rpc_shutdown_client(struct rpc_clnt *);
 void		rpc_release_client(struct rpc_clnt *);
+void		rpc_task_release_transport(struct rpc_task *);
 void		rpc_task_release_client(struct rpc_task *);
 
 int		rpcb_create_local(struct net *);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index ed2a9c9..9d995cd 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -14,5 +14,7 @@
 extern int swap_ratio(struct swap_info_struct **si);
 extern void setup_swap_ratio(struct swap_info_struct *p, int prio);
 extern bool is_swap_ratio_group(int prio);
+extern unsigned long generic_max_swapfile_size(void);
+extern unsigned long max_swapfile_size(void);
 
 #endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 91a740f..ef4bc88 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -205,6 +205,26 @@
 	}								\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
+/*
+ * Called before returning to user mode. Returning to user mode with an
+ * address limit different from USER_DS can allow kernel memory to be
+ * overwritten.
+ */
+static inline void addr_limit_user_check(void)
+{
+#ifdef TIF_FSCHECK
+	if (!test_thread_flag(TIF_FSCHECK))
+		return;
+#endif
+
+	if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
+				  "Invalid address limit on user-mode return"))
+		force_sig(SIGKILL, current);
+
+#ifdef TIF_FSCHECK
+	clear_thread_flag(TIF_FSCHECK);
+#endif
+}
+
 asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
 			       qid_t id, void __user *addr);
 asmlinkage long sys_time(time_t __user *tloc);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 8f84c84..2ce7675 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -146,6 +146,7 @@
 	struct ctl_table_set *set;
 	struct ctl_dir *parent;
 	struct ctl_node *node;
+	struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
 };
 
 struct ctl_dir {
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9c452f6d..2839d62 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -238,6 +238,9 @@
 				   const struct attribute **attr);
 int __must_check sysfs_chmod_file(struct kobject *kobj,
 				  const struct attribute *attr, umode_t mode);
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+						  const struct attribute *attr);
+void sysfs_unbreak_active_protection(struct kernfs_node *kn);
 void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
 			  const void *ns);
 bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -351,6 +354,17 @@
 	return 0;
 }
 
+static inline struct kernfs_node *
+sysfs_break_active_protection(struct kobject *kobj,
+			      const struct attribute *attr)
+{
+	return NULL;
+}
+
+static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+}
+
 static inline void sysfs_remove_file_ns(struct kobject *kobj,
 					const struct attribute *attr,
 					const void *ns)
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 5837387..ce98a8e 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -26,12 +26,7 @@
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_DEBUG_STACK_USAGE
-# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
-				 __GFP_ZERO)
-#else
-# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
-#endif
+#define THREADINFO_GFP	(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * flag set/clear/test wrappers
diff --git a/include/linux/tty.h b/include/linux/tty.h
index fe1b862..bcfeb9e 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -313,6 +313,10 @@
 	wait_queue_head_t write_wait;
 	wait_queue_head_t read_wait;
 	struct work_struct hangup_work;
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+	int delayed_work;
+	struct delayed_work echo_delayed_work;
+#endif
 	void *disc_data;
 	void *driver_data;
 	spinlock_t files_lock;		/* protects tty_files list */
diff --git a/include/linux/usb/ipc_bridge.h b/include/linux/usb/ipc_bridge.h
new file mode 100644
index 0000000..b147827
--- /dev/null
+++ b/include/linux/usb/ipc_bridge.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_IPC_BRIDGE_H__
+#define __MSM_IPC_BRIDGE_H__
+
+#include <linux/platform_device.h>
+
+/*
+ * The IPC bridge driver adds an IPC bridge platform device when the
+ * underlying transport is ready. The IPC transport driver acts as a
+ * platform driver for this device. The platform data is populated by the
+ * IPC bridge driver to facilitate I/O. The callback functions are passed
+ * in the platform data to avoid exported functions, which allows
+ * different bridge drivers to exist in the kernel. The IPC bridge driver
+ * removes the platform device when the underlying transport is no longer
+ * available; this typically happens during shutdown and during the remote
+ * processor's subsystem restart.
+ */
+
+/**
+ * struct ipc_bridge_platform_data - platform device data for IPC
+ *              transport driver.
+ * @max_read_size: The maximum possible read size.
+ * @max_write_size: The maximum possible write size.
+ * @open: The open must be called before starting I/O. The IPC bridge
+ *              driver uses the platform device pointer to identify the
+ *              underlying transport channel. The IPC bridge driver may
+ *              notify the remote processor that it is ready to receive
+ *              data. Returns 0 upon success and an appropriate error code
+ *              upon failure.
+ * @read: The read is done synchronously and should be called from process
+ *              context. Returns the number of bytes read from the remote
+ *              processor or an error code upon failure. The IPC transport
+ *              driver may pass a buffer of max_read_size length if the
+ *              available data size is not known in advance.
+ * @write: The write is done synchronously and should be called from process
+ *              context. The IPC bridge driver uses the same buffer for DMA
+ *              to avoid an additional memcpy, so it must be physically
+ *              contiguous. Returns the number of bytes written or an error
+ *              code upon failure.
+ * @close: The close must be called when the IPC bridge platform device
+ *              is removed. The IPC transport driver may call close when
+ *              it is no longer required to communicate with the remote
+ *              processor.
+ */
+struct ipc_bridge_platform_data {
+	unsigned int max_read_size;
+	unsigned int max_write_size;
+	int (*open)(struct platform_device *pdev);
+	int (*read)(struct platform_device *pdev, char *buf,
+			unsigned int count);
+	int (*write)(struct platform_device *pdev, char *buf,
+			unsigned int count);
+	void (*close)(struct platform_device *pdev);
+};
+
+#endif
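
A minimal sketch of the consuming side (not part of the patch; the function name is hypothetical): a platform driver probe that picks up this platform data and opens the channel, using only the callbacks documented above.

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/usb/ipc_bridge.h>

static int example_ipc_probe(struct platform_device *pdev)
{
	struct ipc_bridge_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || !pdata->open || !pdata->read ||
	    !pdata->write || !pdata->close)
		return -EINVAL;

	ret = pdata->open(pdev);	/* must precede any read/write */
	if (ret)
		return ret;

	/*
	 * read()/write() are synchronous, process-context only, and write
	 * buffers must be physically contiguous (kmalloc'd) since the
	 * bridge DMAs directly from them.
	 */
	return 0;
}
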
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 2393c13..0e7f7c5 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -171,6 +171,7 @@
  * @extcon_id: Used for ID notification registration.
  * @vbus_nb: Notification callback for VBUS event.
  * @id_nb: Notification callback for ID event.
+ * @extcon_registered: indicates if extcon notifier registered or not.
  * @dpdm_desc: Regulator descriptor for D+ and D- voting.
  * @dpdm_rdev: Regulator class device for dpdm regulator.
  * @dbg_idx: Dynamic debug buffer Index.
@@ -297,6 +298,7 @@
 	struct extcon_dev       *extcon_id;
 	struct notifier_block   vbus_nb;
 	struct notifier_block   id_nb;
+	bool			extcon_registered;
 	struct regulator_desc	dpdm_rdesc;
 	struct regulator_dev	*dpdm_rdev;
 /* Maximum debug message length */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index d999b3c..4caac13 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -22,6 +22,7 @@
 #define PHY_LANE_B		BIT(7)
 #define PHY_HSFS_MODE		BIT(8)
 #define PHY_LS_MODE		BIT(9)
+#define PHY_USB_DP_CONCURRENT_MODE	BIT(10)
 
 enum usb_phy_interface {
 	USBPHY_INTERFACE_MODE_UNKNOWN,
diff --git a/include/linux/verification.h b/include/linux/verification.h
index a10549a..60ea906 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -13,6 +13,12 @@
 #define _LINUX_VERIFICATION_H
 
 /*
+ * Indicate that both builtin trusted keys and secondary trusted keys
+ * should be used.
+ */
+#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
+
+/*
  * The use to which an asymmetric key is being put.
  */
 enum key_being_used_for {
@@ -26,9 +32,13 @@
 };
 extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
 
-#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
-
 struct key;
+struct public_key_signature;
+
+extern int verify_signature_one(const struct public_key_signature *sig,
+			   struct key *trusted_keys, const char *keyid);
+
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
 
 extern int verify_pkcs7_signature(const void *data, size_t len,
 				  const void *raw_pkcs7, size_t pkcs7_len,
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 4c679792..52b655a 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -97,7 +97,6 @@
 #ifdef CONFIG_DEBUG_VM_VMACACHE
 		VMACACHE_FIND_CALLS,
 		VMACACHE_FIND_HITS,
-		VMACACHE_FULL_FLUSHES,
 #endif
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 		SPECULATIVE_PGFAULT,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd4..4f58ff2 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@
 	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
 						    unsigned long addr);
@@ -29,10 +28,6 @@
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
 	mm->vmacache_seqnum++;
-
-	/* deal with overflows */
-	if (unlikely(mm->vmacache_seqnum == 0))
-		vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h
new file mode 100644
index 0000000..9e1f42c
--- /dev/null
+++ b/include/linux/xxhash.h
@@ -0,0 +1,236 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following disclaimer
+ *     in the documentation and/or other materials provided with the
+ *     distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at:
+ * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*
+ * Notice extracted from xxHash homepage:
+ *
+ * xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+ * It also successfully passes all tests from the SMHasher suite.
+ *
+ * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2
+ * Duo @3GHz)
+ *
+ * Name            Speed       Q.Score   Author
+ * xxHash          5.4 GB/s     10
+ * CrapWow         3.2 GB/s      2       Andrew
+ * MurmurHash 3a   2.7 GB/s     10       Austin Appleby
+ * SpookyHash      2.0 GB/s     10       Bob Jenkins
+ * SBox            1.4 GB/s      9       Bret Mulvey
+ * Lookup3         1.2 GB/s      9       Bob Jenkins
+ * SuperFastHash   1.2 GB/s      1       Paul Hsieh
+ * CityHash64      1.05 GB/s    10       Pike & Alakuijala
+ * FNV             0.55 GB/s     5       Fowler, Noll, Vo
+ * CRC32           0.43 GB/s     9
+ * MD5-32          0.33 GB/s    10       Ronald L. Rivest
+ * SHA1-32         0.28 GB/s    10
+ *
+ * Q.Score is a measure of quality of the hash function.
+ * It depends on successfully passing SMHasher test set.
+ * 10 is a perfect score.
+ *
+ * A 64-bit version, named xxh64, offers much better speed,
+ * but for 64-bit applications only.
+ * Name     Speed on 64 bits    Speed on 32 bits
+ * xxh64       13.8 GB/s            1.9 GB/s
+ * xxh32        6.8 GB/s            6.0 GB/s
+ */
+
+#ifndef XXHASH_H
+#define XXHASH_H
+
+#include <linux/types.h>
+
+/*-****************************
+ * Simple Hash Functions
+ *****************************/
+
+/**
+ * xxh32() - calculate the 32-bit hash of the input with a given seed.
+ *
+ * @input:  The data to hash.
+ * @length: The length of the data to hash.
+ * @seed:   The seed can be used to alter the result predictably.
+ *
+ * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
+ *
+ * Return:  The 32-bit hash of the data.
+ */
+uint32_t xxh32(const void *input, size_t length, uint32_t seed);
+
+/**
+ * xxh64() - calculate the 64-bit hash of the input with a given seed.
+ *
+ * @input:  The data to hash.
+ * @length: The length of the data to hash.
+ * @seed:   The seed can be used to alter the result predictably.
+ *
+ * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems.
+ *
+ * Return:  The 64-bit hash of the data.
+ */
+uint64_t xxh64(const void *input, size_t length, uint64_t seed);
+
+/*-****************************
+ * Streaming Hash Functions
+ *****************************/
+
+/*
+ * These definitions are only meant to allow allocation of XXH state
+ * statically, on stack, or in a struct for example.
+ * Do not use members directly.
+ */
+
+/**
+ * struct xxh32_state - private xxh32 state, do not use members directly
+ */
+struct xxh32_state {
+	uint32_t total_len_32;
+	uint32_t large_len;
+	uint32_t v1;
+	uint32_t v2;
+	uint32_t v3;
+	uint32_t v4;
+	uint32_t mem32[4];
+	uint32_t memsize;
+};
+
+/**
+ * struct xxh64_state - private xxh64 state, do not use members directly
+ */
+struct xxh64_state {
+	uint64_t total_len;
+	uint64_t v1;
+	uint64_t v2;
+	uint64_t v3;
+	uint64_t v4;
+	uint64_t mem64[4];
+	uint32_t memsize;
+};
+
+/**
+ * xxh32_reset() - reset the xxh32 state to start a new hashing operation
+ *
+ * @state: The xxh32 state to reset.
+ * @seed:  Initialize the hash state with this seed.
+ *
+ * Call this function on any xxh32_state to prepare for a new hashing operation.
+ */
+void xxh32_reset(struct xxh32_state *state, uint32_t seed);
+
+/**
+ * xxh32_update() - hash the data given and update the xxh32 state
+ *
+ * @state:  The xxh32 state to update.
+ * @input:  The data to hash.
+ * @length: The length of the data to hash.
+ *
+ * After calling xxh32_reset() call xxh32_update() as many times as necessary.
+ *
+ * Return:  Zero on success, otherwise an error code.
+ */
+int xxh32_update(struct xxh32_state *state, const void *input, size_t length);
+
+/**
+ * xxh32_digest() - produce the current xxh32 hash
+ *
+ * @state: Produce the current xxh32 hash of this state.
+ *
+ * A hash value can be produced at any time. It is still possible to continue
+ * inserting input into the hash state after a call to xxh32_digest(), and
+ * generate new hashes later on, by calling xxh32_digest() again.
+ *
+ * Return: The xxh32 hash stored in the state.
+ */
+uint32_t xxh32_digest(const struct xxh32_state *state);
+
+/**
+ * xxh64_reset() - reset the xxh64 state to start a new hashing operation
+ *
+ * @state: The xxh64 state to reset.
+ * @seed:  Initialize the hash state with this seed.
+ */
+void xxh64_reset(struct xxh64_state *state, uint64_t seed);
+
+/**
+ * xxh64_update() - hash the data given and update the xxh64 state
+ * @state:  The xxh64 state to update.
+ * @input:  The data to hash.
+ * @length: The length of the data to hash.
+ *
+ * After calling xxh64_reset() call xxh64_update() as many times as necessary.
+ *
+ * Return:  Zero on success, otherwise an error code.
+ */
+int xxh64_update(struct xxh64_state *state, const void *input, size_t length);
+
+/**
+ * xxh64_digest() - produce the current xxh64 hash
+ *
+ * @state: Produce the current xxh64 hash of this state.
+ *
+ * A hash value can be produced at any time. It is still possible to continue
+ * inserting input into the hash state after a call to xxh64_digest(), and
+ * generate new hashes later on, by calling xxh64_digest() again.
+ *
+ * Return: The xxh64 hash stored in the state.
+ */
+uint64_t xxh64_digest(const struct xxh64_state *state);
+
+/*-**************************
+ * Utils
+ ***************************/
+
+/**
+ * xxh32_copy_state() - copy the source state into the destination state
+ *
+ * @src: The source xxh32 state.
+ * @dst: The destination xxh32 state.
+ */
+void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src);
+
+/**
+ * xxh64_copy_state() - copy the source state into the destination state
+ *
+ * @src: The source xxh64 state.
+ * @dst: The destination xxh64 state.
+ */
+void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src);
+
+#endif /* XXHASH_H */
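
For illustration only (not part of the patch): hashing a buffer in chunks with the streaming API yields the same value as the one-shot helper. A minimal sketch:

#include <linux/types.h>
#include <linux/xxhash.h>

static u32 example_xxh32_in_two_chunks(const u8 *buf, size_t len, u32 seed)
{
	struct xxh32_state state;
	size_t half = len / 2;

	xxh32_reset(&state, seed);
	/* xxh32_update() returns 0 on success; errors ignored for brevity. */
	xxh32_update(&state, buf, half);
	xxh32_update(&state, buf + half, len - half);

	/* Equals xxh32(buf, len, seed). */
	return xxh32_digest(&state);
}
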
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 57a8e98..2219cce 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -47,6 +47,8 @@
 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
 void zs_free(struct zs_pool *pool, unsigned long obj);
 
+size_t zs_huge_class_size(struct zs_pool *pool);
+
 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 			enum zs_mapmode mm);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
new file mode 100644
index 0000000..249575e
--- /dev/null
+++ b/include/linux/zstd.h
@@ -0,0 +1,1157 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+#ifndef ZSTD_H
+#define ZSTD_H
+
+/* ======   Dependency   ======*/
+#include <linux/types.h>   /* size_t */
+
+
+/*-*****************************************************************************
+ * Introduction
+ *
+ * zstd, short for Zstandard, is a fast lossless compression algorithm,
+ * targeting real-time compression scenarios at zlib-level and better
+ * compression ratios. The zstd compression library provides in-memory
+ * compression and decompression functions. The library supports compression
+ * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
+ * ultra, should be used with caution, as they require more memory.
+ * Compression can be done in:
+ *  - a single step, reusing a context (described as Explicit memory management)
+ *  - unbounded multiple steps (described as Streaming compression)
+ * The compression ratio achievable on small data can be highly improved using
+ * compression with a dictionary in:
+ *  - a single step (described as Simple dictionary API)
+ *  - a single step, reusing a dictionary (described as Fast dictionary API)
+ ******************************************************************************/
+
+/*======  Helper functions  ======*/
+
+/**
+ * enum ZSTD_ErrorCode - zstd error codes
+ *
+ * Functions that return size_t can be checked for errors using ZSTD_isError()
+ * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
+ */
+typedef enum {
+	ZSTD_error_no_error,
+	ZSTD_error_GENERIC,
+	ZSTD_error_prefix_unknown,
+	ZSTD_error_version_unsupported,
+	ZSTD_error_parameter_unknown,
+	ZSTD_error_frameParameter_unsupported,
+	ZSTD_error_frameParameter_unsupportedBy32bits,
+	ZSTD_error_frameParameter_windowTooLarge,
+	ZSTD_error_compressionParameter_unsupported,
+	ZSTD_error_init_missing,
+	ZSTD_error_memory_allocation,
+	ZSTD_error_stage_wrong,
+	ZSTD_error_dstSize_tooSmall,
+	ZSTD_error_srcSize_wrong,
+	ZSTD_error_corruption_detected,
+	ZSTD_error_checksum_wrong,
+	ZSTD_error_tableLog_tooLarge,
+	ZSTD_error_maxSymbolValue_tooLarge,
+	ZSTD_error_maxSymbolValue_tooSmall,
+	ZSTD_error_dictionary_corrupted,
+	ZSTD_error_dictionary_wrong,
+	ZSTD_error_dictionaryCreation_failed,
+	ZSTD_error_maxCode
+} ZSTD_ErrorCode;
+
+/**
+ * ZSTD_maxCLevel() - maximum compression level available
+ *
+ * Return: Maximum compression level available.
+ */
+int ZSTD_maxCLevel(void);
+/**
+ * ZSTD_compressBound() - maximum compressed size in worst case scenario
+ * @srcSize: The size of the data to compress.
+ *
+ * Return:   The maximum compressed size in the worst case scenario.
+ */
+size_t ZSTD_compressBound(size_t srcSize);
+/**
+ * ZSTD_isError() - tells if a size_t function result is an error code
+ * @code:  The function result to check for error.
+ *
+ * Return: Non-zero iff the code is an error.
+ */
+static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
+{
+	return code > (size_t)-ZSTD_error_maxCode;
+}
+/**
+ * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
+ * @functionResult: The result of a function for which ZSTD_isError() is true.
+ *
+ * Return:          The ZSTD_ErrorCode corresponding to the functionResult or 0
+ *                  if the functionResult isn't an error.
+ */
+static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
+	size_t functionResult)
+{
+	if (!ZSTD_isError(functionResult))
+		return (ZSTD_ErrorCode)0;
+	return (ZSTD_ErrorCode)(0 - functionResult);
+}
+
+/**
+ * enum ZSTD_strategy - zstd compression search strategy
+ *
+ * From faster to stronger.
+ */
+typedef enum {
+	ZSTD_fast,
+	ZSTD_dfast,
+	ZSTD_greedy,
+	ZSTD_lazy,
+	ZSTD_lazy2,
+	ZSTD_btlazy2,
+	ZSTD_btopt,
+	ZSTD_btopt2
+} ZSTD_strategy;
+
+/**
+ * struct ZSTD_compressionParameters - zstd compression parameters
+ * @windowLog:    Log of the largest match distance. Larger means more
+ *                compression, and more memory needed during decompression.
+ * @chainLog:     Fully searched segment. Larger means more compression, slower,
+ *                and more memory (useless for fast).
+ * @hashLog:      Dispatch table. Larger means more compression,
+ *                slower, and more memory.
+ * @searchLog:    Number of searches. Larger means more compression and slower.
+ * @searchLength: Match length searched. Larger means faster decompression,
+ *                sometimes less compression.
+ * @targetLength: Acceptable match size for optimal parser (only). Larger means
+ *                more compression, and slower.
+ * @strategy:     The zstd compression strategy.
+ */
+typedef struct {
+	unsigned int windowLog;
+	unsigned int chainLog;
+	unsigned int hashLog;
+	unsigned int searchLog;
+	unsigned int searchLength;
+	unsigned int targetLength;
+	ZSTD_strategy strategy;
+} ZSTD_compressionParameters;
+
+/**
+ * struct ZSTD_frameParameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the frame
+ *                   header (when known).
+ * @checksumFlag:    Controls whether a 32-bit checksum is generated at the end
+ *                   of the frame for error detection.
+ * @noDictIDFlag:    Controls whether dictID will be saved into the frame header
+ *                   when using dictionary compression.
+ *
+ * The default value is all fields set to 0.
+ */
+typedef struct {
+	unsigned int contentSizeFlag;
+	unsigned int checksumFlag;
+	unsigned int noDictIDFlag;
+} ZSTD_frameParameters;
+
+/**
+ * struct ZSTD_parameters - zstd parameters
+ * @cParams: The compression parameters.
+ * @fParams: The frame parameters.
+ */
+typedef struct {
+	ZSTD_compressionParameters cParams;
+	ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+/**
+ * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
+ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
+ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
+ * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
+ *
+ * Return:            The selected ZSTD_compressionParameters.
+ */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
+	unsigned long long estimatedSrcSize, size_t dictSize);
+
+/**
+ * ZSTD_getParams() - returns ZSTD_parameters for selected level
+ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
+ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
+ * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
+ *
+ * The same as ZSTD_getCParams() except also selects the default frame
+ * parameters (all zero).
+ *
+ * Return:            The selected ZSTD_parameters.
+ */
+ZSTD_parameters ZSTD_getParams(int compressionLevel,
+	unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*-*************************************
+ * Explicit memory management
+ **************************************/
+
+/**
+ * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
+ * @cParams: The compression parameters to be used for compression.
+ *
+ * If multiple compression parameters might be used, the caller must call
+ * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
+ * size.
+ *
+ * Return:   A lower bound on the size of the workspace that is passed to
+ *           ZSTD_initCCtx().
+ */
+size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
+
+/**
+ * struct ZSTD_CCtx - the zstd compression context
+ *
+ * When compressing many times it is recommended to allocate a context just once
+ * and reuse it for each successive compression operation.
+ */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+/**
+ * ZSTD_initCCtx() - initialize a zstd compression context
+ * @workspace:     The workspace to emplace the context into. It must outlive
+ *                 the returned context.
+ * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
+ *                 determine how large the workspace must be.
+ *
+ * Return:         A compression context emplaced into workspace.
+ */
+ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_compressCCtx() - compress src into dst
+ * @ctx:         The context. Must have been initialized with a workspace at
+ *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
+ * @dst:         The buffer to compress src into.
+ * @dstCapacity: The size of the destination buffer. May be any size, but
+ *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src:         The data to compress.
+ * @srcSize:     The size of the data to compress.
+ * @params:      The parameters to use for compression. See ZSTD_getParams().
+ *
+ * Return:       The compressed size or an error, which can be checked using
+ *               ZSTD_isError().
+ */
+size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize, ZSTD_parameters params);
+
+/**
+ * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ *         ZSTD_initDCtx().
+ */
+size_t ZSTD_DCtxWorkspaceBound(void);
+
+/**
+ * struct ZSTD_DCtx - the zstd decompression context
+ *
+ * When decompressing many times it is recommended to allocate a context just
+ * once and reuse it for each successive decompression operation.
+ */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+/**
+ * ZSTD_initDCtx() - initialize a zstd decompression context
+ * @workspace:     The workspace to emplace the context into. It must outlive
+ *                 the returned context.
+ * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
+ *                 determine how large the workspace must be.
+ *
+ * Return:         A decompression context emplaced into workspace.
+ */
+ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
+ * @ctx:         The decompression context.
+ * @dst:         The buffer to decompress src into.
+ * @dstCapacity: The size of the destination buffer. Must be at least as large
+ *               as the decompressed size. If the caller cannot upper bound the
+ *               decompressed size, then it's better to use the streaming API.
+ * @src:         The zstd compressed data to decompress. Multiple concatenated
+ *               frames and skippable frames are allowed.
+ * @srcSize:     The exact size of the data to decompress.
+ *
+ * Return:       The decompressed size or an error, which can be checked using
+ *               ZSTD_isError().
+ */
+size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize);
+
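
An aside for reviewers (not part of the header): the contexts above are caller-allocated, so a typical one-shot compression looks like the sketch below. The compression level 3 and the vzalloc() workspace are assumptions for illustration.

#include <linux/vmalloc.h>
#include <linux/zstd.h>

/* Sketch: returns the ZSTD_compressCCtx() result, or 0 if the workspace
 * allocation fails; real callers check results with ZSTD_isError(). */
static size_t example_zstd_compress(void *dst, size_t dst_capacity,
				    const void *src, size_t src_len)
{
	ZSTD_parameters params = ZSTD_getParams(3, src_len, 0);
	size_t wksp_len = ZSTD_CCtxWorkspaceBound(params.cParams);
	void *wksp = vzalloc(wksp_len);
	ZSTD_CCtx *cctx;
	size_t ret = 0;

	if (!wksp)
		return 0;

	cctx = ZSTD_initCCtx(wksp, wksp_len);
	if (cctx)
		/* dst_capacity of ZSTD_compressBound(src_len) always fits. */
		ret = ZSTD_compressCCtx(cctx, dst, dst_capacity,
					src, src_len, params);
	vfree(wksp);
	return ret;
}
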
+/*-************************
+ * Simple dictionary API
+ **************************/
+
+/**
+ * ZSTD_compress_usingDict() - compress src into dst using a dictionary
+ * @ctx:         The context. Must have been initialized with a workspace at
+ *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
+ * @dst:         The buffer to compress src into.
+ * @dstCapacity: The size of the destination buffer. May be any size, but
+ *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src:         The data to compress.
+ * @srcSize:     The size of the data to compress.
+ * @dict:        The dictionary to use for compression.
+ * @dictSize:    The size of the dictionary.
+ * @params:      The parameters to use for compression. See ZSTD_getParams().
+ *
+ * Compression using a predefined dictionary. The same dictionary must be used
+ * during decompression.
+ *
+ * Return:       The compressed size or an error, which can be checked using
+ *               ZSTD_isError().
+ */
+size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize, const void *dict, size_t dictSize,
+	ZSTD_parameters params);
+
+/**
+ * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
+ * @ctx:         The decompression context.
+ * @dst:         The buffer to decompress src into.
+ * @dstCapacity: The size of the destination buffer. Must be at least as large
+ *               as the decompressed size. If the caller cannot upper bound the
+ *               decompressed size, then it's better to use the streaming API.
+ * @src:         The zstd compressed data to decompress. Multiple concatenated
+ *               frames and skippable frames are allowed.
+ * @srcSize:     The exact size of the data to decompress.
+ * @dict:        The dictionary to use for decompression. The same dictionary
+ *               must've been used to compress the data.
+ * @dictSize:    The size of the dictionary.
+ *
+ * Return:       The decompressed size or an error, which can be checked using
+ *               ZSTD_isError().
+ */
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize, const void *dict, size_t dictSize);
+
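+/*
+ * Illustrative sketch of the simple dictionary API. The wrapper name, the
+ * compression level (3), the use of vmalloc()/vfree() and the errno values
+ * are assumptions of this sketch; it also assumes the ZSTD_getParams(),
+ * ZSTD_CCtxWorkspaceBound() and ZSTD_initCCtx() helpers declared earlier in
+ * this header. The same dict/dictSize pair must later be passed to
+ * ZSTD_decompress_usingDict() to decompress the result.
+ *
+ *	static int example_compress_with_dict(void *dst, size_t dstCapacity,
+ *					      const void *src, size_t srcSize,
+ *					      const void *dict, size_t dictSize,
+ *					      size_t *compressedSize)
+ *	{
+ *		ZSTD_parameters params = ZSTD_getParams(3, srcSize, dictSize);
+ *		size_t const workspaceSize =
+ *			ZSTD_CCtxWorkspaceBound(params.cParams);
+ *		void *workspace = vmalloc(workspaceSize);
+ *		ZSTD_CCtx *cctx;
+ *		size_t ret;
+ *
+ *		if (!workspace)
+ *			return -ENOMEM;
+ *		cctx = ZSTD_initCCtx(workspace, workspaceSize);
+ *		if (!cctx) {
+ *			vfree(workspace);
+ *			return -EINVAL;
+ *		}
+ *		ret = ZSTD_compress_usingDict(cctx, dst, dstCapacity, src,
+ *					      srcSize, dict, dictSize, params);
+ *		vfree(workspace);
+ *		if (ZSTD_isError(ret))
+ *			return -EIO;
+ *		*compressedSize = ret;
+ *		return 0;
+ *	}
+ */
+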
+/*-**************************
+ * Fast dictionary API
+ ***************************/
+
+/**
+ * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
+ * @cParams: The compression parameters to be used for compression.
+ *
+ * Return:   A lower bound on the size of the workspace that is passed to
+ *           ZSTD_initCDict().
+ */
+size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
+
+/**
+ * struct ZSTD_CDict - a digested dictionary to be used for compression
+ */
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/**
+ * ZSTD_initCDict() - initialize a digested dictionary for compression
+ * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
+ *                 ZSTD_CDict so it must outlive the returned ZSTD_CDict.
+ * @dictSize:      The size of the dictionary.
+ * @params:        The parameters to use for compression. See ZSTD_getParams().
+ * @workspace:     The workspace. It must outlive the returned ZSTD_CDict.
+ * @workspaceSize: The workspace size. Must be at least
+ *                 ZSTD_CDictWorkspaceBound(params.cParams).
+ *
+ * When compressing multiple messages / blocks with the same dictionary it is
+ * recommended to load it just once. The ZSTD_CDict merely references the
+ * dictBuffer, so it must outlive the returned ZSTD_CDict.
+ *
+ * Return:         The digested dictionary emplaced into workspace.
+ */
+ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
+	ZSTD_parameters params, void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
+ * @cctx:        The context. Must have been initialized with a workspace at
+ *               least as large as ZSTD_CCtxWorkspaceBound(cParams) where
+ *               cParams are the compression parameters used to initialize the
+ *               cdict.
+ * @dst:         The buffer to compress src into.
+ * @dstCapacity: The size of the destination buffer. May be any size, but
+ *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src:         The data to compress.
+ * @srcSize:     The size of the data to compress.
+ * @cdict:       The digested dictionary to use for compression.
+ *
+ * Compression using a digested dictionary. The same dictionary must be used
+ * during decompression.
+ *
+ * Return:       The compressed size or an error, which can be checked using
+ *               ZSTD_isError().
+ */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize, const ZSTD_CDict *cdict);
+
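+/*
+ * Illustrative sketch of the fast dictionary path: digest the dictionary once
+ * with ZSTD_initCDict() and reuse it for many messages. The variable names,
+ * the compression level (3) and the use of vmalloc() are assumptions of this
+ * sketch, and error handling is elided; cctx is a ZSTD_CCtx whose workspace
+ * is at least ZSTD_CCtxWorkspaceBound(params.cParams) bytes, and dictBuffer
+ * must stay valid for as long as the cdict is used.
+ *
+ *	ZSTD_parameters params = ZSTD_getParams(3, 0, dictSize);
+ *	size_t const cdictWorkspaceSize =
+ *		ZSTD_CDictWorkspaceBound(params.cParams);
+ *	void *cdictWorkspace = vmalloc(cdictWorkspaceSize);
+ *	const ZSTD_CDict *cdict = ZSTD_initCDict(dictBuffer, dictSize, params,
+ *						 cdictWorkspace,
+ *						 cdictWorkspaceSize);
+ *
+ *	Then, for each message:
+ *
+ *	size_t ret = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+ *					      src, srcSize, cdict);
+ *	if (!ZSTD_isError(ret))
+ *		compressedSize = ret;
+ */
+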
+
+/**
+ * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
+ *
+ * Return:  A lower bound on the size of the workspace that is passed to
+ *          ZSTD_initDDict().
+ */
+size_t ZSTD_DDictWorkspaceBound(void);
+
+/**
+ * struct ZSTD_DDict - a digested dictionary to be used for decompression
+ */
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/**
+ * ZSTD_initDDict() - initialize a digested dictionary for decompression
+ * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
+ *                 ZSTD_DDict so it must outlive the returned ZSTD_DDict.
+ * @dictSize:      The size of the dictionary.
+ * @workspace:     The workspace. It must outlive the returned ZSTD_DDict.
+ * @workspaceSize: The workspace size. Must be at least
+ *                 ZSTD_DDictWorkspaceBound().
+ *
+ * When decompressing multiple messages / blocks with the same dictionary it is
+ * recommended to load it just once. The ZSTD_DDict merely references the
+ * dictBuffer, so it must outlive the returned ZSTD_DDict.
+ *
+ * Return:         The digested dictionary emplaced into workspace.
+ */
+ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
+	void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
+ * @dctx:        The decompression context.
+ * @dst:         The buffer to decompress src into.
+ * @dstCapacity: The size of the destination buffer. Must be at least as large
+ *               as the decompressed size. If the caller cannot upper bound the
+ *               decompressed size, then it's better to use the streaming API.
+ * @src:         The zstd compressed data to decompress. Multiple concatenated
+ *               frames and skippable frames are allowed.
+ * @srcSize:     The exact size of the data to decompress.
+ * @ddict:       The digested dictionary to use for decompression. The same
+ *               dictionary must've been used to compress the data.
+ *
+ * Return:       The decompressed size or an error, which can be checked using
+ *               ZSTD_isError().
+ */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
+	size_t dstCapacity, const void *src, size_t srcSize,
+	const ZSTD_DDict *ddict);
+
+
+/*-**************************
+ * Streaming
+ ***************************/
+
+/**
+ * struct ZSTD_inBuffer - input buffer for streaming
+ * @src:  Start of the input buffer.
+ * @size: Size of the input buffer.
+ * @pos:  Position where reading stopped. Will be updated.
+ *        Necessarily 0 <= pos <= size.
+ */
+typedef struct ZSTD_inBuffer_s {
+	const void *src;
+	size_t size;
+	size_t pos;
+} ZSTD_inBuffer;
+
+/**
+ * struct ZSTD_outBuffer - output buffer for streaming
+ * @dst:  Start of the output buffer.
+ * @size: Size of the output buffer.
+ * @pos:  Position where writing stopped. Will be updated.
+ *        Necessarily 0 <= pos <= size.
+ */
+typedef struct ZSTD_outBuffer_s {
+	void *dst;
+	size_t size;
+	size_t pos;
+} ZSTD_outBuffer;
+
+
+
+/*-*****************************************************************************
+ * Streaming compression - HowTo
+ *
+ * A ZSTD_CStream object is required to track streaming operation.
+ * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
+ * ZSTD_CStream objects can be reused multiple times for consecutive compression
+ * operations. It is recommended to re-use a ZSTD_CStream when many streaming
+ * operations will be performed consecutively. Use one separate ZSTD_CStream per
+ * thread for parallel execution.
+ *
+ * Use ZSTD_compressStream() repetitively to consume the input stream.
+ * The function will automatically update both `pos` fields.
+ * Note that it may not consume the entire input, in which case `pos < size`,
+ * and it's up to the caller to present the remaining data again.
+ * It returns a hint for the preferred number of bytes to use as an input for
+ * the next function call.
+ *
+ * At any moment, it's possible to flush whatever data remains within the
+ * internal buffer, using ZSTD_flushStream(). `output->pos` will be updated.
+ * There might still be some content left within the internal buffer if
+ * `output->size` is too small. It returns the number of bytes left in the
+ * internal buffer and must be called until it returns 0.
+ *
+ * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
+ * write frame epilogue. The epilogue is required for decoders to consider a
+ * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
+ * the full content if `output->size` is too small. In which case, call again
+ * ZSTD_endStream() to complete the flush. It returns the number of bytes left
+ * in the internal buffer and must be called until it returns 0.
+ ******************************************************************************/
+
+/**
+ * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
+ * @cParams: The compression parameters to be used for compression.
+ *
+ * Return:   A lower bound on the size of the workspace that is passed to
+ *           ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
+ */
+size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
+
+/**
+ * struct ZSTD_CStream - the zstd streaming compression context
+ */
+typedef struct ZSTD_CStream_s ZSTD_CStream;
+
+/*===== ZSTD_CStream management functions =====*/
+/**
+ * ZSTD_initCStream() - initialize a zstd streaming compression context
+ * @params:         The zstd compression parameters.
+ * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
+ *                  pass the source size (zero means empty source). Otherwise,
+ *                  the caller may optionally pass the source size, or zero if
+ *                  unknown.
+ * @workspace:      The workspace to emplace the context into. It must outlive
+ *                  the returned context.
+ * @workspaceSize:  The size of workspace.
+ *                  Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
+ *                  how large the workspace must be.
+ *
+ * Return:          The zstd streaming compression context.
+ */
+ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
+	unsigned long long pledgedSrcSize, void *workspace,
+	size_t workspaceSize);
+
+/**
+ * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
+ * @cdict:          The digested dictionary to use for compression.
+ * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ * @workspace:      The workspace to emplace the context into. It must outlive
+ *                  the returned context.
+ * @workspaceSize:  The size of workspace. Call ZSTD_CStreamWorkspaceBound()
+ *                  with the cParams used to initialize the cdict to determine
+ *                  how large the workspace must be.
+ *
+ * Return:          The zstd streaming compression context.
+ */
+ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
+	unsigned long long pledgedSrcSize, void *workspace,
+	size_t workspaceSize);
+
+/*===== Streaming compression functions =====*/
+/**
+ * ZSTD_resetCStream() - reset the context using parameters from creation
+ * @zcs:            The zstd streaming compression context to reset.
+ * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ *
+ * Resets the context using the parameters from creation. Skips dictionary
+ * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
+ * content size is always written into the frame header.
+ *
+ * Return:          Zero or an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
+/**
+ * ZSTD_compressStream() - streaming compress some of input into output
+ * @zcs:    The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ *          compressed data was written.
+ * @input:  Source buffer. `input->pos` is updated to indicate how much data was
+ *          read. Note that it may not consume the entire input, in which case
+ *          `input->pos < input->size`, and it's up to the caller to present
+ *          remaining data again.
+ *
+ * The `input` and `output` buffers may be any size. Guaranteed to make some
+ * forward progress if `input` and `output` are not empty.
+ *
+ * Return:  A hint for the number of bytes to use as the input for the next
+ *          function call or an error, which can be checked using
+ *          ZSTD_isError().
+ */
+size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
+	ZSTD_inBuffer *input);
+/**
+ * ZSTD_flushStream() - flush internal buffers into output
+ * @zcs:    The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ *          compressed data was written.
+ *
+ * ZSTD_flushStream() must be called until it returns 0, meaning all the data
+ * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
+ * calling it too often will degrade the compression ratio.
+ *
+ * Return:  The number of bytes still present within internal buffers or an
+ *          error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+/**
+ * ZSTD_endStream() - flush internal buffers into output and end the frame
+ * @zcs:    The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ *          compressed data was written.
+ *
+ * ZSTD_endStream() must be called until it returns 0, meaning all the data has
+ * been flushed and the frame epilogue has been written.
+ *
+ * Return:  The number of bytes still present within internal buffers or an
+ *          error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+
+/**
+ * ZSTD_CStreamInSize() - recommended size for the input buffer
+ *
+ * Return: The recommended size for the input buffer.
+ */
+size_t ZSTD_CStreamInSize(void);
+/**
+ * ZSTD_CStreamOutSize() - recommended size for the output buffer
+ *
+ * When the output buffer is at least this large, it is guaranteed to be large
+ * enough to flush at least one complete compressed block.
+ *
+ * Return: The recommended size for the output buffer.
+ */
+size_t ZSTD_CStreamOutSize(void);
+
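+/*
+ * Illustrative sketch of a whole-buffer streaming compression loop. The
+ * wrapper name, the compression level (3), the use of vmalloc() and the errno
+ * values are assumptions of this sketch; it also assumes the ZSTD_getParams()
+ * helper declared earlier in this header, and that dstCapacity is at least
+ * ZSTD_compressBound(srcSize) so the output buffer never fills up (a
+ * production caller must additionally handle a full output buffer).
+ *
+ *	static int example_compress_stream(void *dst, size_t dstCapacity,
+ *					   size_t *dstLen, const void *src,
+ *					   size_t srcSize)
+ *	{
+ *		ZSTD_parameters params = ZSTD_getParams(3, srcSize, 0);
+ *		size_t const workspaceSize =
+ *			ZSTD_CStreamWorkspaceBound(params.cParams);
+ *		void *workspace = vmalloc(workspaceSize);
+ *		ZSTD_inBuffer input = { src, srcSize, 0 };
+ *		ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ *		ZSTD_CStream *zcs;
+ *		size_t ret;
+ *
+ *		if (!workspace)
+ *			return -ENOMEM;
+ *		zcs = ZSTD_initCStream(params, srcSize, workspace,
+ *				       workspaceSize);
+ *		if (!zcs) {
+ *			vfree(workspace);
+ *			return -EINVAL;
+ *		}
+ *		while (input.pos < input.size) {
+ *			ret = ZSTD_compressStream(zcs, &output, &input);
+ *			if (ZSTD_isError(ret))
+ *				goto out_err;
+ *		}
+ *		do {
+ *			ret = ZSTD_endStream(zcs, &output);
+ *			if (ZSTD_isError(ret))
+ *				goto out_err;
+ *		} while (ret != 0);
+ *		*dstLen = output.pos;
+ *		vfree(workspace);
+ *		return 0;
+ *	out_err:
+ *		vfree(workspace);
+ *		return -EIO;
+ *	}
+ */
+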
+
+
+/*-*****************************************************************************
+ * Streaming decompression - HowTo
+ *
+ * A ZSTD_DStream object is required to track streaming operations.
+ * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
+ * ZSTD_DStream objects can be re-used multiple times.
+ *
+ * Use ZSTD_decompressStream() repetitively to consume your input.
+ * The function will update both `pos` fields.
+ * If `input->pos < input->size`, some input has not been consumed.
+ * It's up to the caller to present the remaining data again.
+ * If `output->pos < output->size`, the decoder has flushed everything it could.
+ * Returns 0 iff a frame is completely decoded and fully flushed.
+ * Otherwise it returns a suggested next input size that will never load more
+ * than the current frame.
+ ******************************************************************************/
+
+/**
+ * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
+ * @maxWindowSize: The maximum window size allowed for compressed frames.
+ *
+ * Return:         A lower bound on the size of the workspace that is passed to
+ *                 ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
+ */
+size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
+
+/**
+ * struct ZSTD_DStream - the zstd streaming decompression context
+ */
+typedef struct ZSTD_DStream_s ZSTD_DStream;
+/*===== ZSTD_DStream management functions =====*/
+/**
+ * ZSTD_initDStream() - initialize a zstd streaming decompression context
+ * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * @workspace:     The workspace to emplace the context into. It must outlive
+ *                 the returned context.
+ * @workspaceSize: The size of workspace.
+ *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
+ *                 how large the workspace must be.
+ *
+ * Return:         The zstd streaming decompression context.
+ */
+ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
+	size_t workspaceSize);
+/**
+ * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
+ * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * @ddict:         The digested dictionary to use for decompression.
+ * @workspace:     The workspace to emplace the context into. It must outlive
+ *                 the returned context.
+ * @workspaceSize: The size of workspace.
+ *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
+ *                 how large the workspace must be.
+ *
+ * Return:         The zstd streaming decompression context.
+ */
+ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
+	const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
+
+/*===== Streaming decompression functions =====*/
+/**
+ * ZSTD_resetDStream() - reset the context using parameters from creation
+ * @zds:   The zstd streaming decompression context to reset.
+ *
+ * Resets the context using the parameters from creation. Skips dictionary
+ * loading, since it can be reused.
+ *
+ * Return: Zero or an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_resetDStream(ZSTD_DStream *zds);
+/**
+ * ZSTD_decompressStream() - streaming decompress some of input into output
+ * @zds:    The zstd streaming decompression context.
+ * @output: Destination buffer. `output.pos` is updated to indicate how much
+ *          decompressed data was written.
+ * @input:  Source buffer. `input.pos` is updated to indicate how much data was
+ *          read. Note that it may not consume the entire input, in which case
+ *          `input.pos < input.size`, and it's up to the caller to present
+ *          remaining data again.
+ *
+ * The `input` and `output` buffers may be any size. Guaranteed to make some
+ * forward progress if `input` and `output` are not empty.
+ * ZSTD_decompressStream() will not consume the last byte of the frame until
+ * the entire frame is flushed.
+ *
+ * Return:  Returns 0 iff a frame is completely decoded and fully flushed.
+ *          Otherwise returns a hint for the number of bytes to use as the input
+ *          for the next function call or an error, which can be checked using
+ *          ZSTD_isError(). The size hint will never load more than the frame.
+ */
+size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
+	ZSTD_inBuffer *input);
+
+/**
+ * ZSTD_DStreamInSize() - recommended size for the input buffer
+ *
+ * Return: The recommended size for the input buffer.
+ */
+size_t ZSTD_DStreamInSize(void);
+/**
+ * ZSTD_DStreamOutSize() - recommended size for the output buffer
+ *
+ * When the output buffer is at least this large, it is guaranteed to be large
+ * enough to flush at least one complete decompressed block.
+ *
+ * Return: The recommended size for the output buffer.
+ */
+size_t ZSTD_DStreamOutSize(void);
+
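+/*
+ * Illustrative sketch of a whole-buffer streaming decompression loop. The
+ * wrapper name, the use of vmalloc() and the errno values are assumptions of
+ * this sketch; it also assumes src holds exactly one complete frame and that
+ * dstCapacity is large enough for the decompressed data, so the loop always
+ * makes progress (a production caller must also guard against truncated input
+ * and a full output buffer).
+ *
+ *	static int example_decompress_stream(void *dst, size_t dstCapacity,
+ *					     size_t *dstLen, const void *src,
+ *					     size_t srcSize,
+ *					     size_t maxWindowSize)
+ *	{
+ *		size_t const workspaceSize =
+ *			ZSTD_DStreamWorkspaceBound(maxWindowSize);
+ *		void *workspace = vmalloc(workspaceSize);
+ *		ZSTD_inBuffer input = { src, srcSize, 0 };
+ *		ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+ *		ZSTD_DStream *zds;
+ *		size_t ret;
+ *
+ *		if (!workspace)
+ *			return -ENOMEM;
+ *		zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
+ *		if (!zds) {
+ *			vfree(workspace);
+ *			return -EINVAL;
+ *		}
+ *		do {
+ *			ret = ZSTD_decompressStream(zds, &output, &input);
+ *			if (ZSTD_isError(ret)) {
+ *				vfree(workspace);
+ *				return -EIO;
+ *			}
+ *		} while (ret != 0);
+ *		*dstLen = output.pos;
+ *		vfree(workspace);
+ *		return 0;
+ *	}
+ */
+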
+
+/* --- Constants ---*/
+#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
+
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
+#define ZSTD_WINDOWLOG_MAX_32  27
+#define ZSTD_WINDOWLOG_MAX_64  27
+#define ZSTD_WINDOWLOG_MAX \
+	((unsigned int)(sizeof(size_t) == 4 \
+		? ZSTD_WINDOWLOG_MAX_32 \
+		: ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
+#define ZSTD_HASHLOG_MIN        6
+#define ZSTD_CHAINLOG_MAX     (ZSTD_WINDOWLOG_MAX+1)
+#define ZSTD_CHAINLOG_MIN      ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX      17
+#define ZSTD_SEARCHLOG_MAX    (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN      1
+/* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MAX   7
+/* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_SEARCHLENGTH_MIN   3
+#define ZSTD_TARGETLENGTH_MIN   4
+#define ZSTD_TARGETLENGTH_MAX 999
+
+/* for static allocation */
+#define ZSTD_FRAMEHEADERSIZE_MAX 18
+#define ZSTD_FRAMEHEADERSIZE_MIN  6
+static const size_t ZSTD_frameHeaderSize_prefix = 5;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+/* magic number + skippable frame length */
+static const size_t ZSTD_skippableHeaderSize = 8;
+
+
+/*-*************************************
+ * Compressed size functions
+ **************************************/
+
+/**
+ * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
+ * @src:     Source buffer. It should point to the start of a zstd encoded frame
+ *           or a skippable frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ *           size of the frame.
+ *
+ * Return:   The compressed size of the frame pointed to by `src` or an error,
+ *           which can be checked with ZSTD_isError().
+ *           Suitable to pass to ZSTD_decompress() or similar functions.
+ */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
+
+/*-*************************************
+ * Decompressed size functions
+ **************************************/
+/**
+ * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
+ * @src:     It should point to the start of a zstd encoded frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
+ *
+ * Return:   The frame content size stored in the frame header if known.
+ *           `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
+ *           frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
+ */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+
+/**
+ * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
+ * @src:     It should point to the start of a series of zstd encoded and/or
+ *           skippable frames.
+ * @srcSize: The exact size of the series of frames.
+ *
+ * If any zstd encoded frame in the series doesn't have the frame content size
+ * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
+ * set when using ZSTD_compress(). The decompressed size can be very large.
+ * If the source is untrusted, the decompressed size could be wrong or
+ * intentionally modified. Always ensure the result fits within the
+ * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
+ * frames, and so it must traverse the input to read each frame header. This is
+ * efficient, as most of the data is skipped; however, it does mean that all frame
+ * data must be present and valid.
+ *
+ * Return:   Decompressed size of all the data contained in the frames if known.
+ *           `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
+ *           `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
+ */
+unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
+
+/*-*************************************
+ * Advanced compression functions
+ **************************************/
+/**
+ * ZSTD_checkCParams() - ensure parameter values remain within authorized range
+ * @cParams: The zstd compression parameters.
+ *
+ * Return:   Zero or an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
+
+/**
+ * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
+ * @cParams:  The compression parameters to adjust.
+ * @srcSize:  Optionally the estimated source size, or zero if unknown.
+ * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
+ *
+ * Return:    The optimized parameters.
+ */
+ZSTD_compressionParameters ZSTD_adjustCParams(
+	ZSTD_compressionParameters cParams, unsigned long long srcSize,
+	size_t dictSize);
+
+/*--- Advanced decompression functions ---*/
+
+/**
+ * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
+ * @buffer: The source buffer to check.
+ * @size:   The size of the source buffer, must be at least 4 bytes.
+ *
+ * Return: True iff the buffer starts with a zstd or skippable frame identifier.
+ */
+unsigned int ZSTD_isFrame(const void *buffer, size_t size);
+
+/**
+ * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
+ * @dict:     The dictionary buffer.
+ * @dictSize: The size of the dictionary buffer.
+ *
+ * Return:    The dictionary id stored within the dictionary or 0 if the
+ *            dictionary is not a zstd dictionary. If it returns 0 the
+ *            dictionary can still be loaded as a content-only dictionary.
+ */
+unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
+
+/**
+ * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
+ * @ddict: The ddict to find the id of.
+ *
+ * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
+ *         a zstd dictionary. If it returns 0 `ddict` will be loaded as a
+ *         content-only dictionary.
+ */
+unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
+
+/**
+ * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
+ * @src:     Source buffer. It must be a zstd encoded frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
+ *
+ * Return:   The dictionary id required to decompress the frame stored within
+ *           `src` or 0 if the dictionary id could not be decoded. It can return
+ *           0 if the frame does not require a dictionary, the dictionary id
+ *           wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
+ *           is too small.
+ */
+unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
+
+/**
+ * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
+ * @frameContentSize: The frame content size, or 0 if not present.
+ * @windowSize:       The window size, or 0 if the frame is a skippable frame.
+ * @dictID:           The dictionary id, or 0 if not present.
+ * @checksumFlag:     Whether a checksum was used.
+ */
+typedef struct {
+	unsigned long long frameContentSize;
+	unsigned int windowSize;
+	unsigned int dictID;
+	unsigned int checksumFlag;
+} ZSTD_frameParams;
+
+/**
+ * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
+ * @fparamsPtr: On success the frame parameters are written here.
+ * @src:        The source buffer. It must point to a zstd or skippable frame.
+ * @srcSize:    The size of the source buffer. `ZSTD_frameHeaderSize_max` is
+ *              always large enough to succeed.
+ *
+ * Return:      0 on success. If more data is required it returns how many bytes
+ *              must be provided to make forward progress. Otherwise it returns
+ *              an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
+	size_t srcSize);
+
+/*-*****************************************************************************
+ * Buffer-less and synchronous inner streaming functions
+ *
+ * This is an advanced API, giving full control over buffer management, for
+ * users who need direct control over memory.
+ * But it's also a complex one, with many restrictions (documented below).
+ * Prefer using the normal streaming API for an easier experience.
+ ******************************************************************************/
+
+/*-*****************************************************************************
+ * Buffer-less streaming compression (synchronous mode)
+ *
+ * A ZSTD_CCtx object is required to track streaming operations.
+ * Use ZSTD_initCCtx() to initialize a context.
+ * A ZSTD_CCtx object can be re-used multiple times across successive
+ * compression operations.
+ *
+ * Start by initializing a context.
+ * Use ZSTD_compressBegin(), ZSTD_compressBegin_usingDict() for dictionary
+ * compression, or ZSTD_compressBegin_advanced() for finer parameter control.
+ * It's also possible to duplicate a reference context which has already been
+ * initialized, using ZSTD_copyCCtx().
+ *
+ * Then, consume your input using ZSTD_compressContinue().
+ * There are some important considerations to keep in mind when using this
+ * advanced function:
+ * - ZSTD_compressContinue() has no internal buffer. It uses only externally
+ *   provided buffers.
+ * - The interface is synchronous: input is consumed entirely and produces one
+ *   or more compressed blocks.
+ * - The caller must ensure there is enough space in `dst` to store the
+ *   compressed data in the worst case. The worst-case size is provided by
+ *   ZSTD_compressBound().
+ *   ZSTD_compressContinue() doesn't guarantee recovery after a failed
+ *   compression.
+ * - ZSTD_compressContinue() presumes prior input ***is still accessible and
+ *   unmodified*** (up to the maximum distance size, see WindowLog).
+ *   It remembers all previous contiguous blocks, plus one separate memory
+ *   segment (which can itself consist of multiple contiguous blocks).
+ * - ZSTD_compressContinue() detects when prior input has been overwritten
+ *   because the `src` buffer overlaps it, in which case it "discards" the
+ *   relevant memory section from its history.
+ *
+ * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
+ * and optional checksum. It's possible to use srcSize==0, in which case it
+ * will write a final empty block to end the frame. Without the last-block mark,
+ * frames will be considered unfinished (corrupted) by decoders.
+ *
+ * A `ZSTD_CCtx` object can be re-used (via ZSTD_compressBegin()) to compress a
+ * new frame.
+ ******************************************************************************/
+
+/*=====   Buffer-less streaming compression functions  =====*/
+size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
+	size_t dictSize, int compressionLevel);
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
+	size_t dictSize, ZSTD_parameters params,
+	unsigned long long pledgedSrcSize);
+size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
+	unsigned long long pledgedSrcSize);
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
+	unsigned long long pledgedSrcSize);
+size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize);
+size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize);
+
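+/*
+ * Illustrative sketch of the buffer-less compression calls above. The wrapper
+ * name, the compression level (3) and the errno values are assumptions of
+ * this sketch. cctx must have been set up with ZSTD_initCCtx() and a
+ * sufficiently large workspace, dstCapacity must cover the worst case
+ * (ZSTD_compressBound() over the total input), and every chunk must remain
+ * accessible and unmodified until the frame is finished.
+ *
+ *	static int example_bufferless_compress(ZSTD_CCtx *cctx, void *dst,
+ *					       size_t dstCapacity,
+ *					       const void * const *chunks,
+ *					       const size_t *chunkSizes,
+ *					       unsigned int nbChunks,
+ *					       size_t *dstLen)
+ *	{
+ *		char *op = dst;
+ *		size_t ret = ZSTD_compressBegin(cctx, 3);
+ *		unsigned int i;
+ *
+ *		if (ZSTD_isError(ret))
+ *			return -EIO;
+ *		for (i = 0; i < nbChunks; i++) {
+ *			ret = ZSTD_compressContinue(cctx, op,
+ *				dstCapacity - (size_t)(op - (char *)dst),
+ *				chunks[i], chunkSizes[i]);
+ *			if (ZSTD_isError(ret))
+ *				return -EIO;
+ *			op += ret;
+ *		}
+ *		ret = ZSTD_compressEnd(cctx, op,
+ *				dstCapacity - (size_t)(op - (char *)dst),
+ *				NULL, 0);
+ *		if (ZSTD_isError(ret))
+ *			return -EIO;
+ *		*dstLen = (size_t)(op - (char *)dst) + ret;
+ *		return 0;
+ *	}
+ */
+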
+
+
+/*-*****************************************************************************
+ * Buffer-less streaming decompression (synchronous mode)
+ *
+ * A ZSTD_DCtx object is required to track streaming operations.
+ * Use ZSTD_initDCtx() to initialize a context.
+ * A ZSTD_DCtx object can be re-used multiple times.
+ *
+ * The first typical operation is to retrieve frame parameters, using
+ * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provides
+ * important information to correctly decode the frame, such as the minimum
+ * rolling buffer size to allocate to decompress data (`windowSize`), and the
+ * dictionary ID used.
+ * Note: content size is optional, it may not be present. 0 means unknown.
+ * Note that these values could be wrong, either because of data malformation,
+ * or because an attacker is deliberately spoofing false information. As a
+ * consequence, check that values remain within a valid application range,
+ * especially `windowSize`, before allocation. Each application can set its own
+ * limit, depending on local restrictions. For extended interoperability, it is
+ * recommended to support at least 8 MB.
+ * Frame parameters are extracted from the beginning of the compressed frame.
+ * Data fragment must be large enough to ensure successful decoding, typically
+ * `ZSTD_frameHeaderSize_max` bytes.
+ * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
+ *        >0: `srcSize` is too small, provide at least this many bytes.
+ *        Otherwise an error code, which can be tested using ZSTD_isError().
+ *
+ * Start decompression, with ZSTD_decompressBegin() or
+ * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
+ * context, using ZSTD_copyDCtx().
+ *
+ * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
+ * alternately.
+ * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
+ * to ZSTD_decompressContinue().
+ * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
+ * fail.
+ *
+ * The result of ZSTD_decompressContinue() is the number of bytes regenerated
+ * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
+ * error; it just means ZSTD_decompressContinue() has decoded some metadata
+ * item. It can also be an error code, which can be tested with ZSTD_isError().
+ *
+ * ZSTD_decompressContinue() needs previous data blocks during decompression, up
+ * to `windowSize`. They should preferably be located contiguously, prior to
+ * the current block. Alternatively, a round buffer of sufficient size is also
+ * possible. Sufficient size is determined by frame parameters.
+ * ZSTD_decompressContinue() is very sensitive to contiguity: if two blocks don't
+ * follow each other, make sure that either the compressor breaks contiguity at
+ * the same place, or that the previous contiguous segment is large enough to
+ * properly handle the maximum back-reference.
+ *
+ * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ * Context can then be reset to start a new decompression.
+ *
+ * Note: it's possible to know if next input to present is a header or a block,
+ * using ZSTD_nextInputType(). This information is not required to properly
+ * decode a frame.
+ *
+ * == Special case: skippable frames ==
+ *
+ * Skippable frames allow integration of user-defined data into a flow of
+ * concatenated frames. Skippable frames will be ignored (skipped) by a
+ * decompressor. The format of skippable frames is as follows:
+ * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
+ *    0x184D2A50 to 0x184D2A5F
+ * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ * c) Frame Content - any content (User Data) of length equal to Frame Size
+ * For skippable frames ZSTD_decompressContinue() always returns 0.
+ * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowSize==0,
+ * which means that the frame is skippable, and it reports the skippable frame's
+ * size in fparamsPtr->frameContentSize.
+ * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
+ *       actually be a zstd encoded frame with no content. For purposes of
+ *       decompression, it is valid in both cases to skip the frame using
+ *       ZSTD_findFrameCompressedSize() to find its size in bytes.
+ ******************************************************************************/
+
+/*=====   Buffer-less streaming decompression functions  =====*/
+size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
+	size_t dictSize);
+void   ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
+size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize);
+typedef enum {
+	ZSTDnit_frameHeader,
+	ZSTDnit_blockHeader,
+	ZSTDnit_block,
+	ZSTDnit_lastBlock,
+	ZSTDnit_checksum,
+	ZSTDnit_skippableFrame
+} ZSTD_nextInputType_e;
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
+
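+/*
+ * Illustrative sketch of a buffer-less decompression loop. The wrapper name
+ * and the errno values are assumptions of this sketch. dctx must have been
+ * set up with ZSTD_initDCtx(), src must hold one complete frame, and dst must
+ * be contiguous and large enough for the whole decompressed frame, which
+ * satisfies the `windowSize` contiguity requirement described above.
+ *
+ *	static int example_bufferless_decompress(ZSTD_DCtx *dctx, void *dst,
+ *						 size_t dstCapacity,
+ *						 const void *src,
+ *						 size_t srcSize,
+ *						 size_t *dstLen)
+ *	{
+ *		const char *ip = src;
+ *		char *op = dst;
+ *		size_t srcLeft = srcSize;
+ *		size_t dstLeft = dstCapacity;
+ *		size_t next, ret;
+ *
+ *		ret = ZSTD_decompressBegin(dctx);
+ *		if (ZSTD_isError(ret))
+ *			return -EIO;
+ *		while ((next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *			if (next > srcLeft)
+ *				return -EINVAL;
+ *			ret = ZSTD_decompressContinue(dctx, op, dstLeft,
+ *						      ip, next);
+ *			if (ZSTD_isError(ret))
+ *				return -EIO;
+ *			ip += next;
+ *			srcLeft -= next;
+ *			op += ret;
+ *			dstLeft -= ret;
+ *		}
+ *		*dstLen = dstCapacity - dstLeft;
+ *		return 0;
+ *	}
+ */
+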
+/*-*****************************************************************************
+ * Block functions
+ *
+ * Block functions produce and decode raw zstd blocks, without frame metadata.
+ * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
+ * very small blocks (< 100 bytes). The user will have to keep track of the
+ * information required to regenerate the data, such as the compressed and
+ * content sizes.
+ *
+ * A few rules to respect:
+ * - Compressing and decompressing require a context structure
+ *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
+ * - It is necessary to initialize the context before starting
+ *   + compression: ZSTD_compressBegin()
+ *   + decompression: ZSTD_decompressBegin()
+ *   + the _usingDict() variants are also allowed
+ *   + copyCCtx() and copyDCtx() work too
+ * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
+ *   + If you need to compress more, cut data into multiple blocks
+ *   + Consider using the regular ZSTD_compress() instead, as frame metadata
+ *     costs become negligible when source size is large.
+ * - When a block is considered not compressible enough, ZSTD_compressBlock()
+ *   returns zero, in which case nothing is produced into `dst`.
+ *   + The user must test for this outcome and handle uncompressed data directly
+ *   + ZSTD_decompressBlock() does not accept uncompressed data as input!
+ *   + In case of multiple successive blocks, the decoder must be informed of
+ *     the uncompressed block's existence so it can follow proper history. Use
+ *     ZSTD_insertBlock() in such a case.
+ ******************************************************************************/
+
+/* Define for static allocation */
+#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
+/*=====   Raw zstd block functions  =====*/
+size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
+size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize);
+size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
+	const void *src, size_t srcSize);
+size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
+	size_t blockSize);
+
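+/*
+ * Illustrative sketch of compressing a single raw block. The wrapper name,
+ * the compression level (3) and the errno values are assumptions of this
+ * sketch; cctx must have been set up with ZSTD_initCCtx() and a sufficiently
+ * large workspace. A zero result from ZSTD_compressBlock() means the block
+ * was not compressible; this sketch reports that through *dstLen == 0, and
+ * the caller must then store the original data uncompressed (and, when
+ * decoding a sequence of blocks, inform the decoder via ZSTD_insertBlock()).
+ *
+ *	static int example_compress_block(ZSTD_CCtx *cctx, void *dst,
+ *					  size_t dstCapacity, const void *src,
+ *					  size_t srcSize, size_t *dstLen)
+ *	{
+ *		size_t ret;
+ *
+ *		ret = ZSTD_compressBegin(cctx, 3);
+ *		if (ZSTD_isError(ret))
+ *			return -EIO;
+ *		if (srcSize > ZSTD_getBlockSizeMax(cctx))
+ *			return -EINVAL;
+ *		ret = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
+ *		if (ZSTD_isError(ret))
+ *			return -EIO;
+ *		*dstLen = ret;
+ *		return 0;
+ *	}
+ */
+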
+#endif  /* ZSTD_H */
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index e19e624..d267160 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -42,10 +42,13 @@
  * @prio: priority of the file handler, as defined by &enum v4l2_priority
  *
  * @wait: event' s wait queue
+ * @subscribe_lock: serialise changes to the subscribed list; guarantee that
+ *		    the add and del event callbacks are called in order
  * @subscribed: list of subscribed events
  * @available: list of events waiting to be dequeued
  * @navailable: number of available events at @available list
  * @sequence: event sequence number
+ *
  * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
  */
 struct v4l2_fh {
@@ -56,6 +59,7 @@
 
 	/* Events */
 	wait_queue_head_t	wait;
+	struct mutex		subscribe_lock;
 	struct list_head	subscribed;
 	struct list_head	available;
 	unsigned int		navailable;
diff --git a/include/microvisor/kernel/microvisor.h b/include/microvisor/kernel/microvisor.h
new file mode 100644
index 0000000..1a30d1f
--- /dev/null
+++ b/include/microvisor/kernel/microvisor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+
+#ifndef __AUTO__MICROVISOR_H__
+#define __AUTO__MICROVISOR_H__
+
+/** SDK Major number */
+#define OKL4_SDK_VERSION_MAJOR 5
+/** SDK Minor number */
+#define OKL4_SDK_VERSION_MINOR 3
+/**
+ * If defined, indicates this is an internal development version.
+ * In this case, OKL4_SDK_VERSION_RELEASE == -1
+ */
+#define OKL4_SDK_VERSION_DEVELOPMENT 1
+/** SDK Release (revision) number */
+#define OKL4_SDK_VERSION_RELEASE (-1)
+/** SDK Maintenance number. Indicates the maintenance sequence revision. */
+#define OKL4_SDK_VERSION_MAINTENANCE 0
+
+
+/** @addtogroup lib_microvisor_helpers Microvisor Helpers
+ * @{
+ */
+
+/** Common C and ASM defines. */
+
+/** OKL4 Kernel supports a Virtual CPU (vCPU) interface. */
+#define OKL4_VCPU_SUPPORT
+
+
+/** OKL4 Kernel vCPU API supports SMP guest cells. */
+#define OKL4_VCPU_SMP_SUPPORT
+
+
+/** @} */
+#endif /* __AUTO__MICROVISOR_H__ */
+/** @} */
diff --git a/include/microvisor/kernel/offsets.h b/include/microvisor/kernel/offsets.h
new file mode 100644
index 0000000..9517acf
--- /dev/null
+++ b/include/microvisor/kernel/offsets.h
@@ -0,0 +1,1534 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+#ifndef __AUTO__MICROVISOR_OFFSETS_H__
+#define __AUTO__MICROVISOR_OFFSETS_H__
+
+#if defined(ASSEMBLY)
+/* LWEE structure's type offsets */
+
+/**
+ *   Offsets for struct okl4_atomic_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_REGISTER_SIZE) */
+#define OKL4_STRUCT_ATOMIC_REGISTER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_REGISTER_VALUE) */
+#define OKL4_OFS_ATOMIC_REGISTER_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint16
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT16_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT16_SIZE (2)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT16_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT16_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint32
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT32_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT32_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT32_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT32_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint64
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT64_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT64_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT64_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT64_VALUE (0)
+/**
+ *   Offsets for struct okl4_atomic_uint8
+ **/
+/*lint -esym(621, OKL4_STRUCT_ATOMIC_UINT8_SIZE) */
+#define OKL4_STRUCT_ATOMIC_UINT8_SIZE (1)
+/*lint -esym(621, OKL4_OFS_ATOMIC_UINT8_VALUE) */
+#define OKL4_OFS_ATOMIC_UINT8_VALUE (0)
+/**
+ *   Offsets for struct okl4_axon_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_DATA_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_KCAP) */
+#define OKL4_OFS_AXON_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_SEGMENT) */
+#define OKL4_OFS_AXON_DATA_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_DATA_VIRQ) */
+#define OKL4_OFS_AXON_DATA_VIRQ (8)
+/**
+ *   Offsets for struct okl4_axon_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_EP_DATA_SIZE) */
+#define OKL4_STRUCT_AXON_EP_DATA_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX) */
+#define OKL4_OFS_AXON_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX) */
+#define OKL4_OFS_AXON_EP_DATA_TX (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_AXON_EP_DATA_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_SEGMENT) */
+#define OKL4_OFS_AXON_EP_DATA_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_AXON_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_AXON_EP_DATA_TX_VIRQ (20)
+/**
+ *   Offsets for struct okl4_axon_queue
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_QUEUE_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRIES) */
+#define OKL4_OFS_AXON_QUEUE_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_KPTR) */
+#define OKL4_OFS_AXON_QUEUE_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_UPTR) */
+#define OKL4_OFS_AXON_QUEUE_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING0_2) */
+#define OKL4_OFS_AXON_QUEUE___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE___PADDING1_3) */
+#define OKL4_OFS_AXON_QUEUE___PADDING1_3 (11)
+/**
+ *   Offsets for struct okl4_axon_queue_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE) */
+#define OKL4_STRUCT_AXON_QUEUE_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_INFO) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_INFO (0)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY_RECV_SEQUENCE (16)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7) */
+#define OKL4_OFS_AXON_QUEUE_ENTRY___PADDING3_7 (23)
+/**
+ *   Offsets for struct okl4_axon_rx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_RX_SIZE) */
+#define OKL4_STRUCT_AXON_RX_SIZE (56)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES) */
+#define OKL4_OFS_AXON_RX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0) */
+#define OKL4_OFS_AXON_RX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1) */
+#define OKL4_OFS_AXON_RX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_RX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_RX_QUEUES_3___PADDING1_3 (47)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_0) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_0 (48)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_1) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_1 (50)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_2) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_2 (52)
+/*lint -esym(621, OKL4_OFS_AXON_RX_QUEUE_SIZES_3) */
+#define OKL4_OFS_AXON_RX_QUEUE_SIZES_3 (54)
+/**
+ *   Offsets for struct okl4_axon_tx
+ **/
+/*lint -esym(621, OKL4_STRUCT_AXON_TX_SIZE) */
+#define OKL4_STRUCT_AXON_TX_SIZE (48)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES) */
+#define OKL4_OFS_AXON_TX_QUEUES (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0) */
+#define OKL4_OFS_AXON_TX_QUEUES_0 (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_QUEUE_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_ENTRIES (4)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_KPTR (6)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_0_UPTR (8)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING0_2 (10)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_0___PADDING1_3 (11)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1) */
+#define OKL4_OFS_AXON_TX_QUEUES_1 (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_QUEUE_OFFSET (12)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_ENTRIES (16)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_KPTR (18)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_1_UPTR (20)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING0_2 (22)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_1___PADDING1_3 (23)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2 (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_QUEUE_OFFSET (24)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_ENTRIES (28)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_KPTR (30)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_2_UPTR (32)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING0_2 (34)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_2___PADDING1_3 (35)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3 (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_QUEUE_OFFSET (36)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_ENTRIES (40)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_KPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_KPTR (42)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3_UPTR) */
+#define OKL4_OFS_AXON_TX_QUEUES_3_UPTR (44)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING0_2 (46)
+/*lint -esym(621, OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3) */
+#define OKL4_OFS_AXON_TX_QUEUES_3___PADDING1_3 (47)
+/**
+ *   Offsets for struct okl4_range_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_RANGE_ITEM_SIZE) */
+#define OKL4_STRUCT_RANGE_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_BASE) */
+#define OKL4_OFS_RANGE_ITEM_BASE (0)
+/*lint -esym(621, OKL4_OFS_RANGE_ITEM_SIZE) */
+#define OKL4_OFS_RANGE_ITEM_SIZE (8)
+/**
+ *   Offsets for struct okl4_virtmem_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_ITEM_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_ITEM_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_ITEM_RANGE_SIZE (8)
+/**
+ *   Offsets for struct okl4_cell_management_item
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_ITEM_SIZE (104)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MAPPING_RANGE_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DATA (24)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_IMAGE (32)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_MMU (40)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING0_4 (44)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING1_5 (45)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING2_6 (46)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING3_7 (47)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_NAME (48)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_REGISTERS_CAP (56)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_RESET_VIRQ (60)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENT_INDEX (64)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING4_4 (68)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING5_5 (69)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING6_6 (70)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING7_7 (71)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_SEGMENTS (72)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_VCPUS (80)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_BOOT_ONCE (88)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_CAN_STOP (89)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DEFERRED (90)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DETACHED (91)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_ERASE (92)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING8_5 (93)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING9_6 (94)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM___PADDING10_7 (95)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEM_DTB_ADDRESS (96)
+/**
+ *   Offsets for struct okl4_cell_management
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_NUM_ITEMS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING0_4) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING1_5) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING2_6) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT___PADDING3_7) */
+#define OKL4_OFS_CELL_MANAGEMENT___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_ITEMS) */
+#define OKL4_OFS_CELL_MANAGEMENT_ITEMS (8)
+/**
+ *   Offsets for struct okl4_segment_mapping
+ **/
+/*lint -esym(621, OKL4_STRUCT_SEGMENT_MAPPING_SIZE) */
+#define OKL4_STRUCT_SEGMENT_MAPPING_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_SIZE) */
+#define OKL4_OFS_SEGMENT_MAPPING_SIZE (8)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR) */
+#define OKL4_OFS_SEGMENT_MAPPING_VIRT_ADDR (16)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_CAP) */
+#define OKL4_OFS_SEGMENT_MAPPING_CAP (24)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_DEVICE) */
+#define OKL4_OFS_SEGMENT_MAPPING_DEVICE (28)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING_OWNED) */
+#define OKL4_OFS_SEGMENT_MAPPING_OWNED (29)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING0_6) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING0_6 (30)
+/*lint -esym(621, OKL4_OFS_SEGMENT_MAPPING___PADDING1_7) */
+#define OKL4_OFS_SEGMENT_MAPPING___PADDING1_7 (31)
+/**
+ *   Offsets for struct okl4_cell_management_segments
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_SEGMENTS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_FREE_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_NUM_SEGMENTS (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS) */
+#define OKL4_OFS_CELL_MANAGEMENT_SEGMENTS_SEGMENT_MAPPINGS (8)
+/**
+ *   Offsets for struct okl4_cell_management_vcpus
+ **/
+/*lint -esym(621, OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE) */
+#define OKL4_STRUCT_CELL_MANAGEMENT_VCPUS_SIZE (4)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS) */
+#define OKL4_OFS_CELL_MANAGEMENT_VCPUS_VCPU_CAPS (4)
+/**
+ *   Offsets for struct _okl4_env_hdr
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_HDR_SIZE) */
+#define _OKL4_STRUCT_ENV_HDR_SIZE (4)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_HDR_COUNT (2)
+/**
+ *   Offsets for struct _okl4_env_item
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_ITEM_SIZE) */
+#define _OKL4_STRUCT_ENV_ITEM_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_NAME) */
+#define _OKL4_OFS_ENV_ITEM_NAME (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ITEM_ITEM) */
+#define _OKL4_OFS_ENV_ITEM_ITEM (8)
+/**
+ *   Offsets for struct _okl4_env
+ **/
+/*lint -esym(621, _OKL4_STRUCT_ENV_SIZE) */
+#define _OKL4_STRUCT_ENV_SIZE (8)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR) */
+#define _OKL4_OFS_ENV_ENV_HDR (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_MAGIC) */
+#define _OKL4_OFS_ENV_ENV_HDR_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_HDR_COUNT) */
+#define _OKL4_OFS_ENV_ENV_HDR_COUNT (2)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING0_4) */
+#define _OKL4_OFS_ENV___PADDING0_4 (4)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING1_5) */
+#define _OKL4_OFS_ENV___PADDING1_5 (5)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING2_6) */
+#define _OKL4_OFS_ENV___PADDING2_6 (6)
+/*lint -esym(621, _OKL4_OFS_ENV___PADDING3_7) */
+#define _OKL4_OFS_ENV___PADDING3_7 (7)
+/*lint -esym(621, _OKL4_OFS_ENV_ENV_ITEM) */
+#define _OKL4_OFS_ENV_ENV_ITEM (8)
+/**
+ *   Offsets for struct okl4_env_access_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_CELL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NAME) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_CELL_NUM_ENTRIES (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY) */
+#define OKL4_OFS_ENV_ACCESS_CELL_START_ENTRY (12)
+/**
+ *   Offsets for struct okl4_env_access_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_ENTRY_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_VIRTUAL_ADDRESS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OFFSET (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SIZE) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_NUM_SEGS (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_SEGMENT_INDEX (28)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_CACHE_ATTRS (32)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_PERMISSIONS (36)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME) */
+#define OKL4_OFS_ENV_ACCESS_ENTRY_OBJECT_NAME (40)
+/**
+ *   Offsets for struct okl4_env_access_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_ACCESS_TABLE_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_NUM_CELLS (0)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_ACCESS_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_CELLS) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_CELLS (8)
+/*lint -esym(621, OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES) */
+#define OKL4_OFS_ENV_ACCESS_TABLE_ENTRIES (16)
+/**
+ *   Offsets for struct okl4_env_args
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_ARGS_SIZE) */
+#define OKL4_STRUCT_ENV_ARGS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGC) */
+#define OKL4_OFS_ENV_ARGS_ARGC (0)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING0_4) */
+#define OKL4_OFS_ENV_ARGS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING1_5) */
+#define OKL4_OFS_ENV_ARGS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING2_6) */
+#define OKL4_OFS_ENV_ARGS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS___PADDING3_7) */
+#define OKL4_OFS_ENV_ARGS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_ARGS_ARGV) */
+#define OKL4_OFS_ENV_ARGS_ARGV (8)
+/**
+ *   Offsets for struct okl4_env_interrupt_device_map
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_DEVICE_MAP_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_DEVICE_MAP_ENTRIES (4)
+/**
+ *   Offsets for struct okl4_interrupt
+ **/
+/*lint -esym(621, OKL4_STRUCT_INTERRUPT_SIZE) */
+#define OKL4_STRUCT_INTERRUPT_SIZE (4)
+/*lint -esym(621, OKL4_OFS_INTERRUPT_KCAP) */
+#define OKL4_OFS_INTERRUPT_KCAP (0)
+/**
+ *   Offsets for struct okl4_env_interrupt_handle
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_HANDLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_DESCRIPTOR (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP) */
+#define OKL4_OFS_ENV_INTERRUPT_HANDLE_INTERRUPT_KCAP (4)
+/**
+ *   Offsets for struct okl4_env_interrupt_list
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE) */
+#define OKL4_STRUCT_ENV_INTERRUPT_LIST_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_NUM_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_DESCRIPTOR (8)
+/*lint -esym(621, OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT) */
+#define OKL4_OFS_ENV_INTERRUPT_LIST_INTERRUPT (16)
+/**
+ *   Offsets for struct okl4_env_profile_cell
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CELL_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CELL_SIZE (48)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_0) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_0 (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_1) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_1 (1)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_2) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_2 (2)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_3) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_3 (3)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_8) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_8 (8)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_9) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_9 (9)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_10) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_10 (10)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_11) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_11 (11)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_12) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_12 (12)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_13) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_13 (13)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_14) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_14 (14)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_15) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_15 (15)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_16) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_16 (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_17) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_17 (17)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_18) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_18 (18)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_19) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_19 (19)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_20) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_20 (20)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_21) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_21 (21)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_22) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_22 (22)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_23) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_23 (23)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_24) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_24 (24)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_25) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_25 (25)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_26) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_26 (26)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_27) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_27 (27)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_28) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_28 (28)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_29) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_29 (29)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_30) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_30 (30)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NAME_31) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NAME_31 (31)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES) */
+#define OKL4_OFS_ENV_PROFILE_CELL_NUM_CORES (32)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING0_4 (36)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING1_5 (37)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING2_6 (38)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7) */
+#define OKL4_OFS_ENV_PROFILE_CELL___PADDING3_7 (39)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CELL_CORE) */
+#define OKL4_OFS_ENV_PROFILE_CELL_CORE (40)
+/**
+ *   Offsets for struct okl4_env_profile_cpu
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_CPU_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_CPU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_CPU_CAP) */
+#define OKL4_OFS_ENV_PROFILE_CPU_CAP (0)
+/**
+ *   Offsets for struct okl4_env_profile_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_PROFILE_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_NUM_CELL_ENTRIES (0)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_PCPU_CELL_ENTRY (4)
+/*lint -esym(621, OKL4_OFS_ENV_PROFILE_TABLE_CELLS) */
+#define OKL4_OFS_ENV_PROFILE_TABLE_CELLS (8)
+/**
+ *   Offsets for struct okl4_env_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_SIZE (24)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_BASE) */
+#define OKL4_OFS_ENV_SEGMENT_BASE (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_SIZE) */
+#define OKL4_OFS_ENV_SEGMENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_CAP_ID) */
+#define OKL4_OFS_ENV_SEGMENT_CAP_ID (16)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_RWX) */
+#define OKL4_OFS_ENV_SEGMENT_RWX (20)
+/**
+ *   Offsets for struct okl4_env_segment_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE) */
+#define OKL4_STRUCT_ENV_SEGMENT_TABLE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS) */
+#define OKL4_OFS_ENV_SEGMENT_TABLE_SEGMENTS (8)
+/**
+ *   Offsets for struct okl4_firmware_segment
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENT_SIZE (32)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_COPY_ADDR (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_EXEC_ADDR (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_FILESZ) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_FILESZ (16)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF) */
+#define OKL4_OFS_FIRMWARE_SEGMENT_MEMSZ_DIFF (24)
+/**
+ *   Offsets for struct okl4_firmware_segments_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE) */
+#define OKL4_STRUCT_FIRMWARE_SEGMENTS_INFO_SIZE (8)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_NUM_SEGMENTS (0)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS) */
+#define OKL4_OFS_FIRMWARE_SEGMENTS_INFO_SEGMENTS (8)
+/**
+ *   Offsets for struct okl4_kmmu
+ **/
+/*lint -esym(621, OKL4_STRUCT_KMMU_SIZE) */
+#define OKL4_STRUCT_KMMU_SIZE (4)
+/*lint -esym(621, OKL4_OFS_KMMU_KCAP) */
+#define OKL4_OFS_KMMU_KCAP (0)
+/**
+ *   Offsets for struct okl4_ksp_user_agent
+ **/
+/*lint -esym(621, OKL4_STRUCT_KSP_USER_AGENT_SIZE) */
+#define OKL4_STRUCT_KSP_USER_AGENT_SIZE (8)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_KCAP) */
+#define OKL4_OFS_KSP_USER_AGENT_KCAP (0)
+/*lint -esym(621, OKL4_OFS_KSP_USER_AGENT_VIRQ) */
+#define OKL4_OFS_KSP_USER_AGENT_VIRQ (4)
+/**
+ *   Offsets for struct okl4_pipe_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_DATA_SIZE (8)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_KCAP) */
+#define OKL4_OFS_PIPE_DATA_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_DATA_VIRQ) */
+#define OKL4_OFS_PIPE_DATA_VIRQ (4)
+/**
+ *   Offsets for struct okl4_pipe_ep_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_PIPE_EP_DATA_SIZE) */
+#define OKL4_STRUCT_PIPE_EP_DATA_SIZE (16)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX) */
+#define OKL4_OFS_PIPE_EP_DATA_RX (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_RX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_RX_VIRQ (4)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX) */
+#define OKL4_OFS_PIPE_EP_DATA_TX (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_KCAP) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_KCAP (8)
+/*lint -esym(621, OKL4_OFS_PIPE_EP_DATA_TX_VIRQ) */
+#define OKL4_OFS_PIPE_EP_DATA_TX_VIRQ (12)
+/**
+ *   Offsets for struct okl4_link
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINK_SIZE) */
+#define OKL4_STRUCT_LINK_SIZE (80)
+/*lint -esym(621, OKL4_OFS_LINK_NAME) */
+#define OKL4_OFS_LINK_NAME (0)
+/*lint -esym(621, OKL4_OFS_LINK_OPAQUE) */
+#define OKL4_OFS_LINK_OPAQUE (8)
+/*lint -esym(621, OKL4_OFS_LINK_PARTNER_NAME) */
+#define OKL4_OFS_LINK_PARTNER_NAME (16)
+/*lint -esym(621, OKL4_OFS_LINK_ROLE) */
+#define OKL4_OFS_LINK_ROLE (24)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING0_4) */
+#define OKL4_OFS_LINK___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING1_5) */
+#define OKL4_OFS_LINK___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING2_6) */
+#define OKL4_OFS_LINK___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING3_7) */
+#define OKL4_OFS_LINK___PADDING3_7 (31)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT) */
+#define OKL4_OFS_LINK_TRANSPORT (32)
+/*lint -esym(621, OKL4_OFS_LINK_TRANSPORT_TYPE) */
+#define OKL4_OFS_LINK_TRANSPORT_TYPE (72)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING4_4) */
+#define OKL4_OFS_LINK___PADDING4_4 (76)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING5_5) */
+#define OKL4_OFS_LINK___PADDING5_5 (77)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING6_6) */
+#define OKL4_OFS_LINK___PADDING6_6 (78)
+/*lint -esym(621, OKL4_OFS_LINK___PADDING7_7) */
+#define OKL4_OFS_LINK___PADDING7_7 (79)
+/**
+ *   Offsets for struct okl4_links
+ **/
+/*lint -esym(621, OKL4_STRUCT_LINKS_SIZE) */
+#define OKL4_STRUCT_LINKS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_LINKS_NUM_LINKS) */
+#define OKL4_OFS_LINKS_NUM_LINKS (0)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING0_4) */
+#define OKL4_OFS_LINKS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING1_5) */
+#define OKL4_OFS_LINKS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING2_6) */
+#define OKL4_OFS_LINKS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_LINKS___PADDING3_7) */
+#define OKL4_OFS_LINKS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_LINKS_LINKS) */
+#define OKL4_OFS_LINKS_LINKS (8)
+/**
+ *   Offsets for struct okl4_machine_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_MACHINE_INFO_SIZE) */
+#define OKL4_STRUCT_MACHINE_INFO_SIZE (24)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L1_CACHE_LINE_SIZE (0)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE) */
+#define OKL4_OFS_MACHINE_INFO_L2_CACHE_LINE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO_NUM_CPUS) */
+#define OKL4_OFS_MACHINE_INFO_NUM_CPUS (16)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING0_4) */
+#define OKL4_OFS_MACHINE_INFO___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING1_5) */
+#define OKL4_OFS_MACHINE_INFO___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING2_6) */
+#define OKL4_OFS_MACHINE_INFO___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_MACHINE_INFO___PADDING3_7) */
+#define OKL4_OFS_MACHINE_INFO___PADDING3_7 (23)
+/**
+ *   Offsets for struct okl4_merged_physpool
+ **/
+/*lint -esym(621, OKL4_STRUCT_MERGED_PHYSPOOL_SIZE) */
+#define OKL4_STRUCT_MERGED_PHYSPOOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR) */
+#define OKL4_OFS_MERGED_PHYSPOOL_PHYS_ADDR (0)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_NUM_SEGMENTS (8)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7) */
+#define OKL4_OFS_MERGED_PHYSPOOL___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS) */
+#define OKL4_OFS_MERGED_PHYSPOOL_SEGMENTS (16)
+/**
+ *   Offsets for struct okl4_microvisor_timer
+ **/
+/*lint -esym(621, OKL4_STRUCT_MICROVISOR_TIMER_SIZE) */
+#define OKL4_STRUCT_MICROVISOR_TIMER_SIZE (8)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_KCAP) */
+#define OKL4_OFS_MICROVISOR_TIMER_KCAP (0)
+/*lint -esym(621, OKL4_OFS_MICROVISOR_TIMER_VIRQ) */
+#define OKL4_OFS_MICROVISOR_TIMER_VIRQ (4)
+/**
+ *   Offsets for struct okl4_cpu_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_CPU_REGISTERS_SIZE) */
+#define OKL4_STRUCT_CPU_REGISTERS_SIZE (448)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X) */
+#define OKL4_OFS_CPU_REGISTERS_X (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_0) */
+#define OKL4_OFS_CPU_REGISTERS_X_0 (0)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_1) */
+#define OKL4_OFS_CPU_REGISTERS_X_1 (8)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_2) */
+#define OKL4_OFS_CPU_REGISTERS_X_2 (16)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_3) */
+#define OKL4_OFS_CPU_REGISTERS_X_3 (24)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_4) */
+#define OKL4_OFS_CPU_REGISTERS_X_4 (32)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_5) */
+#define OKL4_OFS_CPU_REGISTERS_X_5 (40)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_6) */
+#define OKL4_OFS_CPU_REGISTERS_X_6 (48)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_7) */
+#define OKL4_OFS_CPU_REGISTERS_X_7 (56)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_8) */
+#define OKL4_OFS_CPU_REGISTERS_X_8 (64)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_9) */
+#define OKL4_OFS_CPU_REGISTERS_X_9 (72)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_10) */
+#define OKL4_OFS_CPU_REGISTERS_X_10 (80)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_11) */
+#define OKL4_OFS_CPU_REGISTERS_X_11 (88)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_12) */
+#define OKL4_OFS_CPU_REGISTERS_X_12 (96)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_13) */
+#define OKL4_OFS_CPU_REGISTERS_X_13 (104)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_14) */
+#define OKL4_OFS_CPU_REGISTERS_X_14 (112)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_15) */
+#define OKL4_OFS_CPU_REGISTERS_X_15 (120)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_16) */
+#define OKL4_OFS_CPU_REGISTERS_X_16 (128)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_17) */
+#define OKL4_OFS_CPU_REGISTERS_X_17 (136)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_18) */
+#define OKL4_OFS_CPU_REGISTERS_X_18 (144)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_19) */
+#define OKL4_OFS_CPU_REGISTERS_X_19 (152)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_20) */
+#define OKL4_OFS_CPU_REGISTERS_X_20 (160)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_21) */
+#define OKL4_OFS_CPU_REGISTERS_X_21 (168)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_22) */
+#define OKL4_OFS_CPU_REGISTERS_X_22 (176)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_23) */
+#define OKL4_OFS_CPU_REGISTERS_X_23 (184)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_24) */
+#define OKL4_OFS_CPU_REGISTERS_X_24 (192)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_25) */
+#define OKL4_OFS_CPU_REGISTERS_X_25 (200)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_26) */
+#define OKL4_OFS_CPU_REGISTERS_X_26 (208)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_27) */
+#define OKL4_OFS_CPU_REGISTERS_X_27 (216)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_28) */
+#define OKL4_OFS_CPU_REGISTERS_X_28 (224)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_29) */
+#define OKL4_OFS_CPU_REGISTERS_X_29 (232)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_X_30) */
+#define OKL4_OFS_CPU_REGISTERS_X_30 (240)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL0 (248)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IP) */
+#define OKL4_OFS_CPU_REGISTERS_IP (256)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPSR) */
+#define OKL4_OFS_CPU_REGISTERS_CPSR (264)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING0_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING0_4 (268)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING1_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING1_5 (269)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING2_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING2_6 (270)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING3_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING3_7 (271)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SP_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SP_EL1 (272)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ELR_EL1 (280)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_EL1 (288)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_ABT) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_ABT (292)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_UND) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_UND (296)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_IRQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_IRQ (300)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SPSR_FIQ) */
+#define OKL4_OFS_CPU_REGISTERS_SPSR_FIQ (304)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CSSELR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CSSELR_EL1 (308)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_SCTLR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_SCTLR_EL1 (312)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CPACR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CPACR_EL1 (316)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR0_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR0_EL1 (320)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TTBR1_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TTBR1_EL1 (328)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TCR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TCR_EL1 (336)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_DACR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_DACR32_EL2 (344)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_IFSR32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_IFSR32_EL2 (348)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_ESR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_ESR_EL1 (352)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING4_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING4_4 (356)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING5_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING5_5 (357)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING6_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING6_6 (358)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING7_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING7_7 (359)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_FAR_EL1 (360)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_PAR_EL1 (368)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_MAIR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_MAIR_EL1 (376)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_VBAR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_VBAR_EL1 (384)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CONTEXTIDR_EL1 (392)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING8_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING8_4 (396)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING9_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING9_5 (397)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING10_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING10_6 (398)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING11_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING11_7 (399)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL1 (400)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDRRO_EL0 (408)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_TPIDR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_TPIDR_EL0 (416)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCR_EL0 (424)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING12_4) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING12_4 (428)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING13_5) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING13_5 (429)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING14_6) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING14_6 (430)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS___PADDING15_7) */
+#define OKL4_OFS_CPU_REGISTERS___PADDING15_7 (431)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0) */
+#define OKL4_OFS_CPU_REGISTERS_PMCCNTR_EL0 (432)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2) */
+#define OKL4_OFS_CPU_REGISTERS_FPEXC32_EL2 (440)
+/*lint -esym(621, OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1) */
+#define OKL4_OFS_CPU_REGISTERS_CNTKCTL_EL1 (444)
+/**
+ *   Offsets for struct okl4_schedule_profile_data
+ **/
+/*lint -esym(621, OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE) */
+#define OKL4_STRUCT_SCHEDULE_PROFILE_DATA_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_TIMESTAMP (0)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_TIME (8)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CONTEXT_SWITCHES (16)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_MIGRATIONS (20)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_HWIRQS (24)
+/*lint -esym(621, OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS) */
+#define OKL4_OFS_SCHEDULE_PROFILE_DATA_CPU_VIRQS (28)
+/**
+ *   Offsets for struct okl4_shared_buffer
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFER_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFER_SIZE (32)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_PHYSICAL_BASE (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_BASE (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE) */
+#define OKL4_OFS_SHARED_BUFFER_VIRTMEM_ITEM_RANGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER_CAP) */
+#define OKL4_OFS_SHARED_BUFFER_CAP (24)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFER___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFER___PADDING3_7 (31)
+/**
+ *   Offsets for struct okl4_shared_buffers_array
+ **/
+/*lint -esym(621, OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE) */
+#define OKL4_STRUCT_SHARED_BUFFERS_ARRAY_SIZE (16)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_BUFFERS (0)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY_NUM_BUFFERS (8)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7) */
+#define OKL4_OFS_SHARED_BUFFERS_ARRAY___PADDING3_7 (15)
+/**
+ *   Offsets for struct _okl4_tracebuffer_buffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_BUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_TIMESTAMP (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_WRAP (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING0_4 (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING1_5 (13)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING2_6 (14)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER___PADDING3_7 (15)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_HEAD (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET) */
+#define _OKL4_OFS_TRACEBUFFER_BUFFER_HEADER_OFFSET (32)
+/**
+ *   Offsets for struct okl4_tracebuffer_env
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEBUFFER_ENV_SIZE) */
+#define OKL4_STRUCT_TRACEBUFFER_ENV_SIZE (24)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRT_RANGE_SIZE (8)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV_VIRQ) */
+#define OKL4_OFS_TRACEBUFFER_ENV_VIRQ (16)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING0_4 (20)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING1_5 (21)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING2_6 (22)
+/*lint -esym(621, OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7) */
+#define OKL4_OFS_TRACEBUFFER_ENV___PADDING3_7 (23)
+/**
+ *   Offsets for struct _okl4_tracebuffer_header
+ **/
+/*lint -esym(621, _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE) */
+#define _OKL4_STRUCT_TRACEBUFFER_HEADER_SIZE (40)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_MAGIC (0)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_VERSION) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_VERSION (4)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ID) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ID (8)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_NUM_BUFFERS (12)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFER_SIZE (16)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_LOG_MASK (24)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_ACTIVE_BUFFER (28)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_GRABBED_BUFFER (32)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_EMPTY_BUFFERS (36)
+/*lint -esym(621, _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS) */
+#define _OKL4_OFS_TRACEBUFFER_HEADER_BUFFERS (40)
+/**
+ *   Offsets for struct okl4_tracepoint_entry_base
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_ENTRY_BASE_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_ENTRY_BASE_DESCRIPTION (8)
+/**
+ *   Offsets for struct okl4_tracepoint_unpacked_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE) */
+#define OKL4_STRUCT_TRACEPOINT_UNPACKED_ENTRY_SIZE (12)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_TIME_OFFSET (0)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_MASKS (4)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_ENTRY_DESCRIPTION (8)
+/*lint -esym(621, OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA) */
+#define OKL4_OFS_TRACEPOINT_UNPACKED_ENTRY_DATA (12)
+/**
+ *   Offsets for struct okl4_vclient_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCLIENT_INFO_SIZE) */
+#define OKL4_STRUCT_VCLIENT_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_KCAP (0)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_SEGMENT (4)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_RX_VIRQ (8)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_KCAP (12)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_SEGMENT (16)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ) */
+#define OKL4_OFS_VCLIENT_INFO_AXON_EP_TX_VIRQ (20)
+/*lint -esym(621, OKL4_OFS_VCLIENT_INFO_OPAQUE) */
+#define OKL4_OFS_VCLIENT_INFO_OPAQUE (24)
+/**
+ *   Offsets for struct okl4_vcpu_entry
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_ENTRY_SIZE) */
+#define OKL4_STRUCT_VCPU_ENTRY_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_VCPU) */
+#define OKL4_OFS_VCPU_ENTRY_VCPU (0)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IPI) */
+#define OKL4_OFS_VCPU_ENTRY_IPI (4)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_IRQ) */
+#define OKL4_OFS_VCPU_ENTRY_IRQ (8)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING0_4) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING1_5) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING2_6) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY___PADDING3_7) */
+#define OKL4_OFS_VCPU_ENTRY___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VCPU_ENTRY_STACK_POINTER) */
+#define OKL4_OFS_VCPU_ENTRY_STACK_POINTER (16)
+/**
+ *   Offsets for struct okl4_vcpu_table
+ **/
+/*lint -esym(621, OKL4_STRUCT_VCPU_TABLE_SIZE) */
+#define OKL4_STRUCT_VCPU_TABLE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_NUM_VCPUS) */
+#define OKL4_OFS_VCPU_TABLE_NUM_VCPUS (0)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING0_4) */
+#define OKL4_OFS_VCPU_TABLE___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING1_5) */
+#define OKL4_OFS_VCPU_TABLE___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING2_6) */
+#define OKL4_OFS_VCPU_TABLE___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE___PADDING3_7) */
+#define OKL4_OFS_VCPU_TABLE___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VCPU_TABLE_VCPU) */
+#define OKL4_OFS_VCPU_TABLE_VCPU (8)
+/**
+ *   Offsets for struct okl4_vfp_ctrl_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_CTRL_REGISTERS_SIZE (8)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPSR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPSR (0)
+/*lint -esym(621, OKL4_OFS_VFP_CTRL_REGISTERS_FPCR) */
+#define OKL4_OFS_VFP_CTRL_REGISTERS_FPCR (4)
+/**
+ *   Offsets for struct okl4_vfp_register
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTER_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTER_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES) */
+#define OKL4_OFS_VFP_REGISTER___BYTES (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_0) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_1) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_1 (1)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_2) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_2 (2)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_3) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_3 (3)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_4) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_4 (4)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_5) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_5 (5)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_6) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_6 (6)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_7) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_7 (7)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_8) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_8 (8)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_9) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_9 (9)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_10) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_10 (10)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_11) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_11 (11)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_12) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_12 (12)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_13) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_13 (13)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_14) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_14 (14)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTER___BYTES_15) */
+#define OKL4_OFS_VFP_REGISTER___BYTES_15 (15)
+/**
+ *   Offsets for struct okl4_vfp_registers
+ **/
+/*lint -esym(621, OKL4_STRUCT_VFP_REGISTERS_SIZE) */
+#define OKL4_STRUCT_VFP_REGISTERS_SIZE (528)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V0) */
+#define OKL4_OFS_VFP_REGISTERS_V0 (0)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V1) */
+#define OKL4_OFS_VFP_REGISTERS_V1 (16)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V2) */
+#define OKL4_OFS_VFP_REGISTERS_V2 (32)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V3) */
+#define OKL4_OFS_VFP_REGISTERS_V3 (48)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V4) */
+#define OKL4_OFS_VFP_REGISTERS_V4 (64)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V5) */
+#define OKL4_OFS_VFP_REGISTERS_V5 (80)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V6) */
+#define OKL4_OFS_VFP_REGISTERS_V6 (96)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V7) */
+#define OKL4_OFS_VFP_REGISTERS_V7 (112)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V8) */
+#define OKL4_OFS_VFP_REGISTERS_V8 (128)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V9) */
+#define OKL4_OFS_VFP_REGISTERS_V9 (144)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V10) */
+#define OKL4_OFS_VFP_REGISTERS_V10 (160)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V11) */
+#define OKL4_OFS_VFP_REGISTERS_V11 (176)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V12) */
+#define OKL4_OFS_VFP_REGISTERS_V12 (192)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V13) */
+#define OKL4_OFS_VFP_REGISTERS_V13 (208)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V14) */
+#define OKL4_OFS_VFP_REGISTERS_V14 (224)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V15) */
+#define OKL4_OFS_VFP_REGISTERS_V15 (240)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V16) */
+#define OKL4_OFS_VFP_REGISTERS_V16 (256)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V17) */
+#define OKL4_OFS_VFP_REGISTERS_V17 (272)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V18) */
+#define OKL4_OFS_VFP_REGISTERS_V18 (288)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V19) */
+#define OKL4_OFS_VFP_REGISTERS_V19 (304)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V20) */
+#define OKL4_OFS_VFP_REGISTERS_V20 (320)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V21) */
+#define OKL4_OFS_VFP_REGISTERS_V21 (336)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V22) */
+#define OKL4_OFS_VFP_REGISTERS_V22 (352)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V23) */
+#define OKL4_OFS_VFP_REGISTERS_V23 (368)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V24) */
+#define OKL4_OFS_VFP_REGISTERS_V24 (384)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V25) */
+#define OKL4_OFS_VFP_REGISTERS_V25 (400)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V26) */
+#define OKL4_OFS_VFP_REGISTERS_V26 (416)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V27) */
+#define OKL4_OFS_VFP_REGISTERS_V27 (432)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V28) */
+#define OKL4_OFS_VFP_REGISTERS_V28 (448)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V29) */
+#define OKL4_OFS_VFP_REGISTERS_V29 (464)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V30) */
+#define OKL4_OFS_VFP_REGISTERS_V30 (480)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_V31) */
+#define OKL4_OFS_VFP_REGISTERS_V31 (496)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPSR (512)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR) */
+#define OKL4_OFS_VFP_REGISTERS_CONTROL_FPCR (516)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING0_8) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING0_8 (520)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING1_9) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING1_9 (521)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING2_10) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING2_10 (522)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING3_11) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING3_11 (523)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING4_12) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING4_12 (524)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING5_13) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING5_13 (525)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING6_14) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING6_14 (526)
+/*lint -esym(621, OKL4_OFS_VFP_REGISTERS___PADDING7_15) */
+#define OKL4_OFS_VFP_REGISTERS___PADDING7_15 (527)
+/**
+ *   Offsets for struct okl4_virtmem_pool
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTMEM_POOL_SIZE) */
+#define OKL4_STRUCT_VIRTMEM_POOL_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_BASE (0)
+/*lint -esym(621, OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE) */
+#define OKL4_OFS_VIRTMEM_POOL_POOL_RANGE_SIZE (8)
+/**
+ *   Offsets for struct okl4_virtual_interrupt_lines
+ **/
+/*lint -esym(621, OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE) */
+#define OKL4_STRUCT_VIRTUAL_INTERRUPT_LINES_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_NUM_LINES (0)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES) */
+#define OKL4_OFS_VIRTUAL_INTERRUPT_LINES_LINES (8)
+/**
+ *   Offsets for struct okl4_vserver_info
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVER_INFO_SIZE) */
+#define OKL4_STRUCT_VSERVER_INFO_SIZE (32)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_DATA) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_DATA (0)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MAX_MESSAGES (8)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING0_4 (12)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING1_5 (13)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING2_6 (14)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS___PADDING3_7 (15)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE) */
+#define OKL4_OFS_VSERVER_INFO_CHANNELS_MESSAGE_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO_NUM_CLIENTS) */
+#define OKL4_OFS_VSERVER_INFO_NUM_CLIENTS (24)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING0_4) */
+#define OKL4_OFS_VSERVER_INFO___PADDING0_4 (28)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING1_5) */
+#define OKL4_OFS_VSERVER_INFO___PADDING1_5 (29)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING2_6) */
+#define OKL4_OFS_VSERVER_INFO___PADDING2_6 (30)
+/*lint -esym(621, OKL4_OFS_VSERVER_INFO___PADDING3_7) */
+#define OKL4_OFS_VSERVER_INFO___PADDING3_7 (31)
+/**
+ *   Offsets for struct okl4_vservices_service_descriptor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_SERVICE_DESCRIPTOR_SIZE (24)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_NAME (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_PROTOCOL (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED) */
+#define OKL4_OFS_VSERVICES_SERVICE_DESCRIPTOR_RESERVED (16)
+/**
+ *   Offsets for struct okl4_vservices_transport_microvisor
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORT_MICROVISOR_SIZE (120)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_IS_SERVER (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING0_1 (1)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING1_2 (2)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING2_3 (3)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_TYPE (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_U (8)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_NUM_LINES (72)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING0_4 (76)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING1_5 (77)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING2_6 (78)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN___PADDING3_7 (79)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_IN_LINES (80)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_NUM_LINES (88)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING0_4 (92)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING1_5 (93)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING2_6 (94)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT___PADDING3_7 (95)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_VIRQS_OUT_LINES (96)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_NUM_SERVICES (104)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING3_4 (108)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING4_5 (109)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING5_6 (110)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR___PADDING6_7 (111)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES) */
+#define OKL4_OFS_VSERVICES_TRANSPORT_MICROVISOR_SERVICES (112)
+/**
+ *   Offsets for struct okl4_vservices_transports
+ **/
+/*lint -esym(621, OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE) */
+#define OKL4_STRUCT_VSERVICES_TRANSPORTS_SIZE (16)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_NUM_TRANSPORTS (0)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING0_4 (4)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING1_5 (5)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING2_6 (6)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS___PADDING3_7 (7)
+/*lint -esym(621, OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS) */
+#define OKL4_OFS_VSERVICES_TRANSPORTS_TRANSPORTS (8)
+
+#endif /* ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_OFFSETS_H__ */
+
diff --git a/include/microvisor/kernel/syscalls.h b/include/microvisor/kernel/syscalls.h
new file mode 100644
index 0000000..fdc2c0d
--- /dev/null
+++ b/include/microvisor/kernel/syscalls.h
@@ -0,0 +1,6114 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+
+#ifndef __AUTO__USER_SYSCALLS_H__
+#define __AUTO__USER_SYSCALLS_H__
+
+/**
+ * @cond no_doc
+ */
+#if defined(ASSEMBLY)
+#define __hvc_str(x) x
+#else
+#define _hvc_str(x) #x
+#define __hvc_str(x) _hvc_str(x)
+#endif
+#if (defined(__GNUC__) && !defined(__clang__)) && \
+    (__GNUC__ < 4 || ((__GNUC__ == 4) && (__GNUC_MINOR__ < 5)))
+#if defined(__thumb2__)
+#define hvc(i) __hvc_str(.hword 0xf7e0 | (i & 0xf); .hword 0x8000 | (i >> 4) @ HVC)
+#else
+#define hvc(i) __hvc_str(.word 0xe1400070 | (i & 0xf) | (i >> 4 << 8) @ HVC)
+#endif
+#else
+#if defined(__ARM_EABI__)
+#if defined(ASSEMBLY) && !defined(__clang__)
+    .arch_extension virt
+#elif !defined(__clang__)
+__asm__(
+    ".arch_extension virt\n"
+);
+#endif
+#endif
+#define hvc(i) __hvc_str(hvc i)
+#endif
+/**
+ * @endcond
+ */
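+
+/*
+ * For illustration: the hvc() macro above encodes the Microvisor call number
+ * into the immediate field of an HVC instruction, split into a low nibble
+ * and an upper 12-bit field. In ARM (A32) mode, for example:
+ *
+ *     hvc(5184)  ->  .word 0xe1400070 | (5184 & 0xf) | (5184 >> 4 << 8)
+ *                ->  .word 0xe1414470               (5184 == 0x1440)
+ *
+ * On toolchains that accept the mnemonic directly, the same invocation
+ * simply expands to "hvc 5184".
+ */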
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_OK OKL4_ERROR_OK
+
+/** @} */
+
+/*
+ * Syscall prototypes.
+ */
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_PROCESS_RECV
+ *
+ * @param axon_id
+ * @param transfer_limit
+ *
+ * @retval error
+ * @retval send_empty
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+    struct _okl4_sys_axon_process_recv_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(transfer_limit        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((transfer_limit >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5184)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.send_empty = (okl4_bool_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_axon_process_recv_return
+_okl4_sys_axon_process_recv(okl4_kcap_t axon_id, okl4_lsize_t transfer_limit)
+{
+    struct _okl4_sys_axon_process_recv_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)transfer_limit;
+    __asm__ __volatile__(
+            "" hvc(5184) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.send_empty = (okl4_bool_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_HALTED
+ *
+ * @param axon_id
+ * @param halted
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)halted;
+    __asm__ __volatile__(
+            ""hvc(5186)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_halted(okl4_kcap_t axon_id, okl4_bool_t halted)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)halted;
+    __asm__ __volatile__(
+            "" hvc(5186) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(base        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(size        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5187)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5187) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(queue        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5188)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+    __asm__ __volatile__(
+            "" hvc(5188) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_RECV_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)(segment_base        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5189)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_recv_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+    __asm__ __volatile__(
+            "" hvc(5189) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_AREA
+ *
+ * @param axon_id
+ * @param base
+ * @param size
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(base        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((base >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(size        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((size >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5190)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_area(okl4_kcap_t axon_id, okl4_laddr_t base,
+        okl4_lsize_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)base;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5190) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_QUEUE
+ *
+ * @param axon_id
+ * @param queue
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)(queue        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((queue >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5191)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_queue(okl4_kcap_t axon_id, okl4_laddr_t queue)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)queue;
+    __asm__ __volatile__(
+            "" hvc(5191) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_SET_SEND_SEGMENT
+ *
+ * @param axon_id
+ * @param segment_id
+ * @param segment_base
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)(segment_base        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((segment_base >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5192)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_set_send_segment(okl4_kcap_t axon_id, okl4_kcap_t segment_id,
+        okl4_laddr_t segment_base)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_base;
+    __asm__ __volatile__(
+            "" hvc(5192) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: AXON_TRIGGER_SEND
+ *
+ * @param axon_id
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)axon_id;
+    __asm__ __volatile__(
+            ""hvc(5185)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)axon_id;
+    __asm__ __volatile__(
+            "" hvc(5185) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
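+/*
+ * Illustrative sketch (compiled out; not part of the generated API): how the
+ * Axon send-side wrappers above are invoked and their error returns checked.
+ * The capabilities and addresses are placeholders, and the required queue
+ * layout and call ordering are defined by the Axon documentation rather
+ * than by this header.
+ */
+#if 0
+static okl4_error_t
+example_axon_send_setup(okl4_kcap_t axon_cap, okl4_kcap_t segment_cap,
+        okl4_laddr_t queue_addr, okl4_laddr_t buf_base, okl4_lsize_t buf_size)
+{
+    okl4_error_t err;
+
+    /* Register the send-side segment for this Axon. */
+    err = _okl4_sys_axon_set_send_segment(axon_cap, segment_cap, buf_base);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Register the send queue and the send area. */
+    err = _okl4_sys_axon_set_send_queue(axon_cap, queue_addr);
+    if (err != OKL4_OK)
+        return err;
+    err = _okl4_sys_axon_set_send_area(axon_cap, buf_base, buf_size);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Ask the Microvisor to process any queued send descriptors. */
+    return _okl4_sys_axon_trigger_send(axon_cap);
+}
+#endif
+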
+/**
+ *
+ * @brief Acknowledge the delivery of an interrupt.
+ *
+ *    @details
+ *    This API returns the number and source of the highest-priority
+ *        enabled,
+ *    pending and inactive interrupt that is targeted at the calling vCPU
+ *    and has higher priority than the calling vCPU's running group
+ *        priority.
+ *
+ *    The returned interrupt is marked as active, and will not be returned
+ *        again
+ *    by this function until @ref okl4_sys_interrupt_eoi is invoked
+ *        specifying the
+ *    same interrupt number and source. The vCPU's running interrupt
+ *        priority is
+ *    raised to the priority of the returned interrupt. This will typically
+ *        result
+ *    in the de-assertion of the vCPU's virtual IRQ line.
+ *
+ *    If no such interrupt exists, interrupt number 1023 is returned. If
+ *        the
+ *    returned interrupt number is 16 or greater, the source ID is 0;
+ *        otherwise it
+ *    is the vCPU ID of the vCPU that raised the interrupt (which is always
+ *        in the
+ *    same Cell as the caller).
+ *
+ *    @note Invoking this API is equivalent to reading from the GIC CPU
+ *    Interface's Interrupt Acknowledge Register (\p GICC_IAR).
+ *
+ *
+ * @retval irq
+ *    An interrupt line number for the virtual GIC.
+ * @retval source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+    struct _okl4_sys_interrupt_ack_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5128)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(r0);
+    result.source = (uint8_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_ack_return
+_okl4_sys_interrupt_ack(void)
+{
+    struct _okl4_sys_interrupt_ack_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5128) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(x0);
+    result.source = (uint8_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Register a vCPU as the handler of an interrupt.
+ *
+ *    @details
+ *    The Microvisor virtual GIC API permits an interrupt source to be
+ *        dynamically
+ *    assigned to a specific IRQ number in a Cell or vCPU. An interrupt can
+ *        only
+ *    be assigned to one IRQ number, and one Cell or vCPU, at a time. This
+ *    operation attaches the interrupt to a vCPU as a private interrupt.
+ *
+ *    Interrupt sources are addressed using capabilities. This operation,
+ *        given
+ *    a capability for an interrupt that is not currently attached to any
+ *        handler,
+ *    can attach the interrupt at a given unused IRQ number. If the IRQ
+ *        number
+ *    is between 16 and 31 (the GIC Private Peripheral Interrupt range), it
+ *        will
+ *    be attached to the specified vCPU; if it is between 32 and 1019 (the
+ *        GIC
+ *    Shared Peripheral Interrupt range), it will return an error.
+ *
+ *    @note The Software Generated Interrupt range, from 0 to 15, is
+ *        reserved
+ *    and cannot be used to attach interrupt source capabilities.
+ *
+ *    @note In most cases, interrupt sources are attached at system
+ *        construction
+ *    time by the OK Tool. It is not normally necessary to attach an
+ *        interrupt
+ *    source before using it.
+ *
+ * @param vcpu_cap
+ *    A virtual CPU capability.
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ * @param irq_num
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu_cap;
+    register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+    register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+    __asm__ __volatile__(
+            ""hvc(5134)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_private(okl4_kcap_t vcpu_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu_cap;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+    __asm__ __volatile__(
+            "" hvc(5134) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Register a Cell (domain) as the handler of an interrupt.
+ *
+ *    @details
+ *    The Microvisor virtual GIC API permits an interrupt source to be
+ *        dynamically
+ *    assigned to a specific IRQ number in a Cell or vCPU. An interrupt can
+ *        only
+ *    be assigned to one IRQ number, and one Cell or vCPU, at a time. This
+ *    operation attaches the interrupt to a Cell as a shared interrupt.
+ *
+ *    Interrupt sources are addressed using capabilities. This operation,
+ *        given
+ *    a capability for an interrupt that is not currently attached to any
+ *        handler,
+ *    can attach the interrupt at a given unused IRQ number. If the IRQ
+ *        number
+ *    is between 0 and 31 (the GIC SGI or Private Peripheral Interrupt
+ *        range), it
+ *    will return an error; if it is between 32 and 1019 (the GIC
+ *    Shared Peripheral Interrupt range), it will be attached to the
+ *        specified
+ *    Cell.
+ *
+ *    @note In most cases, interrupt sources are attached at system
+ *        construction
+ *    time by the OK Tool. It is not normally necessary to attach an
+ *        interrupt
+ *    source before using it.
+ *
+ * @param domain_cap
+ *    A domain capability.
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ * @param irq_num
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)domain_cap;
+    register uint32_t r1 asm("r1") = (uint32_t)irq_cap;
+    register uint32_t r2 asm("r2") = (uint32_t)irq_num;
+    __asm__ __volatile__(
+            ""hvc(5135)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_attach_shared(okl4_kcap_t domain_cap, okl4_kcap_t irq_cap,
+        okl4_interrupt_number_t irq_num)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)domain_cap;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)irq_cap;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)irq_num;
+    __asm__ __volatile__(
+            "" hvc(5135) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
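+/*
+ * Illustrative sketch (compiled out; not part of the generated API):
+ * attaching interrupt-source capabilities with the wrappers above. Per the
+ * descriptions, IRQ numbers 16-31 (PPIs) attach to a vCPU and 32-1019
+ * (SPIs) attach to a Cell's domain; the capabilities and IRQ numbers here
+ * are placeholders.
+ */
+#if 0
+static okl4_error_t
+example_attach_irqs(okl4_kcap_t vcpu_cap, okl4_kcap_t domain_cap,
+        okl4_kcap_t ppi_source_cap, okl4_kcap_t spi_source_cap)
+{
+    okl4_error_t err;
+
+    /* Attach one source as a private (per-vCPU) interrupt on PPI 27. */
+    err = _okl4_sys_interrupt_attach_private(vcpu_cap, ppi_source_cap, 27);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Attach another source as a shared interrupt on SPI 42. */
+    return _okl4_sys_interrupt_attach_shared(domain_cap, spi_source_cap, 42);
+}
+#endif
+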
+/**
+ *
+ * @brief Unregister an interrupt.
+ *
+ *    @details
+ *    Detach the given interrupt source from its registered handler. The
+ *        interrupt
+ *    will be deactivated and disabled, and will not be delivered again
+ *        until it
+ *    is reattached. However, if it is configured in edge triggering mode,
+ *        its
+ *    pending state will be preserved.
+ *
+ * @param irq_cap
+ *    A virtual interrupt capability.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq_cap;
+    __asm__ __volatile__(
+            ""hvc(5136)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_detach(okl4_kcap_t irq_cap)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq_cap;
+    __asm__ __volatile__(
+            "" hvc(5136) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable the interrupt distributor.
+ *
+ *    @details
+ *    This API enables the interrupt distributor, in the same form as
+ *        writing to
+ *    the enable bit in (\p GICD_CTLR).
+ *
+ * @param enable
+ *    A boolean value for GIC distributor enable.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)enable;
+    __asm__ __volatile__(
+            ""hvc(5133)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_dist_enable(okl4_bool_t enable)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+    __asm__ __volatile__(
+            "" hvc(5133) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Signal the end of the interrupt handling routine.
+ *
+ *    @details
+ *    This API informs the virtual GIC that handling for a given interrupt
+ *        has
+ *    completed. It marks the interrupt as inactive, and decreases the
+ *        running
+ *    interrupt priority of the calling vCPU. This may cause immediate
+ *        delivery of
+ *    another interrupt, possibly with the same number, if one is enabled
+ *        and
+ *    pending.
+ *
+ *    The specified interrupt number and source must match the active
+ *        interrupt
+ *    that was most recently returned by an @ref okl4_sys_interrupt_ack
+ *    invocation. If multiple interrupts have been acknowledged and not yet
+ *        ended,
+ *    they must be ended in the reverse order of their acknowledgement.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's End of Interrupt Register (\p GICC_EOIR), with \p EOImode
+ *    set to 0 in \p GICC_CTLR.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)source;
+    __asm__ __volatile__(
+            ""hvc(5129)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_eoi(okl4_interrupt_number_t irq, uint8_t source)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)source;
+    __asm__ __volatile__(
+            "" hvc(5129) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
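+/*
+ * Illustrative sketch (compiled out; not part of the generated API): the
+ * acknowledge/handle/end-of-interrupt sequence described above. The handler
+ * callback is a placeholder; 1023 is the documented "no pending interrupt"
+ * value.
+ */
+#if 0
+static void
+example_handle_one_interrupt(void (*handler)(okl4_interrupt_number_t irq))
+{
+    struct _okl4_sys_interrupt_ack_return ack;
+
+    /* Equivalent to reading GICC_IAR: marks the interrupt active. */
+    ack = _okl4_sys_interrupt_ack();
+    if (ack.irq == 1023U)
+        return;            /* nothing enabled, pending and inactive */
+
+    handler(ack.irq);      /* guest-specific handling (placeholder) */
+
+    /* Equivalent to writing GICC_EOIR: must use the same irq and source. */
+    (void)_okl4_sys_interrupt_eoi(ack.irq, ack.source);
+}
+#endif
+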
+/**
+ *
+ * @brief Retrieve the highest-priority pending interrupt.
+ *
+ *    @details
+ *    This API returns the number and source of the highest-priority
+ *        enabled,
+ *    pending and inactive interrupt that is targeted at the calling vCPU
+ *    and has higher priority than the calling vCPU's running group
+ *        priority.
+ *
+ *    If no such interrupt exists, interrupt number 1023 is returned. If
+ *        the
+ *    returned interrupt number is 16 or greater, the source ID is 0;
+ *        otherwise it
+ *    is the vCPU ID of the vCPU that raised the interrupt (which is always
+ *        in the
+ *    same Cell as the caller).
+ *
+ *    @note Invoking this API is equivalent to reading from the GIC CPU
+ *    Interface's Highest Priority Pending Interrupt Register (\p
+ *        GICC_HPPIR).
+ *
+ *
+ * @retval irq
+ *    An interrupt line number for the virtual GIC.
+ * @retval source
+ *    The ID of the originating vCPU of a Software-Generated Interrupt.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+    struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5137)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(r0);
+    result.source = (uint8_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_highest_priority_pending_return
+_okl4_sys_interrupt_get_highest_priority_pending(void)
+{
+    struct _okl4_sys_interrupt_get_highest_priority_pending_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5137) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.irq = (okl4_interrupt_number_t)(x0);
+    result.source = (uint8_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Fetch the payload flags of a virtual interrupt.
+ *
+ *    @details
+ *    This fetches and clears the accumulated payload flags for a virtual
+ *    interrupt that has been raised by the Microvisor, or by a vCPU
+ *        invoking
+ *    the @ref okl4_sys_vinterrupt_raise API.
+ *
+ *    If the virtual interrupt is configured for level triggering, clearing
+ *        the
+ *    accumulated flags by calling this function will also clear the
+ *        pending state
+ *    of the interrupt.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ * @retval payload
+ *    Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp payload_tmp;
+    struct _okl4_sys_interrupt_get_payload_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5132)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    payload_tmp.words.lo = r1;
+    payload_tmp.words.hi = r2;
+    result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_get_payload_return
+_okl4_sys_interrupt_get_payload(okl4_interrupt_number_t irq)
+{
+    struct _okl4_sys_interrupt_get_payload_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5132) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.payload = (okl4_virq_flags_t)(x1);
+    return result;
+}
+
+#endif
+
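+/*
+ * Illustrative sketch (compiled out; not part of the generated API):
+ * fetching and testing the accumulated payload flags of a virtual
+ * interrupt. Which flag bits are meaningful is decided by the raising
+ * party, so the bit index here is a placeholder.
+ */
+#if 0
+static okl4_bool_t
+example_virq_flag_set(okl4_interrupt_number_t virq, unsigned int flag_bit)
+{
+    struct _okl4_sys_interrupt_get_payload_return ret;
+
+    /* Fetches and clears the accumulated flags for this virtual interrupt. */
+    ret = _okl4_sys_interrupt_get_payload(virq);
+    if (ret.error != OKL4_OK)
+        return 0;
+
+    return (ret.payload & ((okl4_virq_flags_t)1 << flag_bit)) != 0;
+}
+#endif
+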
+/**
+ *
+ * @brief Query the number of supported CPUs and interrupt lines.
+ *
+ *    @details
+ *    This API returns the number of CPUs and interrupt lines supported by
+ *        the
+ *    virtual interrupt controller, in the same form as is found in the GIC
+ *    Distributor's Interrupt Controller Type Register (\p GICD_TYPER), in
+ *    the \p CPUNumber and \p ITLinesNumber fields.
+ *
+ *
+ * @retval cpunumber
+ *    The number of supported target CPUs, minus 1.
+ * @retval itnumber
+ *    The number of supported groups of 32 interrupt lines, minus 1.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+    struct _okl4_sys_interrupt_limits_return result;
+
+    register uint32_t r0 asm("r0");
+    register uint32_t r1 asm("r1");
+    __asm__ __volatile__(
+            ""hvc(5138)"\n\t"
+            : "=r"(r0), "=r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    result.cpunumber = (okl4_count_t)(r0);
+    result.itnumber = (okl4_count_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_interrupt_limits_return
+_okl4_sys_interrupt_limits(void)
+{
+    struct _okl4_sys_interrupt_limits_return result;
+
+    register okl4_register_t x0 asm("x0");
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5138) "\n\t"
+            : "=r"(x0), "=r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.cpunumber = (okl4_count_t)(x0);
+    result.itnumber = (okl4_count_t)(x1);
+    return result;
+}
+
+#endif
+
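+/*
+ * Illustrative sketch (compiled out; not part of the generated API):
+ * converting the GICD_TYPER-style values returned above into plain counts.
+ * Both fields are documented as "minus 1", and itnumber counts groups of 32
+ * interrupt lines.
+ */
+#if 0
+static void
+example_query_gic_limits(okl4_count_t *num_cpus, okl4_count_t *num_irq_lines)
+{
+    struct _okl4_sys_interrupt_limits_return limits;
+
+    limits = _okl4_sys_interrupt_limits();
+    *num_cpus = limits.cpunumber + 1U;              /* CPUNumber is N - 1  */
+    *num_irq_lines = 32U * (limits.itnumber + 1U);  /* 32 lines per group  */
+}
+#endif
+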
+/**
+ *
+ * @brief Disable delivery of an interrupt.
+ *
+ *    @details
+ *    This prevents future delivery of the specified interrupt. It does not
+ *    affect any currently active delivery (that is, end-of-interrupt must
+ *    still be called). It also does not affect the pending state, so it
+ *        cannot
+ *    cause loss of edge-triggered interrupts.
+ *
+ *    @note Invoking this API is equivalent to writing a single bit to one
+ *        of the
+ *    GIC Distributor's Interrupt Clear-Enable Registers (\p
+ *        GICD_ICENABLERn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    __asm__ __volatile__(
+            ""hvc(5130)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_mask(okl4_interrupt_number_t irq)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    __asm__ __volatile__(
+            "" hvc(5130) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Raise a Software-Generated Interrupt.
+ *
+ *    @details
+ *    This allows a Software-Generated Interrupt (with interrupt number
+ *        between
+ *    0 and 15) to be raised, targeted at a specified set of vCPUs within
+ *        the
+ *    same Cell. No capability is required, but interrupts cannot be raised
+ *        to
+ *    other Cells with this API.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC
+ *        Distributor's
+ *    Software Generated Interrupt Register (\p GICD_SGIR).
+ *
+ *    @note This API is distinct from the @ref okl4_sys_vinterrupt_raise
+ *        API,
+ *    which raises a virtual interrupt source which may communicate across
+ *    Cell boundaries, and requires an explicit capability.
+ *
+ * @param sgir
+ *    A description of the Software-Generated Interrupt to raise.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)sgir;
+    __asm__ __volatile__(
+            ""hvc(5145)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_raise(okl4_gicd_sgir_t sgir)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)sgir;
+    __asm__ __volatile__(
+            "" hvc(5145) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Set the interrupt priority binary point for the calling vCPU.
+ *
+ *    @details
+ *    The GIC splits IRQ priority values into two subfields: the group
+ *        priority
+ *    and the subpriority. The binary point is the index of the most
+ *        significant
+ *    bit of the subpriority (that is, one less than the number of
+ *        subpriority
+ *    bits).
+ *
+ *    An interrupt can preempt another active interrupt only if its group
+ *        priority
+ *    is higher than the running group priority; the subpriority is ignored
+ *        for
+ *    this comparison. The subpriority is used to determine which of two
+ *        equal
+ *    priority interrupts will be delivered first.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Binary Point Register (\p GICC_BPR).
+ *
+ * @param binary_point
+ *    The number of bits in the subpriority field, minus 1.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)binary_point;
+    __asm__ __volatile__(
+            ""hvc(5139)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_binary_point(uint8_t binary_point)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)binary_point;
+    __asm__ __volatile__(
+            "" hvc(5139) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
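+/*
+ * Illustrative sketch (compiled out; not part of the generated API): effect
+ * of the binary point. With binary_point == 3, bits [3:0] of a priority
+ * value are subpriority and bits [7:4] are group priority, so priority 0x80
+ * (group 0x8) can preempt an interrupt running at 0x90 (group 0x9), while
+ * 0x80 and 0x8f never preempt each other.
+ */
+#if 0
+static okl4_error_t
+example_set_binary_point(void)
+{
+    /* Bits [7:4] decide preemption; bits [3:0] only order equal groups. */
+    return _okl4_sys_interrupt_set_binary_point(3);
+}
+#endif
+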
+/**
+ *
+ * @brief Change the configuration of an interrupt.
+ *
+ *    @details
+ *    This sets the triggering type of a specified interrupt to either
+ *    edge or level triggering.
+ *
+ *    The specified interrupt must be disabled.
+ *
+ *    @note Some interrupt sources only support one triggering type. In
+ *        this case,
+ *    calling this API for the interrupt will have no effect.
+ *
+ *    @note Invoking this API is equivalent to writing a single two-bit
+ *        field of
+ *    one of the GIC Distributor's Interrupt Configuration Registers (\p
+ *    GICD_ICFGRn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param icfgr
+ *    The configuration bits for the interrupt line.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+        okl4_gicd_icfgr_t icfgr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)icfgr;
+    __asm__ __volatile__(
+            ""hvc(5140)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_config(okl4_interrupt_number_t irq,
+        okl4_gicd_icfgr_t icfgr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)icfgr;
+    __asm__ __volatile__(
+            "" hvc(5140) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable or disable the signaling of interrupts to the vCPU.
+ *
+ *    @details
+ *    Enable or disable the signaling of interrupts by the virtual CPU
+ *        interface
+ *    to the connected vCPU.
+ *
+ *    @note Interrupt signaling is initially disabled, as required by the
+ *        GIC
+ *    API specification. This API must therefore be invoked at least once
+ *        before
+ *    any interrupts will be delivered.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Control Register (\p GICC_CTLR) using the "GICv1 without
+ *    Security Extensions or Non-Secure" format, which contains only a
+ *        single
+ *    enable bit.
+ *
+ * @param enable
+ *    A boolean value for GIC CPU interface enable.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)enable;
+    __asm__ __volatile__(
+            ""hvc(5141)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_control(okl4_bool_t enable)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)enable;
+    __asm__ __volatile__(
+            "" hvc(5141) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Change the delivery priority of an interrupt.
+ *
+ *    @details
+ *    This changes the delivery priority of an interrupt. It has no
+ *        immediate
+ *    effect on currently active interrupts, but will take effect once the
+ *    interrupt is deactivated.
+ *
+ *    @note The number of significant bits in this value is
+ *    implementation-defined. In this configuration, 4 significant priority
+ *    bits are implemented. The most significant bit is always at the high
+ *        end
+ *    of the priority byte; that is, at bit 7.
+ *
+ *    @note Smaller values represent higher priority. The highest possible
+ *    priority is 0; the lowest possible priority has all implemented bits
+ *        set,
+ *    and in this implementation is currently 0xf0.
+ *
+ *    @note Invoking this API is equivalent to writing a single byte of one
+ *        of the
+ *    GIC Distributor's Interrupt Priority Registers (\p GICD_IPRIORITYn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param priority
+ *    A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)priority;
+    __asm__ __volatile__(
+            ""hvc(5142)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority(okl4_interrupt_number_t irq, uint8_t priority)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)priority;
+    __asm__ __volatile__(
+            "" hvc(5142) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Set the minimum interrupt priority of the calling vCPU.
+ *
+ *    @details
+ *    This API sets the calling vCPU's minimum running interrupt priority.
+ *    Interrupts will only be delivered if they have priority higher than
+ *        this
+ *    value.
+ *
+ *    @note Higher priority corresponds to a lower priority value; i.e.,
+ *        the
+ *    highest priority value is 0.
+ *
+ *    @note The priority mask is initially set to 0, which prevents all
+ *        interrupt
+ *    delivery, as required by the GIC API specification. This API must
+ *        therefore
+ *    be invoked at least once before any interrupts will be delivered.
+ *
+ *    @note Invoking this API is equivalent to writing to the GIC CPU
+ *    Interface's Interrupt Priority Mask Register (\p GICC_PMR).
+ *
+ * @param priority_mask
+ *    A GIC priority value in the range 0-240.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)priority_mask;
+    __asm__ __volatile__(
+            ""hvc(5143)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_priority_mask(uint8_t priority_mask)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)priority_mask;
+    __asm__ __volatile__(
+            "" hvc(5143) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
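+/*
+ * Illustrative sketch (compiled out; not part of the generated API):
+ * minimal virtual GIC bring-up using the wrappers above. The CPU interface
+ * enable and the priority mask both start in states that block delivery, so
+ * they must be set before any interrupt can reach this vCPU; the distributor
+ * enable is shown for completeness.
+ */
+#if 0
+static okl4_error_t
+example_vgic_enable(void)
+{
+    okl4_error_t err;
+
+    /* Enable the distributor (GICD_CTLR enable bit). */
+    err = _okl4_sys_interrupt_dist_enable(1);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Enable signaling to this vCPU (GICC_CTLR enable bit). */
+    err = _okl4_sys_interrupt_set_control(1);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Permit delivery of priorities higher (numerically lower) than 0xf0. */
+    return _okl4_sys_interrupt_set_priority_mask(0xf0);
+}
+#endif
+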
+/**
+ *
+ * @brief Change the delivery targets of a shared interrupt.
+ *
+ *    @details
+ *    This sets the subset of a Cell's vCPUs to which the specified shared
+ *    interrupt (with an interrupt number between 32 and 1019) can be
+ *        delivered.
+ *    The target vCPUs are specified by an 8-bit bitfield. Note that no
+ *        more
+ *    than 8 targets are supported by the GIC API, so vCPUs with IDs beyond
+ *        8
+ *    will never receive interrupts.
+ *
+ *    @note The GIC API does not specify how or when the implementation
+ *        selects a
+ *    target for interrupt delivery. Most hardware implementations deliver
+ *        to
+ *    all possible targets simultaneously, and then cancel all but the
+ *        first to
+ *    be acknowledged. In the interests of efficiency, the OKL4 Microvisor
+ *        does
+ *    not implement this behaviour; instead, it chooses an arbitrary target
+ *        when
+ *    the interrupt first becomes deliverable.
+ *
+ *    @note Invoking this API is equivalent to writing a single byte of one
+ *        of the
+ *    GIC Distributor's Interrupt Targets Registers (\p GICD_ITARGETSRn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param cpu_mask
+ *    Bitmask of vCPU IDs.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)cpu_mask;
+    __asm__ __volatile__(
+            ""hvc(5144)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_set_targets(okl4_interrupt_number_t irq, uint8_t cpu_mask)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)cpu_mask;
+    __asm__ __volatile__(
+            "" hvc(5144) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable delivery of an interrupt.
+ *
+ *    @details
+ *    This permits delivery of the specified interrupt, once it is pending
+ *        and
+ *    inactive and has sufficiently high priority.
+ *
+ *    @note Invoking this API is equivalent to writing a single bit to one
+ *        of the
+ *    GIC Distributor's Interrupt Set-Enable Registers (\p
+ *        GICD_ISENABLERn).
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    __asm__ __volatile__(
+            ""hvc(5131)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_interrupt_unmask(okl4_interrupt_number_t irq)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    __asm__ __volatile__(
+            "" hvc(5131) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
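+/*
+ * Illustrative sketch (compiled out; not part of the generated API):
+ * configuring and enabling a shared interrupt with the wrappers above. The
+ * IRQ number, trigger configuration, priority and target mask are
+ * placeholders; set_config is called first because the interrupt must still
+ * be disabled at that point.
+ */
+#if 0
+static okl4_error_t
+example_setup_spi(okl4_interrupt_number_t irq, okl4_gicd_icfgr_t icfgr)
+{
+    okl4_error_t err;
+
+    /* Trigger mode (GICD_ICFGRn); the interrupt is still disabled here. */
+    err = _okl4_sys_interrupt_set_config(irq, icfgr);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Mid-range priority; 4 bits are implemented, held in bits [7:4]. */
+    err = _okl4_sys_interrupt_set_priority(irq, 0x80);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Allow delivery to vCPUs 0 and 1 only (GICD_ITARGETSRn). */
+    err = _okl4_sys_interrupt_set_targets(irq, 0x3);
+    if (err != OKL4_OK)
+        return err;
+
+    /* Finally permit delivery (GICD_ISENABLERn). */
+    return _okl4_sys_interrupt_unmask(irq);
+}
+#endif
+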
+/**
+ *
+ * @brief Enter the kernel interactive debugger.
+ *
+ * @details
+ * This operation is available on a debug build of the kernel; otherwise it
+ * is a no-op.
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5120)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_kdb_interact(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5120) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Set the debug name of the addressed kernel object.
+ *
+ *    @details
+ *    The debug version of the Microvisor kernel supports naming of kernel
+ *        objects
+ *    to aid debugging. The object names are visible to external debuggers
+ *        such
+ *    as a JTAG tool, as well as the in-built interactive kernel debugger.
+ *
+ *    The target object may be any Microvisor object for which the caller
+ *        has a
+ *    capability with the master rights.
+ *
+ *    Debug names may be up to 16 characters long, with four characters
+ *        stored per
+ *    \p name[x] argument in little-endian order (on a 32-bit machine).
+ *
+ * @param object
+ *    The target kernel object id.
+ * @param name0
+ * @param name1
+ * @param name2
+ * @param name3
+ *
+ * @retval error
+ *    Resulting error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+        , uint32_t name2, uint32_t name3)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)object;
+    register uint32_t r1 asm("r1") = (uint32_t)name0;
+    register uint32_t r2 asm("r2") = (uint32_t)name1;
+    register uint32_t r3 asm("r3") = (uint32_t)name2;
+    register uint32_t r4 asm("r4") = (uint32_t)name3;
+    __asm__ __volatile__(
+            ""hvc(5121)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_kdb_set_object_name(okl4_kcap_t object, uint32_t name0, uint32_t name1
+        , uint32_t name2, uint32_t name3)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)object;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)name0;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)name1;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)name2;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)name3;
+    __asm__ __volatile__(
+            "" hvc(5121) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
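+/*
+ * Illustrative sketch (compiled out; not part of the generated API):
+ * packing a short debug name into the four 32-bit words expected above,
+ * four characters per word in little-endian byte order as described. The
+ * name is truncated to, or NUL-padded within, 16 bytes.
+ */
+#if 0
+static okl4_error_t
+example_set_object_name(okl4_kcap_t object, const char *name)
+{
+    uint32_t words[4] = { 0, 0, 0, 0 };
+    unsigned int i;
+
+    for (i = 0; i < 16U && name[i] != '\0'; i++) {
+        /* Byte i of the name goes into byte (i % 4) of word (i / 4). */
+        words[i / 4U] |= (uint32_t)(unsigned char)name[i] << ((i % 4U) * 8U);
+    }
+
+    return _okl4_sys_kdb_set_object_name(object, words[0], words[1],
+            words[2], words[3]);
+}
+#endif
+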
+/**
+ *
+ * @brief Call a kernel support package (KSP) defined interface.
+ *
+ *    @details
+ *    The KSP procedure call allows the caller to interact with customer
+ *    specific functions provided by the kernel support package. The caller
+ *    must possess a capability with the appropriate rights to a KSP agent
+ *        in
+ *    order to call this interface.
+ *
+ *    The remaining parameters provided are passed directly to the KSP
+ *        without
+ *    any inspection.
+ *
+ *    The KSP can return an error code and up to three return words.
+ *
+ * @param agent
+ *    The target KSP agent
+ * @param operation
+ *    The operation to be performed
+ * @param arg0
+ *    An argument for the operation
+ * @param arg1
+ *    An argument for the operation
+ * @param arg2
+ *    An argument for the operation
+ * @param arg3
+ *    An argument for the operation
+ *
+ * @retval error
+ *    The resulting error
+ * @retval ret0
+ *    A return value for the operation
+ * @retval ret1
+ *    A return value for the operation
+ * @retval ret2
+ *    A return value for the operation
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+        okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+        okl4_ksp_arg_t arg3)
+{
+    struct _okl4_sys_ksp_procedure_call_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)agent;
+    register uint32_t r1 asm("r1") = (uint32_t)operation;
+    register uint32_t r2 asm("r2") = (uint32_t)arg0;
+    register uint32_t r3 asm("r3") = (uint32_t)arg1;
+    register uint32_t r4 asm("r4") = (uint32_t)arg2;
+    register uint32_t r5 asm("r5") = (uint32_t)arg3;
+    __asm__ __volatile__(
+            ""hvc(5197)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.ret0 = (okl4_ksp_arg_t)(r1);
+    result.ret1 = (okl4_ksp_arg_t)(r2);
+    result.ret2 = (okl4_ksp_arg_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_ksp_procedure_call_return
+_okl4_sys_ksp_procedure_call(okl4_kcap_t agent, okl4_ksp_arg_t operation,
+        okl4_ksp_arg_t arg0, okl4_ksp_arg_t arg1, okl4_ksp_arg_t arg2,
+        okl4_ksp_arg_t arg3)
+{
+    struct _okl4_sys_ksp_procedure_call_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)agent;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)operation;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)arg0;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)arg1;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)arg2;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)arg3;
+    __asm__ __volatile__(
+            "" hvc(5197) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.ret0 = (okl4_ksp_arg_t)(x1);
+    result.ret1 = (okl4_ksp_arg_t)(x2);
+    result.ret2 = (okl4_ksp_arg_t)(x3);
+    return result;
+}
+
+#endif
+
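+/*
+ * Illustrative usage sketch: invoke a KSP operation and check the error
+ * before using the return words. The capability `ksp_agent`, the operation
+ * code 0 and the OKL4_ERROR_OK success value are assumed example names.
+ *
+ *     struct _okl4_sys_ksp_procedure_call_return r =
+ *             _okl4_sys_ksp_procedure_call(ksp_agent, 0, 0, 0, 0, 0);
+ *     if (r.error == OKL4_ERROR_OK) {
+ *         // r.ret0, r.ret1 and r.ret2 carry the KSP-defined results
+ *     }
+ */
+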
+/**
+ *
+ * @brief Attach a segment to an MMU.
+ *
+ *    @details
+ *    Before any mappings based on a segment can be established in the MMU's
+ *    address space, the segment must be attached to the MMU. Attaching a
+ *    segment serves to reference count the segment, preventing modifications
+ *    from being made to the segment.
+ *
+ *    A segment may be attached to an MMU multiple times, at the same or
+ *    different index. Each time a segment is attached to an MMU, the
+ *    attachment reference count is incremented.
+ *
+ *    Attaching segments to an MMU is also important for vMMU objects in
+ *    that the segment attachment index is used as a segment reference in
+ *    the virtual page table format.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param segment_id
+ *    The target segment id.
+ * @param index
+ *    Index into the MMU's segment attachment table.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+        okl4_count_t index, okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)segment_id;
+    register uint32_t r2 asm("r2") = (uint32_t)index;
+    register uint32_t r3 asm("r3") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5152)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_attach_segment(okl4_kcap_t mmu_id, okl4_kcap_t segment_id,
+        okl4_count_t index, okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)segment_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5152) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Detach a segment from an MMU.
+ *
+ *    @details
+ *    A segment can be detached from an MMU or vMMU, causing its reference
+ *    count to decrease. When the reference count reaches zero, the
+ *    attachment is removed and all mappings in the MMU object relating to
+ *    the segment are removed.
+ *
+ *    The detach-segment operation is potentially a long running operation,
+ *    especially if invoked on a vMMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)index;
+    __asm__ __volatile__(
+            ""hvc(5153)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_detach_segment(okl4_kcap_t mmu_id, okl4_count_t index)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)index;
+    __asm__ __volatile__(
+            "" hvc(5153) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Flush a range of virtual addresses from an MMU.
+ *
+ *    @details
+ *    This causes the kernel to remove all mappings covering the specified
+ *    virtual address range.
+ *
+ *    @note The size of the range must be a multiple of 1MB and the
+ *    starting virtual address must be 1MB aligned.
+ *    There is no support for flushing at a finer granularity.
+ *    If a fine grained flush is required, the caller should use the
+ *    @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ *    The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    The starting virtual address of the range.
+ *    (Must be 1MB aligned)
+ * @param size
+ *    Size of the range. (Must be a multiple of 1MB)
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)size;
+    __asm__ __volatile__(
+            ""hvc(5154)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5154) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
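+/*
+ * Illustrative usage sketch (assumed MMU capability `mmu_cap` and example
+ * vaddr/size inputs): round the region out to the required 1MB granularity
+ * before flushing.
+ *
+ *     const okl4_laddr_tr_t MB = 0x100000U;
+ *     okl4_laddr_tr_t start = vaddr & ~(MB - 1U);                    // align down
+ *     okl4_lsize_tr_t len = ((vaddr + size + MB - 1U) & ~(MB - 1U)) - start;
+ *     okl4_error_t err = _okl4_sys_mmu_flush_range(mmu_cap, start, len);
+ */
+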
+/**
+ *
+ * @brief Flush a range of virtual addresses from an MMU.
+ *
+ *    @details
+ *    This causes the kernel to remove all mappings covering the specified
+ *    virtual address range.
+ *
+ *    @note The size of the range must be a multiple of 1MB and the
+ *    starting virtual address must be 1MB aligned.
+ *    There is no support for flushing at a finer granularity.
+ *    If a fine grained flush is required, the caller should use the
+ *    @ref _okl4_sys_mmu_unmap_page operation.
+ *
+ *    The flush-range operation is potentially a long running operation.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+    __asm__ __volatile__(
+            ""hvc(5155)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_flush_range_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+    __asm__ __volatile__(
+            "" hvc(5155) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation performs a lookup in the MMU's pagetable for a mapping
+ *    derived from a specified segment.
+ *
+ *    If a mapping is found that is derived from the specified segment, the
+ *    operation will return the segment offset, size and the page attributes
+ *    associated with the mapping.
+ *
+ *    If a segment_index value of OKL4_KCAP_INVALID is specified, the
+ *    operation will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval offset
+ *    Offset into the segment.
+ * @retval size
+ *    Size of the mapping, in bytes. Size will be one of the supported
+ *    machine page-sizes. If a segment search was performed, the lower
+ *    10-bits of size contain the returned segment-index.
+ * @retval page_attr
+ *    Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp size_tmp;
+    struct _okl4_sys_mmu_lookup_page_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5156)"\n\t"
+            : "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.offset = (okl4_psize_tr_t)(r1);
+    size_tmp.words.lo = r2;
+    size_tmp.words.hi = r3;
+    result.size = (okl4_mmu_lookup_size_t)(size_tmp.val);
+    result.page_attr = (_okl4_page_attribute_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_page_return
+_okl4_sys_mmu_lookup_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_page_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5156) "\n\t"
+            : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.offset = (okl4_psize_tr_t)(x1);
+    result.size = (okl4_mmu_lookup_size_t)(x2);
+    result.page_attr = (_okl4_page_attribute_t)(x3);
+    return result;
+}
+
+#endif
+
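+/*
+ * Illustrative usage sketch (assumed MMU capability `mmu_cap`, example
+ * address `vaddr`, and the usual OKL4_ERROR_OK success value): when
+ * OKL4_KCAP_INVALID is passed as the segment index, the lower 10 bits of the
+ * returned size carry the matching attachment index and are masked off here.
+ *
+ *     struct _okl4_sys_mmu_lookup_page_return lp =
+ *             _okl4_sys_mmu_lookup_page(mmu_cap, vaddr, OKL4_KCAP_INVALID);
+ *     if (lp.error == OKL4_ERROR_OK) {
+ *         okl4_count_t seg = (okl4_count_t)(lp.size & 0x3ffU);
+ *         okl4_mmu_lookup_size_t size =
+ *                 lp.size & ~(okl4_mmu_lookup_size_t)0x3ffU;
+ *         // seg is the attachment index, size the page-sized mapping length
+ *     }
+ */
+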
+/**
+ *
+ * @brief Lookup a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation performs a lookup in the MMU's pagetable for a mapping
+ *    derived from a specified segment.
+ *
+ *    If a mapping is found that is derived from the specified segment, the
+ *    operation will return the segment offset, size and the page attributes
+ *    associated with the mapping.
+ *
+ *    If a segment_index value of OKL4_KCAP_INVALID is specified, the
+ *    operation will search for a matching segment in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ *
+ * @retval segment_index
+ *    Index into the MMU's segment attachment table, or error.
+ * @retval offset_pn
+ *    Offset into the segment in units of page numbers.
+ * @retval count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @retval page_attr
+ *    Mapping attributes.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_pn_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5157)"\n\t"
+            : "=r"(r3), "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    result.segment_index = (okl4_mmu_lookup_index_t)(r0);
+    result.offset_pn = (okl4_psize_pn_t)(r1);
+    result.count_pn = (okl4_lsize_pn_t)(r2);
+    result.page_attr = (_okl4_page_attribute_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_mmu_lookup_pn_return
+_okl4_sys_mmu_lookup_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index)
+{
+    struct _okl4_sys_mmu_lookup_pn_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5157) "\n\t"
+            : "=r"(x3), "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.segment_index = (okl4_mmu_lookup_index_t)(x0);
+    result.offset_pn = (okl4_psize_pn_t)(x1);
+    result.count_pn = (okl4_lsize_pn_t)(x2);
+    result.page_attr = (_okl4_page_attribute_t)(x3);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation installs a new mapping into the MMU at the specified
+ *    virtual address. The mapping's physical address is determined from the
+ *    specified segment and offset, and the mapping's size and attributes
+ *    are provided in \p size and \p page_attr.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param offset
+ *    Offset into the segment.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param page_attr
+ *    Mapping attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+        , _okl4_page_attribute_t page_attr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)offset;
+    register uint32_t r4 asm("r4") = (uint32_t)size;
+    register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+    __asm__ __volatile__(
+            ""hvc(5158)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_psize_tr_t offset, okl4_lsize_tr_t size
+        , _okl4_page_attribute_t page_attr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)offset;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)size;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+    __asm__ __volatile__(
+            "" hvc(5158) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
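+/*
+ * Illustrative workflow sketch (the capabilities `mmu_cap` and `seg_cap`, the
+ * permission constant, and the prepared `vaddr` and `page_attr` values are
+ * all assumed examples): attach the segment once, then reuse its attachment
+ * index for the map call.
+ *
+ *     okl4_count_t idx = 0;   // a free slot in the MMU's attachment table
+ *     okl4_error_t err = _okl4_sys_mmu_attach_segment(mmu_cap, seg_cap, idx,
+ *             OKL4_PAGE_PERMS_RWX);
+ *     if (err == OKL4_ERROR_OK) {
+ *         err = _okl4_sys_mmu_map_page(mmu_cap, vaddr, idx,
+ *                 0, 0x1000, page_attr);   // offset 0, one 4KB page
+ *     }
+ */
+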
+/**
+ *
+ * @brief Create a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation installs a new mapping into the MMU at the specified
+ *    virtual address. The mapping's physical address is determined from the
+ *    specified segment and offset, and the mapping's size and attributes
+ *    are provided in \p count_pn and \p page_attr.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param segment_offset_pn
+ *    Offset into the segment in units of page numbers.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param page_attr
+ *    Mapping attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+        okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)segment_offset_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)count_pn;
+    register uint32_t r5 asm("r5") = (uint32_t)page_attr;
+    __asm__ __volatile__(
+            ""hvc(5159)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_map_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_psize_pn_t segment_offset_pn,
+        okl4_lsize_pn_t count_pn, _okl4_page_attribute_t page_attr)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)segment_offset_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)count_pn;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)page_attr;
+    __asm__ __volatile__(
+            "" hvc(5159) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation removes a mapping from the MMU at the specified virtual
+ *    address. The size and address specified must match the size and base
+ *    address of the mapping being removed.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param size
+ *    Size of the mapping, in bytes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)size;
+    __asm__ __volatile__(
+            ""hvc(5160)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_page(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_lsize_tr_t size)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)size;
+    __asm__ __volatile__(
+            "" hvc(5160) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Remove a mapping at a virtual address in the MMU.
+ *
+ *    @details
+ *    This operation removes a mapping from the MMU at the specified virtual
+ *    address. The size and address specified must match the size and base
+ *    address of the mapping being removed.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)count_pn;
+    __asm__ __volatile__(
+            ""hvc(5161)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_unmap_pn(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_lsize_pn_t count_pn)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)count_pn;
+    __asm__ __volatile__(
+            "" hvc(5161) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param attrs
+ *    Mapping cache attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_cache_t attrs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)size;
+    register uint32_t r4 asm("r4") = (uint32_t)attrs;
+    __asm__ __volatile__(
+            ""hvc(5162)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_attrs(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_cache_t attrs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+    __asm__ __volatile__(
+            "" hvc(5162) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param vaddr
+ *    Virtual address of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param size
+ *    Size of the mapping, in bytes.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vaddr;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)size;
+    register uint32_t r4 asm("r4") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5163)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_page_perms(okl4_kcap_t mmu_id, okl4_laddr_tr_t vaddr,
+        okl4_count_t segment_index, okl4_lsize_tr_t size,
+        okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vaddr;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)size;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5163) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the cache attributes of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param attrs
+ *    Mapping cache attributes.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_cache_t attrs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)attrs;
+    __asm__ __volatile__(
+            ""hvc(5164)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_attrs(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_cache_t attrs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)attrs;
+    __asm__ __volatile__(
+            "" hvc(5164) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Update the page permissions of a mapping in the MMU.
+ *
+ * @param mmu_id
+ *    The target MMU id.
+ * @param laddr_pn
+ *    Logical address page-number of the mapping.
+ * @param segment_index
+ *    Index into the MMU's segment attachment table.
+ * @param count_pn
+ *    The number of consecutive pages to map/unmap.
+ * @param perms
+ *    Mapping permissions.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_perms_t perms)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)mmu_id;
+    register uint32_t r1 asm("r1") = (uint32_t)laddr_pn;
+    register uint32_t r2 asm("r2") = (uint32_t)segment_index;
+    register uint32_t r3 asm("r3") = (uint32_t)count_pn;
+    register uint32_t r4 asm("r4") = (uint32_t)perms;
+    __asm__ __volatile__(
+            ""hvc(5165)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_mmu_update_pn_perms(okl4_kcap_t mmu_id, okl4_laddr_pn_t laddr_pn,
+        okl4_count_t segment_index, okl4_lsize_pn_t count_pn,
+        okl4_page_perms_t perms)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)mmu_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)laddr_pn;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)segment_index;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)count_pn;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)perms;
+    __asm__ __volatile__(
+            "" hvc(5165) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * A NULL system-call for latency measurement.
+ *
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+    register uint32_t r0 asm("r0");
+    __asm__ __volatile__(
+            ""hvc(5198)"\n\t"
+            : "=r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_performance_null_syscall(void)
+{
+    register okl4_register_t x0 asm("x0");
+    __asm__ __volatile__(
+            "" hvc(5198) "\n\t"
+            : "=r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Control a pipe, including reset, ready and halt functionality.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param control
+ *    The state control argument.
+ *
+ * @retval error
+ *    The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)control;
+    __asm__ __volatile__(
+            ""hvc(5146)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_control(okl4_kcap_t pipe_id, okl4_pipe_control_t control)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)control;
+    __asm__ __volatile__(
+            "" hvc(5146) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Receive a message from a microvisor pipe.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param buf_size
+ *    Size of the receive buffer.
+ * @param data
+ *    Pointer to receive buffer.
+ *
+ * @retval error
+ *    The returned error code.
+ * @retval size
+ *    Size of the received message.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp size_tmp;
+    struct _okl4_sys_pipe_recv_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)buf_size;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            ""hvc(5147)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    size_tmp.words.lo = r1;
+    size_tmp.words.hi = r2;
+    result.size = (okl4_ksize_t)(size_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_pipe_recv_return
+_okl4_sys_pipe_recv(okl4_kcap_t pipe_id, okl4_vsize_t buf_size, uint8_t *data)
+{
+    struct _okl4_sys_pipe_recv_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)buf_size;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            "" hvc(5147) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.size = (okl4_ksize_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * Send a message to a microvisor pipe.
+ *
+ * @param pipe_id
+ *    The capability identifier of the pipe.
+ * @param size
+ *    Size of the message to send.
+ * @param data
+ *    Pointer to the message payload to send.
+ *
+ * @retval error
+ *    The returned error code.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)pipe_id;
+    register uint32_t r1 asm("r1") = (uint32_t)size;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            ""hvc(5148)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_pipe_send(okl4_kcap_t pipe_id, okl4_vsize_t size, const uint8_t *data)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)pipe_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)size;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)data;
+    __asm__ __volatile__(
+            "" hvc(5148) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
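+/*
+ * Illustrative usage sketch (assumed pipe capability `pipe_cap` and the usual
+ * OKL4_ERROR_OK success value): send a small payload, then attempt to receive
+ * a reply into a local buffer.
+ *
+ *     static const uint8_t msg[4] = { 'p', 'i', 'n', 'g' };
+ *     uint8_t buf[64];
+ *
+ *     okl4_error_t err = _okl4_sys_pipe_send(pipe_cap, sizeof(msg), msg);
+ *     if (err == OKL4_ERROR_OK) {
+ *         struct _okl4_sys_pipe_recv_return rx =
+ *                 _okl4_sys_pipe_recv(pipe_cap, sizeof(buf), buf);
+ *         // rx.error and rx.size describe the received message, if any
+ *     }
+ */
+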
+/**
+ *
+ * @brief Waive the current vCPU's priority.
+ *
+ *    @details
+ *    This operation allows a vCPU to change its waived priority. A vCPU has
+ *    both a base priority and its current priority.
+ *
+ *    The base priority is the statically assigned maximum priority that a
+ *    vCPU has been given. The current priority is the priority used for
+ *    system scheduling and is limited to the range of zero to the base
+ *    priority.
+ *
+ *    The `waive-priority` operation allows a vCPU to set its current
+ *    priority and is normally used to reduce its current priority. This
+ *    allows a vCPU to perform work at a lower system priority, and supports
+ *    the interleaved scheduling feature.
+ *
+ *    A vCPU's priority is restored to its base priority whenever an
+ *    interrupt that has the vCPU registered as its handler is raised. This
+ *    allows interrupt handling and guest operating systems to return to the
+ *    base priority to potentially do higher priority work.
+ *
+ *    After calling this interface an immediate reschedule will be
+ *    performed.
+ *
+ * @param priority
+ *    New vCPU priority.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)priority;
+    __asm__ __volatile__(
+            ""hvc(5151)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_priority_waive(okl4_priority_t priority)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)priority;
+    __asm__ __volatile__(
+            "" hvc(5151) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
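+/*
+ * Illustrative usage sketch: drop to the lowest priority before doing
+ * background work; the vCPU returns to its base priority when one of its
+ * registered interrupts is next raised. The priority value 0 is an example
+ * only.
+ *
+ *     okl4_error_t err = _okl4_sys_priority_waive((okl4_priority_t)0);
+ */
+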
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ *
+ * @retval reg_w0
+ * @retval reg_w1
+ * @retval reg_w2
+ * @retval reg_w3
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set)
+{
+    struct _okl4_sys_remote_get_register_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5200)"\n\t"
+            : "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    result.reg_w0 = (uint32_t)(r0);
+    result.reg_w1 = (uint32_t)(r1);
+    result.reg_w2 = (uint32_t)(r2);
+    result.reg_w3 = (uint32_t)(r3);
+    result.error = (okl4_error_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_get_register_return
+_okl4_sys_remote_get_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set)
+{
+    struct _okl4_sys_remote_get_register_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+    register okl4_register_t x2 asm("x2");
+    register okl4_register_t x3 asm("x3");
+    register okl4_register_t x4 asm("x4");
+    __asm__ __volatile__(
+            "" hvc(5200) "\n\t"
+            : "=r"(x2), "=r"(x3), "=r"(x4), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x5", "x6", "x7"
+            );
+
+
+    result.reg_w0 = (uint32_t)(x0);
+    result.reg_w1 = (uint32_t)(x1);
+    result.reg_w2 = (uint32_t)(x2);
+    result.reg_w3 = (uint32_t)(x3);
+    result.error = (okl4_error_t)(x4);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_GET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)set;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            ""hvc(5201)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_get_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            "" hvc(5201) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_READ_MEMORY32
+ *
+ * @param target
+ * @param address
+ *
+ * @retval data
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+    struct _okl4_sys_remote_read_memory32_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)(address        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5202)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.data = (uint32_t)(r0);
+    result.error = (okl4_error_t)(r1);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_remote_read_memory32_return
+_okl4_sys_remote_read_memory32(okl4_kcap_t target, okl4_laddr_t address)
+{
+    struct _okl4_sys_remote_read_memory32_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+    __asm__ __volatile__(
+            "" hvc(5202) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.data = (uint32_t)(x0);
+    result.error = (okl4_error_t)(x1);
+    return result;
+}
+
+#endif
+
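+/*
+ * Illustrative usage sketch (assumed debug capability `remote_cap`, example
+ * address, and the usual OKL4_ERROR_OK success value): read one 32-bit word
+ * from the remote target and check the error before using the data.
+ *
+ *     struct _okl4_sys_remote_read_memory32_return rd =
+ *             _okl4_sys_remote_read_memory32(remote_cap,
+ *                     (okl4_laddr_t)0x80000000U);
+ *     if (rd.error == OKL4_ERROR_OK) {
+ *         uint32_t word = rd.data;   // value read from the remote address
+ *     }
+ */
+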
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTER
+ *
+ * @param target
+ * @param reg_and_set
+ * @param reg_w0
+ * @param reg_w1
+ * @param reg_w2
+ * @param reg_w3
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+        uint32_t reg_w2, uint32_t reg_w3)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)reg_and_set;
+    register uint32_t r2 asm("r2") = (uint32_t)reg_w0;
+    register uint32_t r3 asm("r3") = (uint32_t)reg_w1;
+    register uint32_t r4 asm("r4") = (uint32_t)reg_w2;
+    register uint32_t r5 asm("r5") = (uint32_t)reg_w3;
+    __asm__ __volatile__(
+            ""hvc(5203)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_register(okl4_kcap_t target,
+        okl4_register_and_set_t reg_and_set, uint32_t reg_w0, uint32_t reg_w1,
+        uint32_t reg_w2, uint32_t reg_w3)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)reg_and_set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)reg_w0;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)reg_w1;
+    register okl4_register_t x4 asm("x4") = (okl4_register_t)reg_w2;
+    register okl4_register_t x5 asm("x5") = (okl4_register_t)reg_w3;
+    __asm__ __volatile__(
+            "" hvc(5203) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5)
+            :
+            : "cc", "memory", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_SET_REGISTERS
+ *
+ * @param target
+ * @param set
+ * @param regs
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)set;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            ""hvc(5204)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_set_registers(okl4_kcap_t target, okl4_register_set_t set,
+        void *regs)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)regs;
+    __asm__ __volatile__(
+            "" hvc(5204) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: REMOTE_WRITE_MEMORY32
+ *
+ * @param target
+ * @param address
+ * @param data
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+        uint32_t data)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)target;
+    register uint32_t r1 asm("r1") = (uint32_t)(address        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((address >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)data;
+    __asm__ __volatile__(
+            ""hvc(5205)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_remote_write_memory32(okl4_kcap_t target, okl4_laddr_t address,
+        uint32_t data)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)address;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)data;
+    __asm__ __volatile__(
+            "" hvc(5205) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * Retrieve suspend status.
+ *
+ * @param scheduler_id
+ *    The scheduler capability identifier.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval power_suspend_version
+ *    The power suspend versioning number.
+ * @retval power_suspend_running_count
+ *    The number of running power_suspend watched vCPUs.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+    struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5206)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    result.power_suspend_version = (uint32_t)(r1);
+    result.power_suspend_running_count = (uint32_t)(r2);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_metrics_status_suspended_return
+_okl4_sys_schedule_metrics_status_suspended(okl4_kcap_t scheduler_id)
+{
+    struct _okl4_sys_schedule_metrics_status_suspended_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1");
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5206) "\n\t"
+            : "=r"(x1), "=r"(x2), "+r"(x0)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.power_suspend_version = (uint32_t)(x1);
+    result.power_suspend_running_count = (uint32_t)(x2);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * Register a vCPU for suspend count tracking.
+ *
+ * @param scheduler_id
+ *    The scheduler capability identifier.
+ * @param vcpu_id
+ *    The target vCPU capability identifier.
+ * @param watch
+ *    Whether to register or unregister the vCPU.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+        okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1") = (uint32_t)vcpu_id;
+    register uint32_t r2 asm("r2") = (uint32_t)watch;
+    __asm__ __volatile__(
+            ""hvc(5207)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_metrics_watch_suspended(okl4_kcap_t scheduler_id,
+        okl4_kcap_t vcpu_id, okl4_bool_t watch)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)vcpu_id;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)watch;
+    __asm__ __volatile__(
+            "" hvc(5207) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
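+/*
+ * Illustrative usage sketch (assumed capabilities `sched_cap` and `vcpu_cap`,
+ * and the usual OKL4_ERROR_OK success value): register a vCPU for suspend
+ * tracking, then read back the suspend status.
+ *
+ *     okl4_error_t err = _okl4_sys_schedule_metrics_watch_suspended(sched_cap,
+ *             vcpu_cap, (okl4_bool_t)1);
+ *     if (err == OKL4_ERROR_OK) {
+ *         struct _okl4_sys_schedule_metrics_status_suspended_return st =
+ *                 _okl4_sys_schedule_metrics_status_suspended(sched_cap);
+ *         // st.power_suspend_running_count counts the running watched vCPUs
+ *     }
+ */
+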
+/**
+ *
+ * @brief Disable profiling of a physical CPU.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    __asm__ __volatile__(
+            ""hvc(5168)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_disable(okl4_kcap_t phys_cpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    __asm__ __volatile__(
+            "" hvc(5168) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a physical CPU.
+ *
+ *    This operation enables profiling of physical CPU related properties
+ *    such as core usage and context switch count.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval timestamp
+ *    The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp timestamp_tmp;
+    struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5169)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    timestamp_tmp.words.lo = r1;
+    timestamp_tmp.words.hi = r2;
+    result.timestamp = (uint64_t)(timestamp_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_cpu_enable_return
+_okl4_sys_schedule_profile_cpu_enable(okl4_kcap_t phys_cpu)
+{
+    struct _okl4_sys_schedule_profile_cpu_enable_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5169) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.timestamp = (uint64_t)(x1);
+    return result;
+}
+
+#endif
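+
+/*
+ * Illustrative sketch (not part of the generated API): enabling profiling
+ * on a physical CPU and capturing the returned timestamp that marks the
+ * start of the measurement window. `cpu_cap` is a hypothetical physical-CPU
+ * capability, and OKL4_ERROR_OK is assumed to be the success value defined
+ * in types.h.
+ *
+ *     static void example_profile_cpu(okl4_kcap_t cpu_cap)
+ *     {
+ *         struct _okl4_sys_schedule_profile_cpu_enable_return en =
+ *                 _okl4_sys_schedule_profile_cpu_enable(cpu_cap);
+ *         if (en.error == OKL4_ERROR_OK) {
+ *             // en.timestamp marks the start of the measurement window
+ *         }
+ *     }
+ */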
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a physical CPU core.
+ *
+ *    @details
+ *    This operation returns a set of profiling data relating to a
+ *    physical CPU. A timestamp of the current system time, in units of
+ *    microseconds, is recorded during the operation. The remaining data
+ *    fields indicate runtime and the number of events since the last
+ *    invocation of this operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics
+ *    to zero.
+ *
+ *    @par profile data
+ *    For a physical CPU, the returned data is:
+ *    - \p cpu_time: Idle time of the CPU in microseconds.
+ *    - \p context_switches: Number of context switches on this core.
+ *    - \p enabled: True if profiling is enabled on this CPU.
+ *
+ * @param phys_cpu
+ *    The physical CPU capability id.
+ * @param profile
+ *    `return by reference`. Profiling data.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)phys_cpu;
+    register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            ""hvc(5170)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_cpu_get_data(okl4_kcap_t phys_cpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)phys_cpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            "" hvc(5170) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Disable profiling of a vCPU.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5171)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_disable(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5171) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Enable profiling of a vCPU.
+ *
+ *    This operation enables profiling of vCPU related properties such as
+ *    execution time and context switch count.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ *
+ * @retval error
+ *    Resulting error.
+ * @retval timestamp
+ *    The current timestamp.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp timestamp_tmp;
+    struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5172)"\n\t"
+            : "=r"(r1), "=r"(r2), "+r"(r0)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    timestamp_tmp.words.lo = r1;
+    timestamp_tmp.words.hi = r2;
+    result.timestamp = (uint64_t)(timestamp_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_schedule_profile_vcpu_enable_return
+_okl4_sys_schedule_profile_vcpu_enable(okl4_kcap_t vcpu)
+{
+    struct _okl4_sys_schedule_profile_vcpu_enable_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1");
+    __asm__ __volatile__(
+            "" hvc(5172) "\n\t"
+            : "=r"(x1), "+r"(x0)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.timestamp = (uint64_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Retrieve profiling data relating to a vCPU.
+ *
+ *    @details
+ *    This operation returns a set of profiling data relating to a vCPU.
+ *    A timestamp of the current system time, in units of microseconds, is
+ *    recorded during the operation. The remaining data fields indicate
+ *    runtime and the number of events since the last invocation of this
+ *    operation.
+ *
+ *    After the profiling data is retrieved, the kernel resets all metrics
+ *    to zero.
+ *
+ *    @par profile data
+ *    For a vCPU, the returned data is:
+ *    - \p cpu_time: Execution time of the vCPU in microseconds.
+ *    - \p context_switches: Number of context switches.
+ *    - \p cpu_migrations: Number of migrations between physical CPUs.
+ *    - \p enabled: True if profiling is enabled on this vCPU.
+ *
+ * @param vcpu
+ *    The target vCPU id.
+ * @param profile
+ *    `return by reference`. Profiling data.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            ""hvc(5173)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
+        struct okl4_schedule_profile_data *profile)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)(uintptr_t)profile;
+    __asm__ __volatile__(
+            "" hvc(5173) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
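+
+/*
+ * Illustrative sketch (not part of the generated API): sampling the
+ * profiling counters of a vCPU. The okl4_schedule_profile_data structure is
+ * defined in types.h; the field names in the comment follow the description
+ * above. `vcpu_cap` is a hypothetical vCPU capability and OKL4_ERROR_OK is
+ * assumed to be the success value from types.h.
+ *
+ *     static void example_sample_vcpu(okl4_kcap_t vcpu_cap)
+ *     {
+ *         struct okl4_schedule_profile_data data;
+ *         okl4_error_t err =
+ *                 _okl4_sys_schedule_profile_vcpu_get_data(vcpu_cap, &data);
+ *         if (err == OKL4_ERROR_OK) {
+ *             // data holds cpu_time, context_switches and cpu_migrations
+ *             // accumulated since the previous call; the kernel has now
+ *             // reset its internal counters to zero.
+ *         }
+ *     }
+ */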
+
+/**
+ *
+ * OKL4 Microvisor system call: SCHEDULER_SUSPEND
+ *
+ * @param scheduler_id
+ * @param power_state
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+        okl4_power_state_t power_state)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+    register uint32_t r1 asm("r1") = (uint32_t)power_state;
+    __asm__ __volatile__(
+            ""hvc(5150)"\n\t"
+            : "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
+        okl4_power_state_t power_state)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)power_state;
+    __asm__ __volatile__(
+            "" hvc(5150) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Cancel an active timeout on a specified timer.
+ *
+ *    @details
+ *    This operation cancels an active timeout on a specified timer. The
+ *    operation returns the time that was remaining on the cancelled
+ *    timeout. If there was not an active timeout, the operation returns an
+ *    error.
+ *
+ *    The returned remaining time is formatted in the requested units from
+ *    the \p flags argument.
+ *
+ *    The operation also returns the \p old_flags field, indicating whether
+ *    the cancelled timeout was periodic or one-shot and whether it was an
+ *    absolute or relative timeout.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the remaining time is returned in
+ *    units of timer ticks. The length of a timer tick is KSP defined and
+ *    may be obtained with the @ref _okl4_sys_timer_get_resolution
+ *    operation.
+ *    - If the \p units flag is not set, the remaining time is returned in
+ *    nanoseconds.
+ *
+ *    @par old_flags
+ *    - If the \p periodic flag is set, the cancelled timeout was periodic.
+ *    - If the \p periodic flag is not set, the cancelled timeout was
+ *    one-shot.
+ *    - If the \p absolute flag is set, the cancelled timeout was an
+ *    absolute time.
+ *    - If the \p absolute flag is not set, the cancelled timeout was a
+ *    relative time.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval remaining
+ *    Time that was remaining on the cancelled timeout.
+ * @retval old_flags
+ *    Flags relating to the cancelled timeout.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp remaining_tmp;
+    struct _okl4_sys_timer_cancel_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5176)"\n\t"
+            : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    remaining_tmp.words.lo = r0;
+    remaining_tmp.words.hi = r1;
+    result.remaining = (uint64_t)(remaining_tmp.val);
+    result.old_flags = (okl4_timer_flags_t)(r2);
+    result.error = (okl4_error_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_cancel_return
+_okl4_sys_timer_cancel(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_cancel_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5176) "\n\t"
+            : "=r"(x2), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.remaining = (uint64_t)(x0);
+    result.old_flags = (okl4_timer_flags_t)(x1);
+    result.error = (okl4_error_t)(x2);
+    return result;
+}
+
+#endif
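+
+/*
+ * Illustrative sketch (not part of the generated API): cancelling whatever
+ * timeout is currently armed and reading back the unexpired time. A flags
+ * value of zero requests the remaining time in nanoseconds, per the
+ * description above. `timer_cap` is a hypothetical timer capability and
+ * OKL4_ERROR_OK is assumed to be the success value from types.h.
+ *
+ *     static void example_cancel(okl4_kcap_t timer_cap)
+ *     {
+ *         struct _okl4_sys_timer_cancel_return c =
+ *                 _okl4_sys_timer_cancel(timer_cap, (okl4_timer_flags_t)0);
+ *         if (c.error == OKL4_ERROR_OK) {
+ *             // c.remaining is the unexpired time in nanoseconds;
+ *             // c.old_flags says whether it was periodic and/or absolute.
+ *         }
+ *     }
+ */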
+
+/**
+ *
+ * @brief Query the timer frequency and obtain time conversion constants.
+ *
+ *    @details
+ *    This operation returns the timer frequency and the conversion
+ *    constants that may be used to convert between units of nanoseconds
+ *    and units of ticks.
+ *
+ *    The timer frequency is returned as a 64-bit value in units of
+ *    micro-hertz (1000000 = 1 Hz). The timer resolution (or period) can be
+ *    calculated from the frequency.
+ *
+ *    The time conversion constants are returned as values \p a and \p b,
+ *    which can be used for unit conversions as follows:
+ *    - ns = (ticks * \p a) / \p b
+ *    - ticks = (ns * \p b) / \p a
+ *
+ *    @note
+ *    The constants are provided by the KSP module and are designed to be
+ *    used for simple overflow-free computation using 64-bit arithmetic,
+ *    covering time values from 0 to 2 years.
+ *
+ * @param timer
+ *    The target timer capability.
+ *
+ * @retval tick_freq
+ *    The timer frequency [in units of micro-hertz].
+ * @retval a
+ *    Ticks to nanoseconds conversion multiplier.
+ * @retval b
+ *    Ticks to nanoseconds conversion divisor.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp tick_freq_tmp;
+    struct _okl4_sys_timer_get_resolution_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1");
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    register uint32_t r4 asm("r4");
+    __asm__ __volatile__(
+            ""hvc(5177)"\n\t"
+            : "=r"(r1), "=r"(r2), "=r"(r3), "=r"(r4), "+r"(r0)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    tick_freq_tmp.words.lo = r0;
+    tick_freq_tmp.words.hi = r1;
+    result.tick_freq = (uint64_t)(tick_freq_tmp.val);
+    result.a = (uint32_t)(r2);
+    result.b = (uint32_t)(r3);
+    result.error = (okl4_error_t)(r4);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_resolution_return
+_okl4_sys_timer_get_resolution(okl4_kcap_t timer)
+{
+    struct _okl4_sys_timer_get_resolution_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1");
+    register okl4_register_t x2 asm("x2");
+    register okl4_register_t x3 asm("x3");
+    __asm__ __volatile__(
+            "" hvc(5177) "\n\t"
+            : "=r"(x1), "=r"(x2), "=r"(x3), "+r"(x0)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.tick_freq = (uint64_t)(x0);
+    result.a = (uint32_t)(x1);
+    result.b = (uint32_t)(x2);
+    result.error = (okl4_error_t)(x3);
+    return result;
+}
+
+#endif
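+
+/*
+ * Illustrative sketch (not part of the generated API): converting a
+ * nanosecond interval into timer ticks using the \p a and \p b constants
+ * returned above (ticks = (ns * b) / a). `timer_cap` is a hypothetical
+ * timer capability and OKL4_ERROR_OK is assumed to be the success value
+ * from types.h.
+ *
+ *     static uint64_t example_ns_to_ticks(okl4_kcap_t timer_cap,
+ *             uint64_t ns)
+ *     {
+ *         struct _okl4_sys_timer_get_resolution_return res =
+ *                 _okl4_sys_timer_get_resolution(timer_cap);
+ *         if (res.error != OKL4_ERROR_OK) {
+ *             return 0; // error handling elided
+ *         }
+ *         // ticks = (ns * b) / a, as documented above
+ *         return (ns * res.b) / res.a;
+ *     }
+ */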
+
+/**
+ *
+ * @brief Query the current system time.
+ *
+ *    @details
+ *    This operation returns the current absolute system time. The \p flags
+ *    argument is used to specify the desired units for the return value.
+ *
+ *    - Absolute time is based on an arbitrary time zero, defined to be at
+ *    or before the time of boot.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the time is returned in units
+ *    of timer ticks. The length of a timer tick is KSP defined and may
+ *    be obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the time is returned in
+ *    terms of nanoseconds.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval time
+ *    The current system time.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp time_tmp;
+    struct _okl4_sys_timer_get_time_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    __asm__ __volatile__(
+            ""hvc(5178)"\n\t"
+            : "=r"(r2), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    time_tmp.words.lo = r0;
+    time_tmp.words.hi = r1;
+    result.time = (uint64_t)(time_tmp.val);
+    result.error = (okl4_error_t)(r2);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_get_time_return
+_okl4_sys_timer_get_time(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_get_time_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    __asm__ __volatile__(
+            "" hvc(5178) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.time = (uint64_t)(x0);
+    result.error = (okl4_error_t)(x1);
+    return result;
+}
+
+#endif
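+
+/*
+ * Illustrative sketch (not part of the generated API): reading the current
+ * absolute system time in nanoseconds. A flags value of zero selects
+ * nanosecond units, per the description above. `timer_cap` is a
+ * hypothetical timer capability and OKL4_ERROR_OK is assumed to be the
+ * success value from types.h.
+ *
+ *     static uint64_t example_now_ns(okl4_kcap_t timer_cap)
+ *     {
+ *         struct _okl4_sys_timer_get_time_return now =
+ *                 _okl4_sys_timer_get_time(timer_cap,
+ *                         (okl4_timer_flags_t)0);
+ *         return (now.error == OKL4_ERROR_OK) ? now.time : 0;
+ *     }
+ */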
+
+/**
+ *
+ * @brief Query a timer about an active timeout.
+ *
+ *    @details
+ *    The operation queries a timer about an active timeout. If there is no
+ *    active timeout, this operation returns an error.
+ *
+ *    If the timer has an active timeout, this operation returns the
+ *    remaining time and the flags associated with the timeout. The
+ *    remaining time is returned in the requested units from the \p flags
+ *    argument.
+ *
+ *    The operation also returns the \p active_flags field, indicating
+ *    whether the active timeout is periodic or one-shot and whether it is
+ *    an absolute or relative timeout.
+ *
+ *    @par flags
+ *    - If the \p units flag is set, the remaining time is returned in
+ *    units of timer ticks. The length of a timer tick is KSP defined and
+ *    may be obtained with the @ref _okl4_sys_timer_get_resolution
+ *    operation.
+ *    - If the \p units flag is not set, the remaining time is returned in
+ *    units of nanoseconds.
+ *
+ *    @par active_flags
+ *    - If the \p periodic flag is set, the timeout is periodic.
+ *    - If the \p periodic flag is not set, the timeout is one-shot.
+ *    - If the \p absolute flag is set, the timeout is an absolute time.
+ *    - If the \p absolute flag is not set, the timeout is a relative time.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval remaining
+ *    Time remaining before the next timeout.
+ * @retval active_flags
+ *    Flags relating to the active timeout.
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp remaining_tmp;
+    struct _okl4_sys_timer_query_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)flags;
+    register uint32_t r2 asm("r2");
+    register uint32_t r3 asm("r3");
+    __asm__ __volatile__(
+            ""hvc(5179)"\n\t"
+            : "=r"(r2), "=r"(r3), "+r"(r0), "+r"(r1)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    remaining_tmp.words.lo = r0;
+    remaining_tmp.words.hi = r1;
+    result.remaining = (uint64_t)(remaining_tmp.val);
+    result.active_flags = (okl4_timer_flags_t)(r2);
+    result.error = (okl4_error_t)(r3);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_timer_query_return
+_okl4_sys_timer_query(okl4_kcap_t timer, okl4_timer_flags_t flags)
+{
+    struct _okl4_sys_timer_query_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)flags;
+    register okl4_register_t x2 asm("x2");
+    __asm__ __volatile__(
+            "" hvc(5179) "\n\t"
+            : "=r"(x2), "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.remaining = (uint64_t)(x0);
+    result.active_flags = (okl4_timer_flags_t)(x1);
+    result.error = (okl4_error_t)(x2);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a timer with a specified timeout.
+ *
+ *    @details
+ *    This operation optionally resets then starts a timer with a new
+ *    timeout. The specified timeout may be an `absolute` or `relative`
+ *    time, may be `one-shot` or `periodic`, and may be specified in units
+ *    of nanoseconds or ticks.
+ *
+ *    @par flags
+ *    - If the \p absolute flag is set, the timeout is treated as an
+ *    absolute time based on an arbitrary time zero, defined to be at or
+ *    before the time of boot.
+ *    - If the \p absolute flag is not set, the timeout is treated as a
+ *    relative time, a specified amount of time into the future (e.g. 10ms
+ *    from now).
+ *    - If the \p periodic flag is set, the timeout is treated as a
+ *    periodic timeout that repeats with a period equal to the specified
+ *    timeout.
+ *    - If the \p periodic flag is not set, the timeout is treated as a
+ *    one-shot timeout that expires at the specified time and does not
+ *    repeat.
+ *    - If the \p units flag is set, the timeout is specified in units of
+ *    timer ticks. The length of a timer tick is KSP defined and may be
+ *    obtained with the @ref _okl4_sys_timer_get_resolution operation.
+ *    - If the \p units flag is not set, the timeout is specified in units
+ *    of nanoseconds.
+ *    - The \p reload flag allows an active timeout to be cancelled and the
+ *    new timeout to be programmed into the timer.
+ *
+ * @param timer
+ *    The target timer capability.
+ * @param timeout
+ *    The timeout value.
+ * @param flags
+ *    Flags for the requested operation.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+        okl4_timer_flags_t flags)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)timer;
+    register uint32_t r1 asm("r1") = (uint32_t)(timeout        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((timeout >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)flags;
+    __asm__ __volatile__(
+            ""hvc(5180)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_timer_start(okl4_kcap_t timer, uint64_t timeout,
+        okl4_timer_flags_t flags)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)timer;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)timeout;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)flags;
+    __asm__ __volatile__(
+            "" hvc(5180) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
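+
+/*
+ * Illustrative sketch (not part of the generated API): arming a relative,
+ * one-shot 10 ms timeout. With a flags value of zero the timeout is
+ * interpreted as relative, one-shot and in nanoseconds, per the description
+ * above. `timer_cap` is a hypothetical timer capability.
+ *
+ *     static okl4_error_t example_arm_10ms(okl4_kcap_t timer_cap)
+ *     {
+ *         uint64_t timeout_ns = 10ULL * 1000ULL * 1000ULL; // 10 ms
+ *         // A zero flags value selects a relative, one-shot timeout in ns.
+ *         return _okl4_sys_timer_start(timer_cap, timeout_ns,
+ *                 (okl4_timer_flags_t)0);
+ *     }
+ */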
+
+/**
+ *
+ * OKL4 Microvisor system call: TRACEBUFFER_SYNC
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5199)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_tracebuffer_sync(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5199) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Reset a vCPU.
+ *
+ *    @details
+ *    This operation resets a vCPU to its boot state.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5122)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_reset(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5122) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Start a vCPU executing.
+ *
+ *    @details
+ *    This operation starts a stopped vCPU at an optionally specified
+ *    instruction pointer. If the instruction pointer is not set, the
+ *    value at the previous stop is preserved.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ * @param set_ip
+ *    Should the instruction pointer be set.
+ * @param ip
+ *    Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)set_ip;
+    register uint32_t r2 asm("r2") = (uint32_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            ""hvc(5123)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_start(okl4_kcap_t vcpu, okl4_bool_t set_ip, void *ip)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)set_ip;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            "" hvc(5123) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
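+
+/*
+ * Illustrative sketch (not part of the generated API): restarting a stopped
+ * vCPU at a fresh entry point. `vcpu_cap` and `guest_entry` are
+ * hypothetical values; the entry address must be valid in the target
+ * vCPU's address space, which is not necessarily the caller's.
+ *
+ *     static okl4_error_t example_start(okl4_kcap_t vcpu_cap,
+ *             void *guest_entry)
+ *     {
+ *         // Passing set_ip = 0 (and a NULL ip) instead resumes at the
+ *         // instruction pointer preserved from the previous stop.
+ *         return _okl4_sys_vcpu_start(vcpu_cap, (okl4_bool_t)1,
+ *                 guest_entry);
+ *     }
+ */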
+
+/**
+ *
+ * @brief Stop a vCPU executing.
+ *
+ *    @details
+ *    This operation stops a vCPU's execution until it is next restarted.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    __asm__ __volatile__(
+            ""hvc(5124)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_stop(okl4_kcap_t vcpu)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    __asm__ __volatile__(
+            "" hvc(5124) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Switch a vCPU's execution mode between 32-bit and 64-bit.
+ *
+ *    @details
+ *    This operation resets a vCPU to its boot state, switches between
+ *    32-bit and 64-bit modes, and restarts execution at the specified
+ *    address. The start address must be valid in the vCPU's initial
+ *    address space, which may not be the same as the caller's address
+ *    space.
+ *
+ * @param vcpu
+ *    The target vCPU capability.
+ * @param to_64bit
+ *    The vCPU will reset in 64-bit mode if true; otherwise in 32-bit mode.
+ * @param set_ip
+ *    Should the instruction pointer be set.
+ * @param ip
+ *    Instruction pointer to start the vCPU at.
+ *
+ * @retval error
+ *    Resulting error.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+        okl4_bool_t set_ip, void *ip)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)vcpu;
+    register uint32_t r1 asm("r1") = (uint32_t)to_64bit;
+    register uint32_t r2 asm("r2") = (uint32_t)set_ip;
+    register uint32_t r3 asm("r3") = (uint32_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            ""hvc(5125)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3)
+            :
+            : "cc", "memory", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vcpu_switch_mode(okl4_kcap_t vcpu, okl4_bool_t to_64bit,
+        okl4_bool_t set_ip, void *ip)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)vcpu;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)to_64bit;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)set_ip;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)(uintptr_t)ip;
+    __asm__ __volatile__(
+            "" hvc(5125) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Signal a synchronization event.
+ *
+ *    @details
+ *    This operation sets the wakeup flags for all vCPUs in the caller's
+ *    domain. If any vCPUs in the domain are waiting due to a pending
+ *    `sync_wfe` operation, they will be released from the wait. The OKL4
+ *    scheduler will then determine which vCPUs should execute first based
+ *    on their priority.
+ *
+ *    This `sync_sev` operation is non-blocking and is used to signal other
+ *    vCPUs about some user-defined event. A typical use of this operation
+ *    is to signal the release of a spinlock to other waiting vCPUs.
+ *
+ *    @see _okl4_sys_vcpu_sync_wfe
+ *
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+    __asm__ __volatile__(
+            ""hvc(5126)"\n\t"
+            :
+            :
+            : "cc", "memory", "r0", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_sev(void)
+{
+    __asm__ __volatile__(
+            "" hvc(5126) "\n\t"
+            :
+            :
+            : "cc", "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
+
+/**
+ *
+ * @brief Wait for a synchronization event.
+ *
+ *    @details
+ *    This operation is used to defer the execution of a vCPU while it is
+ *    waiting for an event. This operation is non-blocking, in that if no
+ *    other vCPUs in the system are runnable, the operation will complete
+ *    and the vCPU is not blocked. The `sync_wfe` operation uses the
+ *    \p holder argument as a hint identifying the vCPU the caller is
+ *    waiting on.
+ *
+ *    This operation first determines whether there is a pending wakeup
+ *    flag set for the calling vCPU. If the flag is set, the operation
+ *    clears the flag and returns immediately. If the caller has provided
+ *    a valid \p holder id, and the holder is currently executing on a
+ *    different physical core, the operation again returns immediately.
+ *
+ *    In all other cases, the Microvisor records that the vCPU is waiting
+ *    and temporarily reduces the vCPU's priority to the lowest priority
+ *    in the system. The scheduler is then invoked to rebalance the system.
+ *
+ *    A waiting vCPU will continue execution and return from the `sync_wfe`
+ *    operation as soon as no higher priority vCPUs in the system are
+ *    available for scheduling, or a wake-up event is signalled by another
+ *    vCPU in the same domain.
+ *
+ *    @par holder
+ *    The holder identifier may be a valid capability to another vCPU, or
+ *    an invalid id. If the provided id is valid, it is used as a hint to
+ *    the Microvisor that the caller is waiting on the specified vCPU. The
+ *    `vcpu_sync` API is optimized for short spinlock-type use-cases and
+ *    will therefore allow the caller to continue execution without
+ *    waiting, if the target \p holder vCPU is presently running on another
+ *    physical core. This is done to reduce latency, with the expectation
+ *    that the holder vCPU will soon release the lock.
+ *
+ *    @see _okl4_sys_vcpu_sync_sev
+ *
+ * @param holder
+ *    Capability of the vCPU to wait for, or an invalid designator.
+ *
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)holder;
+    __asm__ __volatile__(
+            ""hvc(5127)"\n\t"
+            : "+r"(r0)
+            :
+            : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+            );
+
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE void
+_okl4_sys_vcpu_sync_wfe(okl4_kcap_t holder)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)holder;
+    __asm__ __volatile__(
+            "" hvc(5127) "\n\t"
+            : "+r"(x0)
+            :
+            : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+}
+
+#endif
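+
+/*
+ * Illustrative sketch (not part of the generated API): a minimal spinlock
+ * acquire/release pair using the sync_wfe/sync_sev hints. It assumes a
+ * shared `lock` word accessed with the GCC/Clang __atomic builtins and a
+ * `holder_cap` value naming the vCPU believed to hold the lock (or an
+ * invalid capability if unknown); both names are hypothetical.
+ *
+ *     static void example_lock(volatile int *lock, okl4_kcap_t holder_cap)
+ *     {
+ *         while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) != 0) {
+ *             // Defer to other vCPUs; returns early if holder_cap is
+ *             // running on another physical core or a wakeup is pending.
+ *             _okl4_sys_vcpu_sync_wfe(holder_cap);
+ *         }
+ *     }
+ *
+ *     static void example_unlock(volatile int *lock)
+ *     {
+ *         __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
+ *         _okl4_sys_vcpu_sync_sev(); // wake any waiters in this domain
+ *     }
+ */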
+
+/**
+ *
+ * @brief Atomically fetch an interrupt payload and raise a virtual interrupt.
+ *
+ *    @details
+ *    This API is equivalent to atomically calling
+ *    @ref sys_interrupt_get_payload and @ref sys_vinterrupt_modify.
+ *    Typically, the specified virtual interrupt will be one that is not
+ *    attached to the specified virtual interrupt source, but this is not
+ *    enforced. If only one virtual interrupt source is affected, then the
+ *    @ref sys_interrupt_get_payload phase will occur first.
+ *
+ *    Certain communication protocols must perform this sequence of
+ *    operations atomically in order to maintain consistency. Other than
+ *    being atomic, this is no different to invoking the two component
+ *    operations separately.
+ *
+ * @param irq
+ *    An interrupt line number for the virtual GIC.
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param mask
+ *    A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ * @retval payload
+ *    Accumulated virtual interrupt payload flags.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+        okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+    typedef union {
+        struct uint64 {
+            uint32_t lo;
+            uint32_t hi;
+        } words;
+        uint64_t val;
+    } okl4_uint64_tmp;
+    okl4_uint64_tmp payload_tmp;
+    struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+    register uint32_t r0 asm("r0") = (uint32_t)irq;
+    register uint32_t r1 asm("r1") = (uint32_t)virqline;
+    register uint32_t r2 asm("r2") = (uint32_t)(mask        & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)((mask >> 32) & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r5 asm("r5") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5194)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5)
+            :
+            : "cc", "memory"
+            );
+
+
+    result.error = (okl4_error_t)(r0);
+    payload_tmp.words.lo = r1;
+    payload_tmp.words.hi = r2;
+    result.payload = (okl4_virq_flags_t)(payload_tmp.val);
+    return result;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE struct _okl4_sys_vinterrupt_clear_and_raise_return
+_okl4_sys_vinterrupt_clear_and_raise(okl4_interrupt_number_t irq,
+        okl4_kcap_t virqline, okl4_virq_flags_t mask, okl4_virq_flags_t payload)
+{
+    struct _okl4_sys_vinterrupt_clear_and_raise_return result;
+
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)irq;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)virqline;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)mask;
+    register okl4_register_t x3 asm("x3") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5194) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
+            :
+            : "cc", "memory", "x4", "x5", "x6", "x7"
+            );
+
+
+    result.error = (okl4_error_t)(x0);
+    result.payload = (okl4_virq_flags_t)(x1);
+    return result;
+}
+
+#endif
+
+/**
+ *
+ * @brief Raise a virtual interrupt, and modify the payload flags.
+ *
+ *    @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *    source. A virtual interrupt source object is distinct from a virtual
+ *    interrupt. A virtual interrupt source is always linked to a virtual
+ *    interrupt, but the reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *    may be fetched by the recipient of the interrupt. An interrupt
+ *    payload is a @ref okl4_word_t sized array of flags, packed into a
+ *    single word. Flags are cleared whenever the interrupt recipient
+ *    fetches the payload with the @ref okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-modify API allows the caller to pass in a new set of
+ *    flags in the \p payload field, and a set of flags to keep from the
+ *    previous payload in the \p mask field. If the interrupt has
+ *    previously been raised and not yet delivered, the flags accumulate
+ *    with a mask; that is, each flag is the boolean OR of the specified
+ *    value with the boolean AND of its previous value and the mask.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *    an invocation of this API is counted as a single edge; this triggers
+ *    interrupt delivery if the interrupt is not already pending,
+ *    irrespective of the payload. If the interrupt is configured for level
+ *    triggering, then its pending state is the boolean OR of its payload
+ *    flags after any specified flags are cleared or raised; at least one
+ *    flag must be set in the new payload to permit delivery of a
+ *    level-triggered interrupt.
+ *
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param mask
+ *    A machine-word-sized array of payload flags to preserve.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+        okl4_virq_flags_t payload)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)virqline;
+    register uint32_t r1 asm("r1") = (uint32_t)(mask        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((mask >> 32) & 0xffffffff);
+    register uint32_t r3 asm("r3") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r4 asm("r4") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5195)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4)
+            :
+            : "cc", "memory", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_modify(okl4_kcap_t virqline, okl4_virq_flags_t mask,
+        okl4_virq_flags_t payload)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)mask;
+    register okl4_register_t x2 asm("x2") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5195) "\n\t"
+            : "+r"(x0), "+r"(x1), "+r"(x2)
+            :
+            : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
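+
+/*
+ * Illustrative sketch (not part of the generated API): updating a virtual
+ * interrupt payload while preserving a subset of the previously raised
+ * flags. Per the description above, each resulting flag is
+ * (old_flag AND mask) OR new_flag. `virq_cap` is a hypothetical virtual
+ * interrupt line capability; the flag bit assignments are protocol-defined.
+ *
+ *     static okl4_error_t example_update(okl4_kcap_t virq_cap)
+ *     {
+ *         okl4_virq_flags_t keep = (okl4_virq_flags_t)1 << 0; // keep bit 0
+ *         okl4_virq_flags_t set  = (okl4_virq_flags_t)1 << 1; // raise bit 1
+ *         return _okl4_sys_vinterrupt_modify(virq_cap, keep, set);
+ *     }
+ */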
+
+/**
+ *
+ * @brief Raise a virtual interrupt, setting specified payload flags.
+ *
+ *    @details
+ *    This triggers a virtual interrupt by raising a virtual interrupt
+ *    source. A virtual interrupt source object is distinct from a virtual
+ *    interrupt. A virtual interrupt source is always linked to a virtual
+ *    interrupt, but the reverse is not true.
+ *
+ *    Each Microvisor virtual interrupt carries a payload of flags which
+ *    may be fetched by the recipient of the interrupt. An interrupt
+ *    payload is a @ref okl4_word_t sized array of flags, packed into a
+ *    single word. Flags are cleared whenever the interrupt recipient
+ *    fetches the payload with the @ref okl4_sys_interrupt_get_payload API.
+ *
+ *    The interrupt-raise API allows the caller to pass in a new set of
+ *    flags in the \p payload field. If the interrupt has previously been
+ *    raised and not yet delivered, the flags accumulate; that is, each
+ *    flag is the boolean OR of its previous value and the specified value.
+ *
+ *    When the recipient has configured the interrupt for edge triggering,
+ *    an invocation of this API is counted as a single edge; this triggers
+ *    interrupt delivery if the interrupt is not already pending,
+ *    irrespective of the payload. If the interrupt is configured for level
+ *    triggering, then its pending state is the boolean OR of its payload
+ *    flags after any specified flags are raised; at least one flag must be
+ *    set in the new payload to permit delivery of a level-triggered
+ *    interrupt.
+ *
+ *    @note Invoking this API is equivalent to invoking the
+ *    @ref okl4_sys_vinterrupt_modify API with all bits set in the \p mask
+ *    value.
+ *
+ *    @note This API is distinct from the @ref okl4_sys_interrupt_raise
+ *    API, which raises a local software-generated interrupt without
+ *    requiring an explicit capability.
+ *
+ * @param virqline
+ *    A virtual interrupt line capability.
+ * @param payload
+ *    A machine-word-sized array of payload flags to set.
+ *
+ * @retval error
+ *    The resulting error value.
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+    register uint32_t r0 asm("r0") = (uint32_t)virqline;
+    register uint32_t r1 asm("r1") = (uint32_t)(payload        & 0xffffffff);
+    register uint32_t r2 asm("r2") = (uint32_t)((payload >> 32) & 0xffffffff);
+    __asm__ __volatile__(
+            ""hvc(5196)"\n\t"
+            : "+r"(r0), "+r"(r1), "+r"(r2)
+            :
+            : "cc", "memory", "r3", "r4", "r5"
+            );
+
+
+    return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
+{
+    register okl4_register_t x0 asm("x0") = (okl4_register_t)virqline;
+    register okl4_register_t x1 asm("x1") = (okl4_register_t)payload;
+    __asm__ __volatile__(
+            "" hvc(5196) "\n\t"
+            : "+r"(x0), "+r"(x1)
+            :
+            : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+            );
+
+
+    return (okl4_error_t)x0;
+}
+
+#endif
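+
+/*
+ * Illustrative sketch (not part of the generated API): raising a virtual
+ * interrupt with a single payload flag set. Repeated raises before the
+ * recipient fetches the payload simply OR further flags into it, per the
+ * description above. `virq_cap` is a hypothetical virtual interrupt line
+ * capability.
+ *
+ *     static okl4_error_t example_notify(okl4_kcap_t virq_cap)
+ *     {
+ *         return _okl4_sys_vinterrupt_raise(virq_cap,
+ *                 (okl4_virq_flags_t)1 << 0); // set payload flag 0
+ *     }
+ */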
+
+
+/*lint -restore */
+
+#endif /* !ASSEMBLY */
+
+/*
+ * Assembly system call prototypes / numbers.
+ */
+
+/** @addtogroup lib_microvisor_syscall_numbers Microvisor System Call Numbers
+ * @{
+ */
+#define OKL4_SYSCALL_AXON_PROCESS_RECV 5184
+
+#define OKL4_SYSCALL_AXON_SET_HALTED 5186
+
+#define OKL4_SYSCALL_AXON_SET_RECV_AREA 5187
+
+#define OKL4_SYSCALL_AXON_SET_RECV_QUEUE 5188
+
+#define OKL4_SYSCALL_AXON_SET_RECV_SEGMENT 5189
+
+#define OKL4_SYSCALL_AXON_SET_SEND_AREA 5190
+
+#define OKL4_SYSCALL_AXON_SET_SEND_QUEUE 5191
+
+#define OKL4_SYSCALL_AXON_SET_SEND_SEGMENT 5192
+
+#define OKL4_SYSCALL_AXON_TRIGGER_SEND 5185
+
+#define OKL4_SYSCALL_INTERRUPT_ACK 5128
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_PRIVATE 5134
+
+#define OKL4_SYSCALL_INTERRUPT_ATTACH_SHARED 5135
+
+#define OKL4_SYSCALL_INTERRUPT_DETACH 5136
+
+#define OKL4_SYSCALL_INTERRUPT_DIST_ENABLE 5133
+
+#define OKL4_SYSCALL_INTERRUPT_EOI 5129
+
+#define OKL4_SYSCALL_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING 5137
+
+#define OKL4_SYSCALL_INTERRUPT_GET_PAYLOAD 5132
+
+#define OKL4_SYSCALL_INTERRUPT_LIMITS 5138
+
+#define OKL4_SYSCALL_INTERRUPT_MASK 5130
+
+#define OKL4_SYSCALL_INTERRUPT_RAISE 5145
+
+#define OKL4_SYSCALL_INTERRUPT_SET_BINARY_POINT 5139
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONFIG 5140
+
+#define OKL4_SYSCALL_INTERRUPT_SET_CONTROL 5141
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY 5142
+
+#define OKL4_SYSCALL_INTERRUPT_SET_PRIORITY_MASK 5143
+
+#define OKL4_SYSCALL_INTERRUPT_SET_TARGETS 5144
+
+#define OKL4_SYSCALL_INTERRUPT_UNMASK 5131
+
+#define OKL4_SYSCALL_KDB_INTERACT 5120
+
+#define OKL4_SYSCALL_KDB_SET_OBJECT_NAME 5121
+
+#define OKL4_SYSCALL_KSP_PROCEDURE_CALL 5197
+
+#define OKL4_SYSCALL_MMU_ATTACH_SEGMENT 5152
+
+#define OKL4_SYSCALL_MMU_DETACH_SEGMENT 5153
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE 5154
+
+#define OKL4_SYSCALL_MMU_FLUSH_RANGE_PN 5155
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PAGE 5156
+
+#define OKL4_SYSCALL_MMU_LOOKUP_PN 5157
+
+#define OKL4_SYSCALL_MMU_MAP_PAGE 5158
+
+#define OKL4_SYSCALL_MMU_MAP_PN 5159
+
+#define OKL4_SYSCALL_MMU_UNMAP_PAGE 5160
+
+#define OKL4_SYSCALL_MMU_UNMAP_PN 5161
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_ATTRS 5162
+
+#define OKL4_SYSCALL_MMU_UPDATE_PAGE_PERMS 5163
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_ATTRS 5164
+
+#define OKL4_SYSCALL_MMU_UPDATE_PN_PERMS 5165
+
+#define OKL4_SYSCALL_PERFORMANCE_NULL_SYSCALL 5198
+
+#define OKL4_SYSCALL_PIPE_CONTROL 5146
+
+#define OKL4_SYSCALL_PIPE_RECV 5147
+
+#define OKL4_SYSCALL_PIPE_SEND 5148
+
+#define OKL4_SYSCALL_PRIORITY_WAIVE 5151
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTER 5200
+
+#define OKL4_SYSCALL_REMOTE_GET_REGISTERS 5201
+
+#define OKL4_SYSCALL_REMOTE_READ_MEMORY32 5202
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTER 5203
+
+#define OKL4_SYSCALL_REMOTE_SET_REGISTERS 5204
+
+#define OKL4_SYSCALL_REMOTE_WRITE_MEMORY32 5205
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_STATUS_SUSPENDED 5206
+
+#define OKL4_SYSCALL_SCHEDULE_METRICS_WATCH_SUSPENDED 5207
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_DISABLE 5168
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_ENABLE 5169
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_CPU_GET_DATA 5170
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_DISABLE 5171
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_ENABLE 5172
+
+#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_GET_DATA 5173
+
+#define OKL4_SYSCALL_SCHEDULER_SUSPEND 5150
+
+#define OKL4_SYSCALL_TIMER_CANCEL 5176
+
+#define OKL4_SYSCALL_TIMER_GET_RESOLUTION 5177
+
+#define OKL4_SYSCALL_TIMER_GET_TIME 5178
+
+#define OKL4_SYSCALL_TIMER_QUERY 5179
+
+#define OKL4_SYSCALL_TIMER_START 5180
+
+#define OKL4_SYSCALL_TRACEBUFFER_SYNC 5199
+
+#define OKL4_SYSCALL_VCPU_RESET 5122
+
+#define OKL4_SYSCALL_VCPU_START 5123
+
+#define OKL4_SYSCALL_VCPU_STOP 5124
+
+#define OKL4_SYSCALL_VCPU_SWITCH_MODE 5125
+
+#define OKL4_SYSCALL_VCPU_SYNC_SEV 5126
+
+#define OKL4_SYSCALL_VCPU_SYNC_WFE 5127
+
+#define OKL4_SYSCALL_VINTERRUPT_CLEAR_AND_RAISE 5194
+
+#define OKL4_SYSCALL_VINTERRUPT_MODIFY 5195
+
+#define OKL4_SYSCALL_VINTERRUPT_RAISE 5196
+
+/** @} */
+#undef hvc
+
+#if defined(_definitions_for_linters)
+/* Ignore lint identifier clashes for syscall names. */
+/*lint -esym(621, _okl4_sys_axon_process_recv) */
+/*lint -esym(621, _okl4_sys_axon_set_halted) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_area) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_recv_segment) */
+/*lint -esym(621, _okl4_sys_axon_set_send_area) */
+/*lint -esym(621, _okl4_sys_axon_set_send_queue) */
+/*lint -esym(621, _okl4_sys_axon_set_send_segment) */
+/*lint -esym(621, _okl4_sys_axon_trigger_send) */
+/*lint -esym(621, _okl4_sys_interrupt_ack) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_private) */
+/*lint -esym(621, _okl4_sys_interrupt_attach_shared) */
+/*lint -esym(621, _okl4_sys_interrupt_detach) */
+/*lint -esym(621, _okl4_sys_interrupt_dist_enable) */
+/*lint -esym(621, _okl4_sys_interrupt_eoi) */
+/*lint -esym(621, _okl4_sys_interrupt_get_highest_priority_pending) */
+/*lint -esym(621, _okl4_sys_interrupt_get_payload) */
+/*lint -esym(621, _okl4_sys_interrupt_limits) */
+/*lint -esym(621, _okl4_sys_interrupt_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_raise) */
+/*lint -esym(621, _okl4_sys_interrupt_set_binary_point) */
+/*lint -esym(621, _okl4_sys_interrupt_set_config) */
+/*lint -esym(621, _okl4_sys_interrupt_set_control) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority) */
+/*lint -esym(621, _okl4_sys_interrupt_set_priority_mask) */
+/*lint -esym(621, _okl4_sys_interrupt_set_targets) */
+/*lint -esym(621, _okl4_sys_interrupt_unmask) */
+/*lint -esym(621, _okl4_sys_kdb_interact) */
+/*lint -esym(621, _okl4_sys_kdb_set_object_name) */
+/*lint -esym(621, _okl4_sys_ksp_procedure_call) */
+/*lint -esym(621, _okl4_sys_mmu_attach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_detach_segment) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range) */
+/*lint -esym(621, _okl4_sys_mmu_flush_range_pn) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_page) */
+/*lint -esym(621, _okl4_sys_mmu_lookup_pn) */
+/*lint -esym(621, _okl4_sys_mmu_map_page) */
+/*lint -esym(621, _okl4_sys_mmu_map_pn) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_page) */
+/*lint -esym(621, _okl4_sys_mmu_unmap_pn) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_page_perms) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_attrs) */
+/*lint -esym(621, _okl4_sys_mmu_update_pn_perms) */
+/*lint -esym(621, _okl4_sys_performance_null_syscall) */
+/*lint -esym(621, _okl4_sys_pipe_control) */
+/*lint -esym(621, _okl4_sys_pipe_recv) */
+/*lint -esym(621, _okl4_sys_pipe_send) */
+/*lint -esym(621, _okl4_sys_priority_waive) */
+/*lint -esym(621, _okl4_sys_remote_get_register) */
+/*lint -esym(621, _okl4_sys_remote_get_registers) */
+/*lint -esym(621, _okl4_sys_remote_read_memory32) */
+/*lint -esym(621, _okl4_sys_remote_set_register) */
+/*lint -esym(621, _okl4_sys_remote_set_registers) */
+/*lint -esym(621, _okl4_sys_remote_write_memory32) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_status_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_metrics_watch_suspended) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_cpu_get_data) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_disable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_enable) */
+/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_get_data) */
+/*lint -esym(621, _okl4_sys_scheduler_suspend) */
+/*lint -esym(621, _okl4_sys_timer_cancel) */
+/*lint -esym(621, _okl4_sys_timer_get_resolution) */
+/*lint -esym(621, _okl4_sys_timer_get_time) */
+/*lint -esym(621, _okl4_sys_timer_query) */
+/*lint -esym(621, _okl4_sys_timer_start) */
+/*lint -esym(621, _okl4_sys_tracebuffer_sync) */
+/*lint -esym(621, _okl4_sys_vcpu_reset) */
+/*lint -esym(621, _okl4_sys_vcpu_start) */
+/*lint -esym(621, _okl4_sys_vcpu_stop) */
+/*lint -esym(621, _okl4_sys_vcpu_switch_mode) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_sev) */
+/*lint -esym(621, _okl4_sys_vcpu_sync_wfe) */
+/*lint -esym(621, _okl4_sys_vinterrupt_clear_and_raise) */
+/*lint -esym(621, _okl4_sys_vinterrupt_modify) */
+/*lint -esym(621, _okl4_sys_vinterrupt_raise) */
+#endif
+#endif /* __AUTO__USER_SYSCALLS_H__ */
+/** @} */
diff --git a/include/microvisor/kernel/types.h b/include/microvisor/kernel/types.h
new file mode 100644
index 0000000..c87285c
--- /dev/null
+++ b/include/microvisor/kernel/types.h
@@ -0,0 +1,16064 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Auto generated - do not modify */
+
+/** @addtogroup lib_microvisor
+ * @{
+ */
+/** @addtogroup lib_microvisor_types Microvisor Types
+ * @{
+ */
+#ifndef __AUTO__MICROVISOR_TYPES_H__
+#define __AUTO__MICROVISOR_TYPES_H__
+
+#if !defined(ASSEMBLY)
+
+#define OKL4_DEFAULT_PERMS OKL4_PAGE_PERMS_RWX
+#define OKL4_DEFAULT_CACHE_ATTRIBUTES OKL4_PAGE_CACHE_DEFAULT
+
+#if __SIZEOF_POINTER__ != 8
+#define __ptr64(type, name) union { type name; uint64_t _x_##name; }
+#define __ptr64_array(type, name) union { type val; uint64_t _x; } name
+#else
+#define __ptr64(type, name) type name
+#define __ptr64_array(type, name) type name
+#endif
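+
+/*
+ * Illustrative sketch only (not part of the generated interface): on a
+ * 32-bit build the __ptr64() wrapper pads a pointer member out to a full
+ * 64 bits, so a structure shared with the 64-bit Microvisor keeps the
+ * same layout on either pointer width.  The structure below is a
+ * hypothetical example, not an OKL4 definition.
+ */
+struct okl4_example_shared_desc {
+    __ptr64(void *, base); /* occupies 8 bytes on 32- and 64-bit builds */
+    uint64_t size;
+};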
+
+/**
+    The `okl4_bool_t` type represents a standard boolean value.  Valid values are
+    restricted to @ref OKL4_TRUE and @ref OKL4_FALSE.
+*/
+
+typedef _Bool okl4_bool_t;
+
+
+
+
+
+
+
+
+/**
+    - BITS 7..0 -   @ref OKL4_MASK_AFF0_ARM_MPIDR
+    - BITS 15..8 -   @ref OKL4_MASK_AFF1_ARM_MPIDR
+    - BITS 23..16 -   @ref OKL4_MASK_AFF2_ARM_MPIDR
+    - BIT 24 -   @ref OKL4_MASK_MT_ARM_MPIDR
+    - BIT 30 -   @ref OKL4_MASK_U_ARM_MPIDR
+    - BIT 31 -   @ref OKL4_MASK_MP_ARM_MPIDR
+    - BITS 39..32 -   @ref OKL4_MASK_AFF3_ARM_MPIDR
+*/
+
+/*lint -esym(621, okl4_arm_mpidr_t) */
+typedef uint64_t okl4_arm_mpidr_t;
+
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2);
+
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3);
+
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt);
+
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u);
+
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x);
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF0_MASK) */
+#define OKL4_ARM_MPIDR_AFF0_MASK ((okl4_arm_mpidr_t)255U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_MASK_AFF0_ARM_MPIDR ((okl4_arm_mpidr_t)255U)
+/*lint -esym(621, OKL4_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF1_MASK) */
+#define OKL4_ARM_MPIDR_AFF1_MASK ((okl4_arm_mpidr_t)255U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_MASK_AFF1_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF2_MASK) */
+#define OKL4_ARM_MPIDR_AFF2_MASK ((okl4_arm_mpidr_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_MASK_AFF2_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ARM_MPIDR_MT_MASK) */
+#define OKL4_ARM_MPIDR_MT_MASK ((okl4_arm_mpidr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MT_ARM_MPIDR) */
+#define OKL4_MASK_MT_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_U_MASK) */
+#define OKL4_ARM_MPIDR_U_MASK ((okl4_arm_mpidr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_U_ARM_MPIDR) */
+#define OKL4_MASK_U_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_U_ARM_MPIDR) */
+#define OKL4_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_WIDTH_U_ARM_MPIDR) */
+#define OKL4_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_MP_MASK) */
+#define OKL4_ARM_MPIDR_MP_MASK ((okl4_arm_mpidr_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MP_ARM_MPIDR) */
+#define OKL4_MASK_MP_ARM_MPIDR ((okl4_arm_mpidr_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ARM_MPIDR_AFF3_MASK) */
+#define OKL4_ARM_MPIDR_AFF3_MASK ((okl4_arm_mpidr_t)255U << 32) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_MASK_AFF3_ARM_MPIDR ((okl4_arm_mpidr_t)255U << 32)
+/*lint -esym(621, OKL4_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_WIDTH_AFF3_ARM_MPIDR (8)
+
+
+/*lint -sem(okl4_arm_mpidr_getaff0, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff0) */
+/*lint -esym(714, okl4_arm_mpidr_getaff0) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff0(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff0, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff0) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff0) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff0(okl4_arm_mpidr_t *x, uint64_t _aff0)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff0;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff1, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff1) */
+/*lint -esym(714, okl4_arm_mpidr_getaff1) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff1(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 8;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff1, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff1) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff1) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff1(okl4_arm_mpidr_t *x, uint64_t _aff1)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 8;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff1;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getaff2, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff2) */
+/*lint -esym(714, okl4_arm_mpidr_getaff2) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff2(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 16;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff2, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff2) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff2) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff2(okl4_arm_mpidr_t *x, uint64_t _aff2)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 16;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff2;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmt) */
+/*lint -esym(714, okl4_arm_mpidr_getmt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmt(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setmt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setmt) */
+
+/*lint -esym(621, okl4_arm_mpidr_setmt) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setmt(okl4_arm_mpidr_t *x, okl4_bool_t _mt)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_mt;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getu, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getu) */
+/*lint -esym(714, okl4_arm_mpidr_getu) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getu(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setu, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_mpidr_setu) */
+
+/*lint -esym(621, okl4_arm_mpidr_setu) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setu(okl4_arm_mpidr_t *x, okl4_bool_t _u)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_u;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_mpidr_getmp, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_mpidr_getmp) */
+/*lint -esym(714, okl4_arm_mpidr_getmp) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_mpidr_getmp(const okl4_arm_mpidr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_getaff3, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_arm_mpidr_getaff3) */
+/*lint -esym(714, okl4_arm_mpidr_getaff3) */
+OKL4_FORCE_INLINE uint64_t
+okl4_arm_mpidr_getaff3(const okl4_arm_mpidr_t *x)
+{
+    uint64_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 32;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint64_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_mpidr_setaff3, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_arm_mpidr_setaff3) */
+
+/*lint -esym(621, okl4_arm_mpidr_setaff3) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_setaff3(okl4_arm_mpidr_t *x, uint64_t _aff3)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 32;
+            uint64_t field : 8;
+        } bits;
+        okl4_arm_mpidr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_aff3;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_mpidr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_mpidr_init(okl4_arm_mpidr_t *x)
+{
+    *x = (okl4_arm_mpidr_t)2147483648U;
+}
+
+/*lint -esym(714, okl4_arm_mpidr_cast) */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_arm_mpidr_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_arm_mpidr_t x = (okl4_arm_mpidr_t)p;
+    if (force) {
+        x &= ~(okl4_arm_mpidr_t)0x80000000U;
+        x |= (okl4_arm_mpidr_t)0x80000000U; /* x.mp */
+    }
+    return x;
+}
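+
+/*
+ * Illustrative usage sketch (not part of the auto-generated API): build an
+ * okl4_arm_mpidr_t for a given cluster/core pair with the generated
+ * setters, then read a field back.  The helper name and the cluster/core
+ * parameters are hypothetical examples.
+ */
+OKL4_FORCE_INLINE okl4_arm_mpidr_t
+okl4_example_make_mpidr(uint64_t cluster, uint64_t core)
+{
+    okl4_arm_mpidr_t mpidr;
+
+    okl4_arm_mpidr_init(&mpidr);             /* starts with only the MP bit set */
+    okl4_arm_mpidr_setaff0(&mpidr, core);    /* Aff0: core within the cluster */
+    okl4_arm_mpidr_setaff1(&mpidr, cluster); /* Aff1: cluster number */
+
+    /* Reading back: okl4_arm_mpidr_getaff0(&mpidr) == (core & 0xffU) */
+    return mpidr;
+}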
+
+
+
+
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES ((uint32_t)(4U))
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON ((uint32_t)(3735928559U))
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF ((uint32_t)(0xffffffffU))
+
+
+
+
+typedef uint32_t okl4_arm_psci_function_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION ((okl4_arm_psci_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND ((okl4_arm_psci_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_OFF ((okl4_arm_psci_function_t)0x2U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_ON ((okl4_arm_psci_function_t)0x3U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO ((okl4_arm_psci_function_t)0x4U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE ((okl4_arm_psci_function_t)0x5U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE ((okl4_arm_psci_function_t)0x6U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU ((okl4_arm_psci_function_t)0x7U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF ((okl4_arm_psci_function_t)0x8U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET ((okl4_arm_psci_function_t)0x9U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES ((okl4_arm_psci_function_t)0xaU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE ((okl4_arm_psci_function_t)0xbU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND ((okl4_arm_psci_function_t)0xcU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE ((okl4_arm_psci_function_t)0xdU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND ((okl4_arm_psci_function_t)0xeU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE ((okl4_arm_psci_function_t)0xfU)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY ((okl4_arm_psci_function_t)0x10U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT ((okl4_arm_psci_function_t)0x11U)
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_PSCI_FUNCTION_PSCI_VERSION) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_OFF) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_ON) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_AFFINITY_INFO) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_OFF) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_FEATURES) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_FREEZE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_NODE_HW_STATE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) ||
+            (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT));
+}
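+
+/*
+ * Illustrative usage sketch (not generated code): reject an untrusted
+ * 32-bit value that does not name one of the PSCI function identifiers
+ * listed above.  Note that membership in the enumeration does not imply
+ * the platform actually implements the call.  The helper name is a
+ * hypothetical example.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_psci_function_valid(uint32_t raw_fn)
+{
+    return okl4_arm_psci_function_is_element_of(
+            (okl4_arm_psci_function_t)raw_fn);
+}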
+
+
+
+typedef uint32_t okl4_arm_psci_result_t;
+
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ARM_PSCI_RESULT_SUCCESS ((okl4_arm_psci_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS ((okl4_arm_psci_result_t)0xfffffff7U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ARM_PSCI_RESULT_DISABLED ((okl4_arm_psci_result_t)0xfffffff8U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ARM_PSCI_RESULT_NOT_PRESENT ((okl4_arm_psci_result_t)0xfffffff9U)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE ((okl4_arm_psci_result_t)0xfffffffaU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ARM_PSCI_RESULT_ON_PENDING ((okl4_arm_psci_result_t)0xfffffffbU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ARM_PSCI_RESULT_ALREADY_ON ((okl4_arm_psci_result_t)0xfffffffcU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ARM_PSCI_RESULT_DENIED ((okl4_arm_psci_result_t)0xfffffffdU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS ((okl4_arm_psci_result_t)0xfffffffeU)
+/*lint -esym(621, OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED ((okl4_arm_psci_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var);
+
+
+/*lint -esym(714, okl4_arm_psci_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_PSCI_RESULT_SUCCESS) ||
+            (var == OKL4_ARM_PSCI_RESULT_NOT_SUPPORTED) ||
+            (var == OKL4_ARM_PSCI_RESULT_INVALID_PARAMETERS) ||
+            (var == OKL4_ARM_PSCI_RESULT_DENIED) ||
+            (var == OKL4_ARM_PSCI_RESULT_ALREADY_ON) ||
+            (var == OKL4_ARM_PSCI_RESULT_ON_PENDING) ||
+            (var == OKL4_ARM_PSCI_RESULT_INTERNAL_FAILURE) ||
+            (var == OKL4_ARM_PSCI_RESULT_NOT_PRESENT) ||
+            (var == OKL4_ARM_PSCI_RESULT_DISABLED) ||
+            (var == OKL4_ARM_PSCI_RESULT_INVALID_ADDRESS));
+}
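+
+/*
+ * Illustrative usage sketch (not generated code): these result codes are
+ * the usual PSCI return values stored in an unsigned 32-bit type, so the
+ * error codes appear as large values (e.g. NOT_SUPPORTED is (uint32_t)-1).
+ * Callers typically only need to distinguish success from failure; the
+ * helper name below is a hypothetical example.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_example_psci_call_succeeded(okl4_arm_psci_result_t result)
+{
+    /* Any unknown code is conservatively treated as a failure. */
+    return (okl4_bool_t)(result == OKL4_ARM_PSCI_RESULT_SUCCESS);
+}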
+
+
+/**
+    - BITS 15..0 -   @ref OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE
+    - BIT 16 -   @ref OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE
+    - BITS 25..24 -   @ref OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE
+*/
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_t) */
+typedef uint32_t okl4_arm_psci_suspend_state_t;
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x);
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU ((okl4_arm_psci_suspend_state_t)(0U))
+
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK ((okl4_arm_psci_suspend_state_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK ((okl4_arm_psci_suspend_state_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK) */
+#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK ((okl4_arm_psci_suspend_state_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
+
+/*lint -sem(okl4_arm_psci_suspend_state_getstateid, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setstateid, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setstateid) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_state_id;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerdown, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerdown, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerdown) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_power_down;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_psci_suspend_state_getpowerlevel, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_psci_suspend_state_setpowerlevel, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerlevel) */
+
+/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_arm_psci_suspend_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_power_level;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x)
+{
+    *x = (okl4_arm_psci_suspend_state_t)0U;
+}
+
+/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_arm_psci_suspend_state_t x = (okl4_arm_psci_suspend_state_t)p;
+    (void)force;
+    return x;
+}
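+
+/*
+ * Illustrative usage sketch (not generated code): compose a CPU-level
+ * power-down suspend state with the setters above.  The helper name and
+ * the state_id parameter are hypothetical examples; the state_id encoding
+ * itself is platform specific.
+ */
+OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
+okl4_example_make_cpu_powerdown_state(uint32_t state_id)
+{
+    okl4_arm_psci_suspend_state_t state;
+
+    okl4_arm_psci_suspend_state_init(&state);
+    okl4_arm_psci_suspend_state_setstateid(&state, state_id);
+    okl4_arm_psci_suspend_state_setpowerdown(&state, (okl4_bool_t)1);
+    okl4_arm_psci_suspend_state_setpowerlevel(&state,
+            (uint32_t)OKL4_ARM_PSCI_POWER_LEVEL_CPU);
+    return state;
+}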
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_MMU_ENABLE_ARM_SCTLR
+    - BIT 1 -   @ref OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR
+    - BIT 2 -   @ref OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR
+    - BIT 3 -   @ref OKL4_MASK_STACK_ALIGN_ARM_SCTLR
+    - BIT 4 -   @ref OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR
+    - BIT 5 -   @ref OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR
+    - BIT 6 -   @ref OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR
+    - BIT 7 -   @ref OKL4_MASK_IT_DISABLE_ARM_SCTLR
+    - BIT 8 -   @ref OKL4_MASK_SETEND_DISABLE_ARM_SCTLR
+    - BIT 9 -   @ref OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR
+    - BIT 11 -   @ref OKL4_MASK_RESERVED11_ARM_SCTLR
+    - BIT 12 -   @ref OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR
+    - BIT 13 -   @ref OKL4_MASK_VECTORS_BIT_ARM_SCTLR
+    - BIT 14 -   @ref OKL4_MASK_DCACHE_ZERO_ARM_SCTLR
+    - BIT 15 -   @ref OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR
+    - BIT 16 -   @ref OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR
+    - BIT 18 -   @ref OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR
+    - BIT 19 -   @ref OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR
+    - BIT 20 -   @ref OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR
+    - BIT 22 -   @ref OKL4_MASK_RESERVED22_ARM_SCTLR
+    - BIT 23 -   @ref OKL4_MASK_RESERVED23_ARM_SCTLR
+    - BIT 24 -   @ref OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR
+    - BIT 25 -   @ref OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR
+    - BIT 28 -   @ref OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR
+    - BIT 29 -   @ref OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR
+    - BIT 30 -   @ref OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR
+*/
+
+/*lint -esym(621, okl4_arm_sctlr_t) */
+typedef uint32_t okl4_arm_sctlr_t;
+
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi);
+
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe);
+
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness);
+
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit);
+
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never);
+
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align);
+
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0);
+
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access);
+
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero);
+
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type);
+
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc);
+
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x);
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_ARM_SCTLR_MMU_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_MMU_ENABLE_MASK ((okl4_arm_sctlr_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_MMU_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U)
+/*lint -esym(621, OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ALIGNMENT_CHECK_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_DATA_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_MASK ((okl4_arm_sctlr_t)1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK) */
+#define OKL4_ARM_SCTLR_STACK_ALIGN_EL0_MASK ((okl4_arm_sctlr_t)1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_MASK_STACK_ALIGN_EL0_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_CP15_BARRIER_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK) */
+#define OKL4_ARM_SCTLR_OKL_HCR_EL2_DC_MASK ((okl4_arm_sctlr_t)1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_MASK_OKL_HCR_EL2_DC_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 6)
+/*lint -esym(621, OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_IT_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_IT_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_IT_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_SETEND_DISABLE_MASK) */
+#define OKL4_ARM_SCTLR_SETEND_DISABLE_MASK ((okl4_arm_sctlr_t)1U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_MASK_SETEND_DISABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 8)
+/*lint -esym(621, OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK) */
+#define OKL4_ARM_SCTLR_USER_MASK_ACCESS_MASK ((okl4_arm_sctlr_t)1U << 9) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_MASK_USER_MASK_ACCESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 9)
+/*lint -esym(621, OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED11_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED11_MASK ((okl4_arm_sctlr_t)1U << 11) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED11_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 11)
+/*lint -esym(621, OKL4_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_INSTRUCTION_CACHE_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 12) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 12)
+/*lint -esym(621, OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_VECTORS_BIT_MASK) */
+#define OKL4_ARM_SCTLR_VECTORS_BIT_MASK ((okl4_arm_sctlr_t)1U << 13) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_MASK_VECTORS_BIT_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 13)
+/*lint -esym(621, OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_DCACHE_ZERO_MASK) */
+#define OKL4_ARM_SCTLR_DCACHE_ZERO_MASK ((okl4_arm_sctlr_t)1U << 14) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_MASK_DCACHE_ZERO_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 14)
+/*lint -esym(621, OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK) */
+#define OKL4_ARM_SCTLR_USER_CACHE_TYPE_MASK ((okl4_arm_sctlr_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_MASK_USER_CACHE_TYPE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFI_MASK ((okl4_arm_sctlr_t)1U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFI_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 16)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK) */
+#define OKL4_ARM_SCTLR_NO_TRAP_WFE_MASK ((okl4_arm_sctlr_t)1U << 18) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_MASK_NO_TRAP_WFE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 18)
+/*lint -esym(621, OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 19) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 19)
+/*lint -esym(621, OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK) */
+#define OKL4_ARM_SCTLR_USER_WRITE_EXEC_NEVER_MASK ((okl4_arm_sctlr_t)1U << 20) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 20)
+/*lint -esym(621, OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED22_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED22_MASK ((okl4_arm_sctlr_t)1U << 22) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED22_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 22)
+/*lint -esym(621, OKL4_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED23_MASK) */
+#define OKL4_ARM_SCTLR_RESERVED23_MASK ((okl4_arm_sctlr_t)1U << 23) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_MASK_RESERVED23_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 23)
+/*lint -esym(621, OKL4_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 24)
+/*lint -esym(621, OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK) */
+#define OKL4_ARM_SCTLR_EXCEPTION_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 25) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 25)
+/*lint -esym(621, OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_TEX_REMAP_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 28) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_ACCESS_FLAG_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 29) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 29)
+/*lint -esym(621, OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK) */
+#define OKL4_ARM_SCTLR_THUMB_EXCEPTION_ENABLE_MASK ((okl4_arm_sctlr_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
+
+
+/*lint -sem(okl4_arm_sctlr_getmmuenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getmmuenable) */
+/*lint -esym(714, okl4_arm_sctlr_getmmuenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getmmuenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setmmuenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setmmuenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setmmuenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setmmuenable(okl4_arm_sctlr_t *x, okl4_bool_t _mmu_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_mmu_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getalignmentcheckenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getalignmentcheckenable) */
+/*lint -esym(714, okl4_arm_sctlr_getalignmentcheckenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getalignmentcheckenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setalignmentcheckenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setalignmentcheckenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setalignmentcheckenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setalignmentcheckenable(okl4_arm_sctlr_t *x, okl4_bool_t _alignment_check_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_alignment_check_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdatacacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdatacacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getdatacacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdatacacheenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdatacacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdatacacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdatacacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdatacacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _data_cache_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_data_cache_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalign) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalign(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalign) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalign) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalign(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_stack_align;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getstackalignel0, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getstackalignel0) */
+/*lint -esym(714, okl4_arm_sctlr_getstackalignel0) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getstackalignel0(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setstackalignel0, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setstackalignel0) */
+
+/*lint -esym(621, okl4_arm_sctlr_setstackalignel0) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setstackalignel0(okl4_arm_sctlr_t *x, okl4_bool_t _stack_align_el0)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_stack_align_el0;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getcp15barrierenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getcp15barrierenable) */
+/*lint -esym(714, okl4_arm_sctlr_getcp15barrierenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getcp15barrierenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setcp15barrierenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setcp15barrierenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setcp15barrierenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setcp15barrierenable(okl4_arm_sctlr_t *x, okl4_bool_t _cp15_barrier_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_cp15_barrier_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getoklhcrel2dc, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getoklhcrel2dc) */
+/*lint -esym(714, okl4_arm_sctlr_getoklhcrel2dc) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getoklhcrel2dc(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setoklhcrel2dc, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setoklhcrel2dc) */
+
+/*lint -esym(621, okl4_arm_sctlr_setoklhcrel2dc) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setoklhcrel2dc(okl4_arm_sctlr_t *x, okl4_bool_t _okl_hcr_el2_dc)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_okl_hcr_el2_dc;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getitdisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getitdisable) */
+/*lint -esym(714, okl4_arm_sctlr_getitdisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getitdisable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setitdisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setitdisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setitdisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setitdisable(okl4_arm_sctlr_t *x, okl4_bool_t _it_disable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_it_disable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getsetenddisable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getsetenddisable) */
+/*lint -esym(714, okl4_arm_sctlr_getsetenddisable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getsetenddisable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setsetenddisable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setsetenddisable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setsetenddisable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setsetenddisable(okl4_arm_sctlr_t *x, okl4_bool_t _setend_disable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_setend_disable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusermaskaccess, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusermaskaccess) */
+/*lint -esym(714, okl4_arm_sctlr_getusermaskaccess) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusermaskaccess(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusermaskaccess, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusermaskaccess) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusermaskaccess) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusermaskaccess(okl4_arm_sctlr_t *x, okl4_bool_t _user_mask_access)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_mask_access;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved11, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved11) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved11) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved11(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 11;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getinstructioncacheenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getinstructioncacheenable) */
+/*lint -esym(714, okl4_arm_sctlr_getinstructioncacheenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getinstructioncacheenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 12;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setinstructioncacheenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setinstructioncacheenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setinstructioncacheenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setinstructioncacheenable(okl4_arm_sctlr_t *x, okl4_bool_t _instruction_cache_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 12;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_instruction_cache_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getvectorsbit, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getvectorsbit) */
+/*lint -esym(714, okl4_arm_sctlr_getvectorsbit) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getvectorsbit(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 13;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setvectorsbit, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setvectorsbit) */
+
+/*lint -esym(621, okl4_arm_sctlr_setvectorsbit) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setvectorsbit(okl4_arm_sctlr_t *x, okl4_bool_t _vectors_bit)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 13;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_vectors_bit;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getdcachezero, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getdcachezero) */
+/*lint -esym(714, okl4_arm_sctlr_getdcachezero) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getdcachezero(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 14;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setdcachezero, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setdcachezero) */
+
+/*lint -esym(621, okl4_arm_sctlr_setdcachezero) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setdcachezero(okl4_arm_sctlr_t *x, okl4_bool_t _dcache_zero)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 14;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_dcache_zero;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getusercachetype, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getusercachetype) */
+/*lint -esym(714, okl4_arm_sctlr_getusercachetype) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getusercachetype(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setusercachetype, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setusercachetype) */
+
+/*lint -esym(621, okl4_arm_sctlr_setusercachetype) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setusercachetype(okl4_arm_sctlr_t *x, okl4_bool_t _user_cache_type)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_cache_type;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfi, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfi) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfi) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfi(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfi, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfi) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfi) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfi(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfi)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_no_trap_wfi;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getnotrapwfe, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getnotrapwfe) */
+/*lint -esym(714, okl4_arm_sctlr_getnotrapwfe) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getnotrapwfe(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 18;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setnotrapwfe, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setnotrapwfe) */
+
+/*lint -esym(621, okl4_arm_sctlr_setnotrapwfe) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setnotrapwfe(okl4_arm_sctlr_t *x, okl4_bool_t _no_trap_wfe)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 18;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_no_trap_wfe;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 19;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_never)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 19;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_write_exec_never;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getuserwriteexecnever, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getuserwriteexecnever) */
+/*lint -esym(714, okl4_arm_sctlr_getuserwriteexecnever) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getuserwriteexecnever(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 20;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setuserwriteexecnever, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setuserwriteexecnever) */
+
+/*lint -esym(621, okl4_arm_sctlr_setuserwriteexecnever) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setuserwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _user_write_exec_never)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 20;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user_write_exec_never;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getreserved22, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved22) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved22) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getreserved23, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+OKL4_FORCE_INLINE uint32_t
+okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 23;
+            uint32_t field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_getel0endianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
+/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getel0endianness(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setel0endianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setel0endianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setel0endianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setel0endianness(okl4_arm_sctlr_t *x, okl4_bool_t _el0_endianness)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_el0_endianness;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getexceptionendianness, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getexceptionendianness) */
+/*lint -esym(714, okl4_arm_sctlr_getexceptionendianness) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getexceptionendianness(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 25;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setexceptionendianness, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setexceptionendianness) */
+
+/*lint -esym(621, okl4_arm_sctlr_setexceptionendianness) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setexceptionendianness(okl4_arm_sctlr_t *x, okl4_bool_t _exception_endianness)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 25;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_exception_endianness;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_gettexremapenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_gettexremapenable) */
+/*lint -esym(714, okl4_arm_sctlr_gettexremapenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_gettexremapenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_settexremapenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_settexremapenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_settexremapenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_settexremapenable(okl4_arm_sctlr_t *x, okl4_bool_t _tex_remap_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tex_remap_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getaccessflagenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getaccessflagenable) */
+/*lint -esym(714, okl4_arm_sctlr_getaccessflagenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getaccessflagenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 29;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setaccessflagenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setaccessflagenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setaccessflagenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setaccessflagenable(okl4_arm_sctlr_t *x, okl4_bool_t _access_flag_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 29;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_access_flag_enable;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_arm_sctlr_getthumbexceptionenable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getthumbexceptionenable) */
+/*lint -esym(714, okl4_arm_sctlr_getthumbexceptionenable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_sctlr_getthumbexceptionenable(const okl4_arm_sctlr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_arm_sctlr_setthumbexceptionenable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setthumbexceptionenable) */
+
+/*lint -esym(621, okl4_arm_sctlr_setthumbexceptionenable) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setthumbexceptionenable(okl4_arm_sctlr_t *x, okl4_bool_t _thumb_exception_enable)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_arm_sctlr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_thumb_exception_enable;
+    *x = _conv.raw;
+}
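+/*
+ * The default value written by okl4_arm_sctlr_init() below, 12912928U
+ * (0xC50920), corresponds to the following fields (per the bit offsets used
+ * by the accessors above): cp15_barrier_enable (bit 5), setend_disable
+ * (bit 8), reserved11 (bit 11), no_trap_wfi (bit 16), no_trap_wfe (bit 18),
+ * reserved22 (bit 22) and reserved23 (bit 23).
+ */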
+/*lint -esym(714, okl4_arm_sctlr_init) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_init(okl4_arm_sctlr_t *x)
+{
+    *x = (okl4_arm_sctlr_t)12912928U;
+}
+
+/*lint -esym(714, okl4_arm_sctlr_cast) */
+OKL4_FORCE_INLINE okl4_arm_sctlr_t
+okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_arm_sctlr_t x = (okl4_arm_sctlr_t)p;
+    if (force) {
+        x &= ~(okl4_arm_sctlr_t)0x800U;
+        x |= (okl4_arm_sctlr_t)0x800U; /* x.reserved11 */
+        x &= ~(okl4_arm_sctlr_t)0x400000U;
+        x |= (okl4_arm_sctlr_t)0x400000U; /* x.reserved22 */
+        x &= ~(okl4_arm_sctlr_t)0x800000U;
+        x |= (okl4_arm_sctlr_t)0x800000U; /* x.reserved23 */
+    }
+    return x;
+}
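+
+/*
+ * Example (illustrative sketch only; not part of the generated interface).
+ * All of the okl4_arm_sctlr accessors above follow the same pattern: start
+ * from the default register image, then read or toggle individual fields.
+ *
+ *     okl4_arm_sctlr_t sctlr;
+ *
+ *     okl4_arm_sctlr_init(&sctlr);
+ *     okl4_arm_sctlr_setmmuenable(&sctlr, (okl4_bool_t)1);
+ *     okl4_arm_sctlr_setdatacacheenable(&sctlr, (okl4_bool_t)1);
+ *
+ *     if (!okl4_arm_sctlr_getinstructioncacheenable(&sctlr)) {
+ *         okl4_arm_sctlr_setinstructioncacheenable(&sctlr, (okl4_bool_t)1);
+ *     }
+ *
+ * okl4_arm_sctlr_cast() with force set merely guarantees that the RES1 bits
+ * (reserved11, reserved22 and reserved23) are set in the returned value.
+ */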
+
+
+
+
+typedef uint32_t okl4_arm_smccc_arch_function_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION ((okl4_arm_smccc_arch_function_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES ((okl4_arm_smccc_arch_function_t)0x1U)
+/*lint -esym(621, OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 ((okl4_arm_smccc_arch_function_t)0x8000U)
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_arch_function_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_arch_function_is_element_of(okl4_arm_smccc_arch_function_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) ||
+            (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) ||
+            (var == OKL4_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1));
+}
+
+
+
+typedef uint32_t okl4_arm_smccc_result_t;
+
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ARM_SMCCC_RESULT_SUCCESS ((okl4_arm_smccc_result_t)0x0U)
+/*lint -esym(621, OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED ((okl4_arm_smccc_result_t)0xffffffffU)
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var);
+
+
+/*lint -esym(714, okl4_arm_smccc_result_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_arm_smccc_result_is_element_of(okl4_arm_smccc_result_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ARM_SMCCC_RESULT_SUCCESS) ||
+            (var == OKL4_ARM_SMCCC_RESULT_NOT_SUPPORTED));
+}
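+
+/*
+ * Example (illustrative sketch only): the _is_element_of() helpers simply
+ * test whether a raw value matches one of the defined constants, so they can
+ * be used to validate a value before interpreting it:
+ *
+ *     uint32_t raw = 0xffffffffU;  // e.g. a value read back from a call
+ *     okl4_arm_smccc_result_t res = (okl4_arm_smccc_result_t)raw;
+ *
+ *     if (!okl4_arm_smccc_result_is_element_of(res)) {
+ *         // not a known result code; treat as an error
+ *     }
+ */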
+
+
+/**
+    The `okl4_register_t` type represents an unsigned, machine-native
+    register-sized integer value.
+*/
+
+typedef uint64_t okl4_register_t;
+
+
+
+
+
+typedef okl4_register_t okl4_atomic_raw_register_t;
+
+
+
+
+
+
+
+
+
+typedef uint16_t okl4_atomic_raw_uint16_t;
+
+
+
+
+
+typedef uint32_t okl4_atomic_raw_uint32_t;
+
+
+
+
+
+typedef uint64_t okl4_atomic_raw_uint64_t;
+
+
+
+
+
+
+
+
+
+typedef uint8_t okl4_atomic_raw_uint8_t;
+
+
+
+
+/**
+    The okl4_atomic_register_t type implements a machine-word-sized value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_register {
+    volatile okl4_atomic_raw_register_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_register_t type implements a machine-word-sized value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_register okl4_atomic_register_t;
+
+
+
+
+/**
+    The okl4_atomic_uint16_t type implements a 16-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint16 {
+    volatile okl4_atomic_raw_uint16_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint16_t type implements a 16-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint16 okl4_atomic_uint16_t;
+
+
+
+
+/**
+    The okl4_atomic_uint32_t type implements a 32-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint32 {
+    volatile okl4_atomic_raw_uint32_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint32_t type implements a 32-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint32 okl4_atomic_uint32_t;
+
+
+
+
+/**
+    The okl4_atomic_uint64_t type implements a 64-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint64 {
+    volatile okl4_atomic_raw_uint64_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint64_t type implements a 64-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint64 okl4_atomic_uint64_t;
+
+
+
+
+/**
+    The okl4_atomic_uint8_t type implements an 8-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+struct okl4_atomic_uint8 {
+    volatile okl4_atomic_raw_uint8_t value;
+};
+
+
+
+
+
+
+/**
+    The okl4_atomic_uint8_t type implements an 8-bit value
+    that can be operated on using atomic operations.  This can be used
+    to implement thread-safe synchronisation primitives.
+*/
+
+typedef struct okl4_atomic_uint8 okl4_atomic_uint8_t;
+
+
+
+
+/**
+    The `okl4_count_t` type represents a natural number of items or
+    iterations. This type is unsigned and cannot represent error values; use
+    `okl4_scount_t` if an error representation is required.
+*/
+
+typedef uint32_t okl4_count_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS ((okl4_count_t)(12U))
+
+/** The maximum limit for the segment index returned by mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK ((okl4_count_t)(1023U))
+
+/** The maximum limit for segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS ((okl4_count_t)(256U))
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS ((okl4_count_t)(0xffffffffU))
+
+
+
+/**
+    The `okl4_kcap_t` type represents a kernel object capability identifier
+    (otherwise known as *designator* or *cap*) that addresses a kernel
+    capability. A capability encodes rights to perform particular operations on
+    a kernel object.
+*/
+
+typedef okl4_count_t okl4_kcap_t;
+
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID ((okl4_kcap_t)(0xffffffffU))
+
+
+
+/**
+    The `okl4_interrupt_number_t` type is an index into the interrupt ID
+    space. For platforms with a single simple interrupt controller, this is
+    the physical interrupt number. When there are multiple interrupt
+    controllers, or a large and sparse interrupt ID space, the mapping from
+    this type to the physical interrupt is defined by the KSP.
+*/
+
+typedef okl4_count_t okl4_interrupt_number_t;
+
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ ((okl4_interrupt_number_t)(1023U))
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ ((okl4_interrupt_number_t)(1023U))
+
+
+
+
+typedef okl4_interrupt_number_t okl4_irq_t;
+
+
+
+
+/**
+    The `okl4_axon_data` structure collects the handles associated with one
+    Axon endpoint: the Axon capability (`kcap`), the capability of the memory
+    segment backing its buffers (`segment`), and its virtual interrupt
+    (`virq`).
+*/
+
+struct okl4_axon_data {
+    okl4_kcap_t kcap;
+    okl4_kcap_t segment;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+    The `okl4_psize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any physical memory object.
+*/
+
+typedef okl4_register_t okl4_psize_t;
+
+
+
+
+/**
+    The `okl4_lsize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any guest logical memory object.
+*/
+
+typedef okl4_psize_t okl4_lsize_t;
+
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE ((okl4_lsize_t)(4096U))
+
+
+
+/**
+    The `okl4_laddr_t` type represents an unsigned integer value which is large
+    enough to contain a guest logical address; that is, an address in the
+    input address space of the guest's virtual MMU. This may be larger than
+    the machine's pointer type.
+*/
+
+typedef okl4_lsize_t okl4_laddr_t;
+
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END ((okl4_laddr_t)(17592186044416U))
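+/* 17592186044416 is 2^44; the guest user area therefore ends at the 44-bit
+   logical address boundary. */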
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_PENDING_AXON_DATA_INFO
+    - BIT 1 -   @ref OKL4_MASK_FAILURE_AXON_DATA_INFO
+    - BIT 2 -   @ref OKL4_MASK_USR_AXON_DATA_INFO
+    - BITS 63..3 -   @ref OKL4_MASK_LADDR_AXON_DATA_INFO
+*/
+
+/*lint -esym(621, okl4_axon_data_info_t) */
+typedef okl4_laddr_t okl4_axon_data_info_t;
+
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending);
+
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure);
+
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr);
+
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x);
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr);
+
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x);
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_DATA_INFO_PENDING_MASK) */
+#define OKL4_AXON_DATA_INFO_PENDING_MASK ((okl4_axon_data_info_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_MASK_PENDING_AXON_DATA_INFO ((okl4_axon_data_info_t)1U)
+/*lint -esym(621, OKL4_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_FAILURE_MASK) */
+#define OKL4_AXON_DATA_INFO_FAILURE_MASK ((okl4_axon_data_info_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_MASK_FAILURE_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_USR_MASK) */
+#define OKL4_AXON_DATA_INFO_USR_MASK ((okl4_axon_data_info_t)1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_MASK_USR_AXON_DATA_INFO ((okl4_axon_data_info_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_AXON_DATA_INFO_LADDR_MASK) */
+#define OKL4_AXON_DATA_INFO_LADDR_MASK ((okl4_axon_data_info_t)2305843009213693951U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_MASK_LADDR_AXON_DATA_INFO ((okl4_axon_data_info_t)2305843009213693951U << 3)
+/*lint -esym(621, OKL4_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/*lint -sem(okl4_axon_data_info_getpending, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getpending) */
+/*lint -esym(714, okl4_axon_data_info_getpending) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getpending(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setpending, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setpending) */
+
+/*lint -esym(621, okl4_axon_data_info_setpending) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setpending(okl4_axon_data_info_t *x, okl4_bool_t _pending)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_pending;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getfailure, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getfailure) */
+/*lint -esym(714, okl4_axon_data_info_getfailure) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getfailure(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setfailure, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setfailure) */
+
+/*lint -esym(621, okl4_axon_data_info_setfailure) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setfailure(okl4_axon_data_info_t *x, okl4_bool_t _failure)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_failure;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getusr, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_data_info_getusr) */
+/*lint -esym(714, okl4_axon_data_info_getusr) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_data_info_getusr(const okl4_axon_data_info_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_data_info_setusr, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_data_info_setusr) */
+
+/*lint -esym(621, okl4_axon_data_info_setusr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setusr(okl4_axon_data_info_t *x, okl4_bool_t _usr)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_usr;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_data_info_getladdr, 1p) */
+/*lint -esym(621, okl4_axon_data_info_getladdr) */
+/*lint -esym(714, okl4_axon_data_info_getladdr) */
+OKL4_FORCE_INLINE okl4_laddr_t
+okl4_axon_data_info_getladdr(const okl4_axon_data_info_t *x)
+{
+    okl4_laddr_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 3;
+            uint64_t field : 61;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_laddr_t)_conv.bits.field;
+    return (okl4_laddr_t)(field << 3);
+}
+
+/*lint -esym(714, okl4_axon_data_info_setladdr) */
+
+/*lint -esym(621, okl4_axon_data_info_setladdr) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_setladdr(okl4_axon_data_info_t *x, okl4_laddr_t _laddr)
+{
+    okl4_laddr_t val = _laddr >> 3;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 3;
+            uint64_t field : 61;
+        } bits;
+        okl4_axon_data_info_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)val;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_data_info_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_data_info_init(okl4_axon_data_info_t *x)
+{
+    *x = (okl4_axon_data_info_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_data_info_cast) */
+OKL4_FORCE_INLINE okl4_axon_data_info_t
+okl4_axon_data_info_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_axon_data_info_t x = (okl4_axon_data_info_t)p;
+    (void)force;
+    return x;
+}
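+
+/*
+ * Example (illustrative sketch only): an okl4_axon_data_info_t packs a guest
+ * logical address together with the pending/failure/usr flags. The laddr
+ * field is stored pre-shifted by 3 bits, so the address passed to
+ * okl4_axon_data_info_setladdr() should be 8-byte aligned; the low three bits
+ * are dropped by the setter and come back as zeroes from the getter.
+ *
+ *     okl4_axon_data_info_t info;
+ *
+ *     okl4_axon_data_info_init(&info);
+ *     okl4_axon_data_info_setladdr(&info, (okl4_laddr_t)0x80001000U);
+ *     okl4_axon_data_info_setpending(&info, (okl4_bool_t)1);
+ *
+ *     if (okl4_axon_data_info_getfailure(&info)) {
+ *         // the transfer described by this entry was flagged as failed
+ *     }
+ */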
+
+
+
+/**
+    The `okl4_axon_ep_data` structure holds the Axon data for both halves of
+    an endpoint: the receive (`rx`) and transmit (`tx`) directions.
+*/
+
+struct okl4_axon_ep_data {
+    struct okl4_axon_data rx;
+    struct okl4_axon_data tx;
+};
+
+
+
+
+
+
+
+
+
+typedef char _okl4_padding_t;
+
+
+
+
+
+struct okl4_axon_queue {
+    uint32_t queue_offset;
+    uint16_t entries;
+    volatile uint16_t kptr;
+    volatile uint16_t uptr;
+    _okl4_padding_t __padding0_2; /**< Padding 4 */
+    _okl4_padding_t __padding1_3; /**< Padding 4 */
+};
+
+
+
+
+
+
+/**
+    The `okl4_ksize_t` type represents an unsigned integer value which is large
+    enough to represent the size of any kernel-accessible memory object.
+*/
+
+typedef okl4_lsize_t okl4_ksize_t;
+
+
+
+
+
+struct okl4_axon_queue_entry {
+    okl4_axon_data_info_t info;
+    okl4_ksize_t data_size;
+    uint32_t recv_sequence;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+};
+
+
+
+
+
+
+/**
+    - BITS 4..0 -   @ref OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE
+    - BITS 12..8 -   @ref OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE
+*/
+
+/*lint -esym(621, okl4_axon_queue_size_t) */
+typedef uint16_t okl4_axon_queue_size_t;
+
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order);
+
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x);
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order);
+
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x);
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_ALLOC_ORDER_MASK (okl4_axon_queue_size_t)(31U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U)
+/*lint -esym(621, OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK) */
+#define OKL4_AXON_QUEUE_SIZE_MIN_ORDER_MASK (okl4_axon_queue_size_t)(31U << 8) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_MASK_MIN_ORDER_AXON_QUEUE_SIZE (okl4_axon_queue_size_t)(31U << 8)
+/*lint -esym(621, OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/*lint -sem(okl4_axon_queue_size_getallocorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getallocorder) */
+/*lint -esym(714, okl4_axon_queue_size_getallocorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getallocorder(const okl4_axon_queue_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setallocorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setallocorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setallocorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setallocorder(okl4_axon_queue_size_t *x, okl4_count_t _alloc_order)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_alloc_order;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_queue_size_getminorder, 1p, @n >= 0 && @n <= 31) */
+/*lint -esym(621, okl4_axon_queue_size_getminorder) */
+/*lint -esym(714, okl4_axon_queue_size_getminorder) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_axon_queue_size_getminorder(const okl4_axon_queue_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_queue_size_setminorder, 2n >= 0 && 2n <= 31) */
+/*lint -esym(714, okl4_axon_queue_size_setminorder) */
+
+/*lint -esym(621, okl4_axon_queue_size_setminorder) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_setminorder(okl4_axon_queue_size_t *x, okl4_count_t _min_order)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 5;
+        } bits;
+        okl4_axon_queue_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_min_order;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_queue_size_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_queue_size_init(okl4_axon_queue_size_t *x)
+{
+    *x = (okl4_axon_queue_size_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_queue_size_cast) */
+OKL4_FORCE_INLINE okl4_axon_queue_size_t
+okl4_axon_queue_size_cast(uint16_t p, okl4_bool_t force)
+{
+    okl4_axon_queue_size_t x = (okl4_axon_queue_size_t)p;
+    (void)force;
+    return x;
+}
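+
+/*
+ * Example (illustrative sketch only): an okl4_axon_queue_size_t holds two
+ * 5-bit power-of-two orders, an allocation order and a minimum order; what
+ * the orders count is defined by the Axon implementation rather than by this
+ * header.
+ *
+ *     okl4_axon_queue_size_t size;
+ *
+ *     okl4_axon_queue_size_init(&size);
+ *     okl4_axon_queue_size_setallocorder(&size, 6U);  // order 6, i.e. 2^6
+ *     okl4_axon_queue_size_setminorder(&size, 2U);    // order 2, i.e. 2^2
+ *
+ *     okl4_count_t alloc_order = okl4_axon_queue_size_getallocorder(&size);
+ */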
+
+
+
+
+struct okl4_axon_rx {
+    struct okl4_axon_queue queues[4];
+    okl4_axon_queue_size_t queue_sizes[4];
+};
+
+
+
+
+
+
+
+struct okl4_axon_tx {
+    struct okl4_axon_queue queues[4];
+};
+
+
+
+
+
+
+
+typedef okl4_register_t okl4_virq_flags_t;
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_READY_AXON_VIRQ_FLAGS
+    - BIT 1 -   @ref OKL4_MASK_FAULT_AXON_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_axon_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_axon_virq_flags_t;
+
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready);
+
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x);
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault);
+
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x);
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_READY_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_READY_MASK ((okl4_axon_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_READY_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_AXON_VIRQ_FLAGS_FAULT_MASK) */
+#define OKL4_AXON_VIRQ_FLAGS_FAULT_MASK ((okl4_axon_virq_flags_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_MASK_FAULT_AXON_VIRQ_FLAGS ((okl4_axon_virq_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_axon_virq_flags_getready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getready) */
+/*lint -esym(714, okl4_axon_virq_flags_getready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getready(const okl4_axon_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setready) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setready) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setready(okl4_axon_virq_flags_t *x, okl4_bool_t _ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_axon_virq_flags_getfault, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_axon_virq_flags_getfault) */
+/*lint -esym(714, okl4_axon_virq_flags_getfault) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_axon_virq_flags_getfault(const okl4_axon_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_axon_virq_flags_setfault, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_axon_virq_flags_setfault) */
+
+/*lint -esym(621, okl4_axon_virq_flags_setfault) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_setfault(okl4_axon_virq_flags_t *x, okl4_bool_t _fault)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_axon_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_fault;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_axon_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_axon_virq_flags_init(okl4_axon_virq_flags_t *x)
+{
+    *x = (okl4_axon_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_axon_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_axon_virq_flags_t
+okl4_axon_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_axon_virq_flags_t x = (okl4_axon_virq_flags_t)p;
+    (void)force;
+    return x;
+}
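+
+/*
+ * Example (illustrative sketch only, assuming `payload` is the raw 64-bit
+ * word delivered with an Axon's virtual interrupt): the ready and fault bits
+ * can be decoded with the accessors above.
+ *
+ *     okl4_axon_virq_flags_t flags =
+ *         okl4_axon_virq_flags_cast(payload, (okl4_bool_t)0);
+ *
+ *     if (okl4_axon_virq_flags_getready(&flags)) {
+ *         // data is available on the channel
+ *     }
+ *     if (okl4_axon_virq_flags_getfault(&flags)) {
+ *         // the channel reported a fault
+ *     }
+ */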
+
+
+
+/**
+    The `okl4_page_cache_t` object represents a set of attributes that
+    controls the caching behaviour of memory page mappings.
+
+    - @ref OKL4_PAGE_CACHE_WRITECOMBINE
+    - @ref OKL4_PAGE_CACHE_DEFAULT
+    - @ref OKL4_PAGE_CACHE_IPC_RX
+    - @ref OKL4_PAGE_CACHE_IPC_TX
+    - @ref OKL4_PAGE_CACHE_TRACEBUFFER
+    - @ref OKL4_PAGE_CACHE_WRITEBACK
+    - @ref OKL4_PAGE_CACHE_IWB_RWA_ONC
+    - @ref OKL4_PAGE_CACHE_WRITETHROUGH
+    - @ref OKL4_PAGE_CACHE_DEVICE_GRE
+    - @ref OKL4_PAGE_CACHE_DEVICE_NGRE
+    - @ref OKL4_PAGE_CACHE_DEVICE
+    - @ref OKL4_PAGE_CACHE_STRONG
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE
+    - @ref OKL4_PAGE_CACHE_HW_MASK
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGNRE
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_NGRE
+    - @ref OKL4_PAGE_CACHE_HW_DEVICE_GRE
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_NC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_NSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_NC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_OSH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_NC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_TWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_HW_WB_RWA_ISH
+    - @ref OKL4_PAGE_CACHE_MAX
+    - @ref OKL4_PAGE_CACHE_INVALID
+*/
+
+typedef okl4_count_t okl4_page_cache_t;
+
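As a rough, illustrative sketch of how this type is typically consumed (assumptions: the caller includes this generated header; `struct mapping_attrs` and `mapping_attrs_init` are hypothetical names used only for illustration, not part of the OKL4 API), a component selects one of the `OKL4_PAGE_CACHE_*` constants defined below when describing the attributes of a mapping:

/* Hypothetical caller-side helper: pick a cache attribute for a mapping.
 * Normal memory uses OKL4_PAGE_CACHE_DEFAULT; device registers use
 * OKL4_PAGE_CACHE_DEVICE. Both constants are defined later in this header. */
struct mapping_attrs {
    okl4_page_cache_t cache;
};

static void
mapping_attrs_init(struct mapping_attrs *attrs, okl4_bool_t is_device)
{
    attrs->cache = is_device ? OKL4_PAGE_CACHE_DEVICE
                             : OKL4_PAGE_CACHE_DEFAULT;
}

Note that, as defined below, OKL4_PAGE_CACHE_DEFAULT, OKL4_PAGE_CACHE_IPC_RX, OKL4_PAGE_CACHE_IPC_TX, OKL4_PAGE_CACHE_TRACEBUFFER and OKL4_PAGE_CACHE_WRITEBACK all share the value 0x1, so any of these aliases selects the same default write-back mapping attribute.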
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_PAGE_CACHE_WRITECOMBINE ((okl4_page_cache_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEFAULT) */
+#define OKL4_PAGE_CACHE_DEFAULT ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_RX) */
+#define OKL4_PAGE_CACHE_IPC_RX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IPC_TX) */
+#define OKL4_PAGE_CACHE_IPC_TX ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_PAGE_CACHE_TRACEBUFFER ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITEBACK) */
+#define OKL4_PAGE_CACHE_WRITEBACK ((okl4_page_cache_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_PAGE_CACHE_IWB_RWA_ONC ((okl4_page_cache_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_PAGE_CACHE_WRITETHROUGH ((okl4_page_cache_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_DEVICE_GRE ((okl4_page_cache_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_DEVICE_NGRE ((okl4_page_cache_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_DEVICE) */
+#define OKL4_PAGE_CACHE_DEVICE ((okl4_page_cache_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_STRONG) */
+#define OKL4_PAGE_CACHE_STRONG ((okl4_page_cache_t)0x7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_MASK) */
+#define OKL4_PAGE_CACHE_HW_MASK ((okl4_page_cache_t)0x8000000U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGNRE ((okl4_page_cache_t)0x8000004U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_NGRE ((okl4_page_cache_t)0x8000008U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_PAGE_CACHE_HW_DEVICE_GRE ((okl4_page_cache_t)0x800000cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_NSH ((okl4_page_cache_t)0x8000011U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000012U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000013U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH ((okl4_page_cache_t)0x8000014U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000015U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000016U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000017U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000018U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x8000019U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH ((okl4_page_cache_t)0x800001fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000021U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_NSH ((okl4_page_cache_t)0x8000022U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000023U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH ((okl4_page_cache_t)0x8000024U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000025U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000026U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000027U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000028U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x8000029U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH ((okl4_page_cache_t)0x800002fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000031U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000032U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_NSH ((okl4_page_cache_t)0x8000033U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000034U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000035U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000036U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000037U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000038U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x8000039U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH ((okl4_page_cache_t)0x800003fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000041U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH ((okl4_page_cache_t)0x8000042U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH ((okl4_page_cache_t)0x8000043U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_PAGE_CACHE_HW_NC_NSH ((okl4_page_cache_t)0x8000044U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH ((okl4_page_cache_t)0x8000045U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH ((okl4_page_cache_t)0x8000046U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH ((okl4_page_cache_t)0x8000047U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH ((okl4_page_cache_t)0x8000048U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH ((okl4_page_cache_t)0x8000049U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH ((okl4_page_cache_t)0x800004aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH ((okl4_page_cache_t)0x800004bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH ((okl4_page_cache_t)0x800004cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH ((okl4_page_cache_t)0x800004dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH ((okl4_page_cache_t)0x800004eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH ((okl4_page_cache_t)0x800004fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000051U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000052U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000053U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH ((okl4_page_cache_t)0x8000054U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_NSH ((okl4_page_cache_t)0x8000055U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000056U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000057U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000058U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x8000059U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH ((okl4_page_cache_t)0x800005fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000061U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000062U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000063U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH ((okl4_page_cache_t)0x8000064U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000065U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_NSH ((okl4_page_cache_t)0x8000066U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000067U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000068U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x8000069U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH ((okl4_page_cache_t)0x800006fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000071U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000072U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000073U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000074U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000075U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000076U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_NSH ((okl4_page_cache_t)0x8000077U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000078U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x8000079U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH ((okl4_page_cache_t)0x800007fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000081U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000082U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000083U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH ((okl4_page_cache_t)0x8000084U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000085U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x8000086U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x8000087U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_NSH ((okl4_page_cache_t)0x8000088U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH ((okl4_page_cache_t)0x8000089U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH ((okl4_page_cache_t)0x800008cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH ((okl4_page_cache_t)0x800008dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH ((okl4_page_cache_t)0x800008eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH ((okl4_page_cache_t)0x800008fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000091U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000092U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000093U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH ((okl4_page_cache_t)0x8000094U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x8000095U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x8000096U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x8000097U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH ((okl4_page_cache_t)0x8000098U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_NSH ((okl4_page_cache_t)0x8000099U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH ((okl4_page_cache_t)0x800009cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH ((okl4_page_cache_t)0x800009dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH ((okl4_page_cache_t)0x800009eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH ((okl4_page_cache_t)0x800009fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH ((okl4_page_cache_t)0x80000a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_NSH ((okl4_page_cache_t)0x80000aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH ((okl4_page_cache_t)0x80000acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH ((okl4_page_cache_t)0x80000adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH ((okl4_page_cache_t)0x80000aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH ((okl4_page_cache_t)0x80000afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_NSH ((okl4_page_cache_t)0x80000bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH ((okl4_page_cache_t)0x80000bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH ((okl4_page_cache_t)0x80000c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_NSH ((okl4_page_cache_t)0x80000ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH ((okl4_page_cache_t)0x80000ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH ((okl4_page_cache_t)0x80000cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH ((okl4_page_cache_t)0x80000d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH ((okl4_page_cache_t)0x80000d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_NSH ((okl4_page_cache_t)0x80000ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH ((okl4_page_cache_t)0x80000deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH ((okl4_page_cache_t)0x80000dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH ((okl4_page_cache_t)0x80000e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH ((okl4_page_cache_t)0x80000eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH ((okl4_page_cache_t)0x80000ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH ((okl4_page_cache_t)0x80000edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_NSH ((okl4_page_cache_t)0x80000eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH ((okl4_page_cache_t)0x80000efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH ((okl4_page_cache_t)0x80000feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_NSH ((okl4_page_cache_t)0x80000ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_OSH ((okl4_page_cache_t)0x8000211U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000212U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000213U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH ((okl4_page_cache_t)0x8000214U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000215U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000216U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000217U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000218U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x8000219U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH ((okl4_page_cache_t)0x800021fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000221U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_OSH ((okl4_page_cache_t)0x8000222U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000223U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH ((okl4_page_cache_t)0x8000224U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000225U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000226U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000227U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000228U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x8000229U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH ((okl4_page_cache_t)0x800022fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000231U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000232U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_OSH ((okl4_page_cache_t)0x8000233U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000234U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000235U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000236U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000237U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000238U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x8000239U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH ((okl4_page_cache_t)0x800023fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000241U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH ((okl4_page_cache_t)0x8000242U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH ((okl4_page_cache_t)0x8000243U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_PAGE_CACHE_HW_NC_OSH ((okl4_page_cache_t)0x8000244U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH ((okl4_page_cache_t)0x8000245U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH ((okl4_page_cache_t)0x8000246U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH ((okl4_page_cache_t)0x8000247U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH ((okl4_page_cache_t)0x8000248U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH ((okl4_page_cache_t)0x8000249U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH ((okl4_page_cache_t)0x800024aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH ((okl4_page_cache_t)0x800024bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH ((okl4_page_cache_t)0x800024cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH ((okl4_page_cache_t)0x800024dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH ((okl4_page_cache_t)0x800024eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH ((okl4_page_cache_t)0x800024fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000251U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000252U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000253U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH ((okl4_page_cache_t)0x8000254U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_OSH ((okl4_page_cache_t)0x8000255U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000256U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000257U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000258U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x8000259U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH ((okl4_page_cache_t)0x800025fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000261U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000262U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000263U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH ((okl4_page_cache_t)0x8000264U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000265U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_OSH ((okl4_page_cache_t)0x8000266U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000267U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000268U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x8000269U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH ((okl4_page_cache_t)0x800026fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000271U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000272U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000273U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000274U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000275U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000276U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_OSH ((okl4_page_cache_t)0x8000277U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000278U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x8000279U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH ((okl4_page_cache_t)0x800027fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000281U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000282U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000283U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH ((okl4_page_cache_t)0x8000284U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000285U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x8000286U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x8000287U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_OSH ((okl4_page_cache_t)0x8000288U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH ((okl4_page_cache_t)0x8000289U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH ((okl4_page_cache_t)0x800028cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH ((okl4_page_cache_t)0x800028dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH ((okl4_page_cache_t)0x800028eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH ((okl4_page_cache_t)0x800028fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000291U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000292U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000293U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH ((okl4_page_cache_t)0x8000294U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x8000295U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x8000296U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x8000297U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH ((okl4_page_cache_t)0x8000298U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_OSH ((okl4_page_cache_t)0x8000299U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH ((okl4_page_cache_t)0x800029cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH ((okl4_page_cache_t)0x800029dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH ((okl4_page_cache_t)0x800029eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH ((okl4_page_cache_t)0x800029fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH ((okl4_page_cache_t)0x80002a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_OSH ((okl4_page_cache_t)0x80002aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH ((okl4_page_cache_t)0x80002acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH ((okl4_page_cache_t)0x80002adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH ((okl4_page_cache_t)0x80002aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH ((okl4_page_cache_t)0x80002afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_OSH ((okl4_page_cache_t)0x80002bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH ((okl4_page_cache_t)0x80002bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH ((okl4_page_cache_t)0x80002c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_OSH ((okl4_page_cache_t)0x80002ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH ((okl4_page_cache_t)0x80002ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH ((okl4_page_cache_t)0x80002cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH ((okl4_page_cache_t)0x80002d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH ((okl4_page_cache_t)0x80002d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_OSH ((okl4_page_cache_t)0x80002ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH ((okl4_page_cache_t)0x80002deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH ((okl4_page_cache_t)0x80002dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH ((okl4_page_cache_t)0x80002e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH ((okl4_page_cache_t)0x80002eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH ((okl4_page_cache_t)0x80002ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH ((okl4_page_cache_t)0x80002edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_OSH ((okl4_page_cache_t)0x80002eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH ((okl4_page_cache_t)0x80002efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH ((okl4_page_cache_t)0x80002feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_OSH ((okl4_page_cache_t)0x80002ffU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_WA_ISH ((okl4_page_cache_t)0x8000311U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000312U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000313U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH ((okl4_page_cache_t)0x8000314U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000315U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000316U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000317U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000318U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x8000319U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH ((okl4_page_cache_t)0x800031fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000321U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RA_ISH ((okl4_page_cache_t)0x8000322U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000323U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH ((okl4_page_cache_t)0x8000324U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000325U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000326U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000327U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000328U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x8000329U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH ((okl4_page_cache_t)0x800032fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000331U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000332U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWT_RWA_ISH ((okl4_page_cache_t)0x8000333U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000334U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000335U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000336U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000337U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000338U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x8000339U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH ((okl4_page_cache_t)0x800033fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000341U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH ((okl4_page_cache_t)0x8000342U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH ((okl4_page_cache_t)0x8000343U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_PAGE_CACHE_HW_NC_ISH ((okl4_page_cache_t)0x8000344U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH ((okl4_page_cache_t)0x8000345U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH ((okl4_page_cache_t)0x8000346U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH ((okl4_page_cache_t)0x8000347U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH ((okl4_page_cache_t)0x8000348U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH ((okl4_page_cache_t)0x8000349U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH ((okl4_page_cache_t)0x800034aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH ((okl4_page_cache_t)0x800034bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH ((okl4_page_cache_t)0x800034cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH ((okl4_page_cache_t)0x800034dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH ((okl4_page_cache_t)0x800034eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH ((okl4_page_cache_t)0x800034fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000351U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000352U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000353U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH ((okl4_page_cache_t)0x8000354U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_WA_ISH ((okl4_page_cache_t)0x8000355U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000356U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000357U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000358U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x8000359U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH ((okl4_page_cache_t)0x800035fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000361U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000362U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000363U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH ((okl4_page_cache_t)0x8000364U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000365U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RA_ISH ((okl4_page_cache_t)0x8000366U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000367U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000368U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x8000369U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH ((okl4_page_cache_t)0x800036fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000371U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000372U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000373U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000374U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000375U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000376U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_TWB_RWA_ISH ((okl4_page_cache_t)0x8000377U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000378U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x8000379U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH ((okl4_page_cache_t)0x800037fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000381U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000382U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000383U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH ((okl4_page_cache_t)0x8000384U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000385U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x8000386U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x8000387U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_NA_ISH ((okl4_page_cache_t)0x8000388U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH ((okl4_page_cache_t)0x8000389U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH ((okl4_page_cache_t)0x800038cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH ((okl4_page_cache_t)0x800038dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH ((okl4_page_cache_t)0x800038eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH ((okl4_page_cache_t)0x800038fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000391U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000392U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000393U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH ((okl4_page_cache_t)0x8000394U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x8000395U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x8000396U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x8000397U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH ((okl4_page_cache_t)0x8000398U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_WA_ISH ((okl4_page_cache_t)0x8000399U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039aU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039bU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH ((okl4_page_cache_t)0x800039cU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH ((okl4_page_cache_t)0x800039dU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH ((okl4_page_cache_t)0x800039eU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH ((okl4_page_cache_t)0x800039fU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH ((okl4_page_cache_t)0x80003a4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003a9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RA_ISH ((okl4_page_cache_t)0x80003aaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003abU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH ((okl4_page_cache_t)0x80003acU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH ((okl4_page_cache_t)0x80003adU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH ((okl4_page_cache_t)0x80003aeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH ((okl4_page_cache_t)0x80003afU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003b9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003baU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WT_RWA_ISH ((okl4_page_cache_t)0x80003bbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003beU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH ((okl4_page_cache_t)0x80003bfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH ((okl4_page_cache_t)0x80003c4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003c9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003caU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_NA_ISH ((okl4_page_cache_t)0x80003ccU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH ((okl4_page_cache_t)0x80003ceU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH ((okl4_page_cache_t)0x80003cfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH ((okl4_page_cache_t)0x80003d4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH ((okl4_page_cache_t)0x80003d9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003daU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_WA_ISH ((okl4_page_cache_t)0x80003ddU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH ((okl4_page_cache_t)0x80003deU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH ((okl4_page_cache_t)0x80003dfU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH ((okl4_page_cache_t)0x80003e4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003e9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH ((okl4_page_cache_t)0x80003eaU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ebU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH ((okl4_page_cache_t)0x80003ecU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH ((okl4_page_cache_t)0x80003edU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RA_ISH ((okl4_page_cache_t)0x80003eeU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH ((okl4_page_cache_t)0x80003efU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f1U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f2U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f3U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f4U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f5U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f6U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f7U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f8U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003f9U)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003faU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fbU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fcU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003fdU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH ((okl4_page_cache_t)0x80003feU)
+/*lint -esym(621, OKL4_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_PAGE_CACHE_HW_WB_RWA_ISH ((okl4_page_cache_t)0x80003ffU)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_MAX) */
+#define OKL4_PAGE_CACHE_MAX ((okl4_page_cache_t)0x80003ffU)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_CACHE_INVALID) */
+#define OKL4_PAGE_CACHE_INVALID ((okl4_page_cache_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var);
+
+
+/*lint -esym(714, okl4_page_cache_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_cache_is_element_of(okl4_page_cache_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_PAGE_CACHE_WRITECOMBINE) ||
+            (var == OKL4_PAGE_CACHE_DEFAULT) ||
+            (var == OKL4_PAGE_CACHE_IPC_RX) ||
+            (var == OKL4_PAGE_CACHE_IPC_TX) ||
+            (var == OKL4_PAGE_CACHE_TRACEBUFFER) ||
+            (var == OKL4_PAGE_CACHE_WRITEBACK) ||
+            (var == OKL4_PAGE_CACHE_IWB_RWA_ONC) ||
+            (var == OKL4_PAGE_CACHE_WRITETHROUGH) ||
+            (var == OKL4_PAGE_CACHE_DEVICE_GRE) ||
+            (var == OKL4_PAGE_CACHE_DEVICE_NGRE) ||
+            (var == OKL4_PAGE_CACHE_DEVICE) ||
+            (var == OKL4_PAGE_CACHE_STRONG) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRE) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_GRE) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGNRNE) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_DEVICE_NGRE) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_NC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_ONC_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_TWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_INC_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_WT_RWA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) ||
+            (var == OKL4_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) ||
+            (var == OKL4_PAGE_CACHE_HW_MASK));
+}
+
+
+
+typedef uint32_t okl4_cell_id_t;
+
+
+
+
+
+typedef char okl4_char_t;
+
+
+
+
+
+
+
+
+/**
+    The `okl4_string_t` type represents a constant C string of type
+    'const char *'.
+*/
+
+typedef const okl4_char_t *okl4_string_t;
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+
+*/
+
+struct okl4_range_item {
+    okl4_laddr_t base;
+    okl4_lsize_t size;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_virtmem_item {
+    struct okl4_range_item range;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_item {
+    okl4_laddr_t entry;
+    struct okl4_virtmem_item mapping_range;
+    __ptr64(void *, data);
+    __ptr64(okl4_string_t, image);
+    okl4_kcap_t mmu;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_string_t, name);
+    okl4_kcap_t registers_cap;
+    okl4_kcap_t reset_virq;
+    okl4_count_t segment_index;
+    _okl4_padding_t __padding4_4;
+    _okl4_padding_t __padding5_5;
+    _okl4_padding_t __padding6_6;
+    _okl4_padding_t __padding7_7;
+    __ptr64(struct okl4_cell_management_segments *, segments);
+    __ptr64(struct okl4_cell_management_vcpus *, vcpus);
+    okl4_bool_t boot_once;
+    okl4_bool_t can_stop;
+    okl4_bool_t deferred;
+    okl4_bool_t detached;
+    okl4_bool_t erase;
+    _okl4_padding_t __padding8_5;
+    _okl4_padding_t __padding9_6;
+    _okl4_padding_t __padding10_7;
+    okl4_laddr_t dtb_address;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management {
+    okl4_count_t num_items;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_cell_management_item items[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The `okl4_paddr_t` type represents an unsigned integer value which is large
+    enough to contain a machine-native physical address.
+*/
+
+typedef okl4_psize_t okl4_paddr_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_segment_mapping {
+    okl4_paddr_t phys_addr;
+    okl4_psize_t size;
+    okl4_laddr_t virt_addr;
+    okl4_kcap_t cap;
+    okl4_bool_t device;
+    okl4_bool_t owned;
+    _okl4_padding_t __padding0_6;
+    _okl4_padding_t __padding1_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_segments {
+    okl4_count_t free_segments;
+    okl4_count_t num_segments;
+    struct okl4_segment_mapping segment_mappings[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_cell_management_vcpus {
+    okl4_count_t num_vcpus;
+    okl4_kcap_t vcpu_caps[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    CPU instruction set
+*/
+
+typedef uint32_t okl4_cpu_exec_mode;
+
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE ((okl4_cpu_exec_mode)(0U))
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE ((okl4_cpu_exec_mode)(4U))
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE ((okl4_cpu_exec_mode)(2U))
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE ((okl4_cpu_exec_mode)(3U))
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE ((okl4_cpu_exec_mode)(1U))
+
+
+
+/**
+    CPU mode specifier
+
+    - BITS 2..0 -   @ref OKL4_MASK_EXEC_MODE_CPU_MODE
+    - BIT 7 -   @ref OKL4_MASK_ENDIAN_CPU_MODE
+*/
+
+/*lint -esym(621, okl4_cpu_mode_t) */
+typedef uint32_t okl4_cpu_mode_t;
+
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode);
+
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x);
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian);
+
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x);
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_CPU_MODE_EXEC_MODE_MASK) */
+#define OKL4_CPU_MODE_EXEC_MODE_MASK ((okl4_cpu_mode_t)7U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_MASK_EXEC_MODE_CPU_MODE ((okl4_cpu_mode_t)7U)
+/*lint -esym(621, OKL4_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_CPU_MODE_ENDIAN_MASK) */
+#define OKL4_CPU_MODE_ENDIAN_MASK ((okl4_cpu_mode_t)1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_MASK_ENDIAN_CPU_MODE ((okl4_cpu_mode_t)1U << 7)
+/*lint -esym(621, OKL4_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_WIDTH_ENDIAN_CPU_MODE (1)
+
+
+/*lint -sem(okl4_cpu_mode_getexecmode, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_cpu_mode_getexecmode) */
+/*lint -esym(714, okl4_cpu_mode_getexecmode) */
+OKL4_FORCE_INLINE okl4_cpu_exec_mode
+okl4_cpu_mode_getexecmode(const okl4_cpu_mode_t *x)
+{
+    okl4_cpu_exec_mode field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_cpu_exec_mode)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setexecmode, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_cpu_mode_setexecmode) */
+
+/*lint -esym(621, okl4_cpu_mode_setexecmode) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setexecmode(okl4_cpu_mode_t *x, okl4_cpu_exec_mode _exec_mode)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_exec_mode;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_cpu_mode_getendian, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_cpu_mode_getendian) */
+/*lint -esym(714, okl4_cpu_mode_getendian) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_cpu_mode_getendian(const okl4_cpu_mode_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_cpu_mode_setendian, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_cpu_mode_setendian) */
+
+/*lint -esym(621, okl4_cpu_mode_setendian) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_setendian(okl4_cpu_mode_t *x, okl4_bool_t _endian)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_cpu_mode_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_endian;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_cpu_mode_init) */
+OKL4_FORCE_INLINE void
+okl4_cpu_mode_init(okl4_cpu_mode_t *x)
+{
+    *x = (okl4_cpu_mode_t)0U;
+}
+
+/*lint -esym(714, okl4_cpu_mode_cast) */
+OKL4_FORCE_INLINE okl4_cpu_mode_t
+okl4_cpu_mode_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_cpu_mode_t x = (okl4_cpu_mode_t)p;
+    (void)force;
+    return x;
+}
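+
+/*
+ * Editor's note: illustrative usage sketch only, not part of the generated
+ * API documentation. It shows how the okl4_cpu_mode_t accessors above are
+ * intended to compose; only declarations from this header are assumed, and
+ * the meaning of a set endian bit (taken here as big-endian) is an assumption.
+ *
+ *     okl4_cpu_mode_t mode;
+ *
+ *     okl4_cpu_mode_init(&mode);                          // raw value 0
+ *     okl4_cpu_mode_setexecmode(&mode, OKL4_THUMB_MODE);  // bits 2..0
+ *     okl4_cpu_mode_setendian(&mode, (okl4_bool_t)1);     // bit 7
+ *
+ *     if (okl4_cpu_mode_getexecmode(&mode) == OKL4_THUMB_MODE) {
+ *         // the exec-mode field round-trips through the bitfield union
+ *     }
+ */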
+
+
+
+
+struct _okl4_env_hdr {
+    uint16_t magic;
+    uint16_t count;
+};
+
+
+
+
+
+
+
+struct _okl4_env_item {
+    __ptr64(okl4_string_t, name);
+    __ptr64(void *, item);
+};
+
+
+
+
+
+
+/**
+    The OKL4 environment.  It is a dictionary that maps strings to
+    arbitrary objects.  The content of the environment is defined

+    during system construction time, and is read-only during run
+    time.
+*/
+
+struct _okl4_env {
+    struct _okl4_env_hdr env_hdr;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct _okl4_env_item env_item[]; /*lint --e{9038} flex array */
+};
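+
+/*
+ * Editor's note: illustrative sketch only, not part of the generated header.
+ * The environment dictionary can be searched with a linear scan over
+ * `env_item`. The helper name is hypothetical, <string.h>/<stddef.h> are
+ * assumed for strcmp()/NULL, and accessing the __ptr64() members by their
+ * declared names is an assumption.
+ *
+ *     static void *example_env_lookup(const struct _okl4_env *env,
+ *                                     okl4_string_t name)
+ *     {
+ *         uint16_t i;
+ *
+ *         for (i = 0; i < env->env_hdr.count; i++) {
+ *             if (strcmp(env->env_item[i].name, name) == 0) {
+ *                 return env->env_item[i].item;
+ *             }
+ *         }
+ *         return NULL;
+ *     }
+ */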
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_cell {
+    __ptr64(okl4_string_t, name);
+    okl4_count_t num_entries;
+    okl4_count_t start_entry;
+};
+
+
+
+
+/**
+    The okl4_page_perms_t object represents a set of access permissions for
+    page mappings.
+
+    - @ref OKL4_PAGE_PERMS_NONE
+    - @ref OKL4_PAGE_PERMS_X
+    - @ref OKL4_PAGE_PERMS_W
+    - @ref OKL4_PAGE_PERMS_WX
+    - @ref OKL4_PAGE_PERMS_R
+    - @ref OKL4_PAGE_PERMS_RX
+    - @ref OKL4_PAGE_PERMS_RW
+    - @ref OKL4_PAGE_PERMS_RWX
+    - @ref OKL4_PAGE_PERMS_MAX
+    - @ref OKL4_PAGE_PERMS_INVALID
+*/
+
+typedef uint32_t okl4_page_perms_t;
+
+/*lint -esym(621, OKL4_PAGE_PERMS_NONE) */
+#define OKL4_PAGE_PERMS_NONE ((okl4_page_perms_t)0x0U)
+/*lint -esym(621, OKL4_PAGE_PERMS_X) */
+#define OKL4_PAGE_PERMS_X ((okl4_page_perms_t)0x1U)
+/*lint -esym(621, OKL4_PAGE_PERMS_W) */
+#define OKL4_PAGE_PERMS_W ((okl4_page_perms_t)0x2U)
+/*lint -esym(621, OKL4_PAGE_PERMS_WX) */
+#define OKL4_PAGE_PERMS_WX ((okl4_page_perms_t)0x3U)
+/*lint -esym(621, OKL4_PAGE_PERMS_R) */
+#define OKL4_PAGE_PERMS_R ((okl4_page_perms_t)0x4U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RX) */
+#define OKL4_PAGE_PERMS_RX ((okl4_page_perms_t)0x5U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RW) */
+#define OKL4_PAGE_PERMS_RW ((okl4_page_perms_t)0x6U)
+/*lint -esym(621, OKL4_PAGE_PERMS_RWX) */
+#define OKL4_PAGE_PERMS_RWX ((okl4_page_perms_t)0x7U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_MAX) */
+#define OKL4_PAGE_PERMS_MAX ((okl4_page_perms_t)0x7U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_PAGE_PERMS_INVALID) */
+#define OKL4_PAGE_PERMS_INVALID ((okl4_page_perms_t)0xffffffffU)
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var);
+
+
+/*lint -esym(714, okl4_page_perms_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_page_perms_is_element_of(okl4_page_perms_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_PAGE_PERMS_NONE) ||
+            (var == OKL4_PAGE_PERMS_X) ||
+            (var == OKL4_PAGE_PERMS_W) ||
+            (var == OKL4_PAGE_PERMS_WX) ||
+            (var == OKL4_PAGE_PERMS_R) ||
+            (var == OKL4_PAGE_PERMS_RX) ||
+            (var == OKL4_PAGE_PERMS_RW) ||
+            (var == OKL4_PAGE_PERMS_RWX));
+}
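+
+/*
+ * Editor's note: illustrative sketch only. The permission values above form a
+ * 3-bit R/W/X encoding, so individual rights can be tested with a bitwise
+ * AND; only macros and types from this header are assumed.
+ *
+ *     okl4_page_perms_t perms = OKL4_PAGE_PERMS_RW;
+ *
+ *     okl4_bool_t readable   = (okl4_bool_t)((perms & OKL4_PAGE_PERMS_R) != 0U); // true
+ *     okl4_bool_t writable   = (okl4_bool_t)((perms & OKL4_PAGE_PERMS_W) != 0U); // true
+ *     okl4_bool_t executable = (okl4_bool_t)((perms & OKL4_PAGE_PERMS_X) != 0U); // false
+ */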
+
+
+/**
+
+*/
+
+struct okl4_env_access_entry {
+    okl4_laddr_t virtual_address;
+    okl4_psize_t offset;
+    okl4_psize_t size;
+    okl4_count_t num_segs;
+    okl4_count_t segment_index;
+    okl4_page_cache_t cache_attrs;
+    okl4_page_perms_t permissions;
+    __ptr64(okl4_string_t, object_name);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_access_table {
+    okl4_count_t num_cells;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_env_access_cell *, cells);
+    __ptr64(struct okl4_env_access_entry *, entries);
+};
+
+
+
+
+/**
+    This object contains command-line arguments passed to
+    user-level programs.
+*/
+
+struct okl4_env_args {
+    okl4_count_t argc;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64_array(okl4_string_t, argv)[]; /*lint --e{9038} flex array */
+};
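+
+/*
+ * Editor's note: illustrative sketch only. The flexible `argv` array holds
+ * `argc` string pointers, so the argument vector can be walked directly.
+ * The helper name is hypothetical and accessing the __ptr64_array() member
+ * by its declared name is an assumption.
+ *
+ *     static okl4_count_t example_count_args(const struct okl4_env_args *args)
+ *     {
+ *         okl4_count_t i;
+ *
+ *         for (i = 0U; i < args->argc; i++) {
+ *             okl4_string_t arg = args->argv[i];  // i-th argument string
+ *             (void)arg;                          // use the argument here
+ *         }
+ *         return args->argc;
+ *     }
+ */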
+
+
+
+
+/**
+    The okl4_env_interrupt_device_map_t type represents a list of interrupt
+    numbers (IRQs) that are connected to a given peripheral
+    device.  Objects of this type are typically obtained from
+    the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_device_map {
+    okl4_count_t num_entries;
+    okl4_interrupt_number_t entries[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The okl4_interrupt_t structure is used to represent a kernel interrupt
+    object.
+*/
+
+struct okl4_interrupt {
+    okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_handle_t type stores the information required to
+    perform operations on an interrupt.
+*/
+
+struct okl4_env_interrupt_handle {
+    okl4_interrupt_number_t descriptor;
+    struct okl4_interrupt interrupt;
+};
+
+
+
+
+/**
+    The okl4_env_interrupt_list_t type stores a list of interrupt handle objects
+    which represent all the interrupts that are available to the cell.
+    Objects of this type are typically obtained from
+    the OKL4 environment.
+*/
+
+struct okl4_env_interrupt_list {
+    okl4_count_t num_entries;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_interrupt_number_t *, descriptor);
+    __ptr64(struct okl4_interrupt *, interrupt);
+};
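+
+/*
+ * Editor's note: illustrative sketch only. The list stores two parallel
+ * arrays of length `num_entries`; the loop below pairs each IRQ descriptor
+ * with its kernel interrupt capability. The helper name is hypothetical and
+ * accessing the __ptr64() members by their declared names is an assumption.
+ *
+ *     static void example_walk_irqs(const struct okl4_env_interrupt_list *list)
+ *     {
+ *         okl4_count_t i;
+ *
+ *         for (i = 0U; i < list->num_entries; i++) {
+ *             okl4_interrupt_number_t irq = list->descriptor[i];
+ *             okl4_kcap_t cap = list->interrupt[i].kcap;
+ *             (void)irq;
+ *             (void)cap;   // attach or handle the interrupt here
+ *         }
+ *     }
+ */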
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cell {
+    okl4_char_t name[32];
+    okl4_count_t num_cores;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_env_profile_cpu *, core);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_cpu {
+    okl4_kcap_t cap;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_profile_table {
+    okl4_count_t num_cell_entries;
+    okl4_count_t pcpu_cell_entry;
+    __ptr64(struct okl4_env_profile_cell *, cells);
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_segment {
+    okl4_paddr_t base;
+    okl4_psize_t size;
+    okl4_kcap_t cap_id;
+    okl4_page_perms_t rwx;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_env_segment_table {
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_env_segment segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    The `okl4_error_t` type represents an error condition returned by the
+    OKL4 API.
+
+    See OKL4_ERROR_*
+
+    - @ref OKL4_ERROR_KSP_OK
+    - @ref OKL4_ERROR_OK
+    - @ref OKL4_ERROR_ALREADY_STARTED
+    - @ref OKL4_ERROR_ALREADY_STOPPED
+    - @ref OKL4_ERROR_AXON_AREA_TOO_BIG
+    - @ref OKL4_ERROR_AXON_BAD_MESSAGE_SIZE
+    - @ref OKL4_ERROR_AXON_INVALID_OFFSET
+    - @ref OKL4_ERROR_AXON_QUEUE_NOT_MAPPED
+    - @ref OKL4_ERROR_AXON_QUEUE_NOT_READY
+    - @ref OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED
+    - @ref OKL4_ERROR_CANCELLED
+    - @ref OKL4_ERROR_EXISTING_MAPPING
+    - @ref OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS
+    - @ref OKL4_ERROR_INTERRUPTED
+    - @ref OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED
+    - @ref OKL4_ERROR_INTERRUPT_INVALID_IRQ
+    - @ref OKL4_ERROR_INTERRUPT_NOT_ATTACHED
+    - @ref OKL4_ERROR_INVALID_ARGUMENT
+    - @ref OKL4_ERROR_INVALID_DESIGNATOR
+    - @ref OKL4_ERROR_INVALID_POWER_STATE
+    - @ref OKL4_ERROR_INVALID_SEGMENT_INDEX
+    - @ref OKL4_ERROR_MEMORY_FAULT
+    - @ref OKL4_ERROR_MISSING_MAPPING
+    - @ref OKL4_ERROR_NON_EMPTY_MMU_CONTEXT
+    - @ref OKL4_ERROR_NOT_IN_SEGMENT
+    - @ref OKL4_ERROR_NOT_LAST_CPU
+    - @ref OKL4_ERROR_NO_RESOURCES
+    - @ref OKL4_ERROR_PIPE_BAD_STATE
+    - @ref OKL4_ERROR_PIPE_EMPTY
+    - @ref OKL4_ERROR_PIPE_FULL
+    - @ref OKL4_ERROR_PIPE_NOT_READY
+    - @ref OKL4_ERROR_PIPE_RECV_OVERFLOW
+    - @ref OKL4_ERROR_POWER_VCPU_RESUMED
+    - @ref OKL4_ERROR_SEGMENT_USED
+    - @ref OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED
+    - @ref OKL4_ERROR_TIMER_ACTIVE
+    - @ref OKL4_ERROR_TIMER_CANCELLED
+    - @ref OKL4_ERROR_TRY_AGAIN
+    - @ref OKL4_ERROR_WOULD_BLOCK
+    - @ref OKL4_ERROR_ALLOC_EXHAUSTED
+    - @ref OKL4_ERROR_KSP_ERROR_0
+    - @ref OKL4_ERROR_KSP_ERROR_1
+    - @ref OKL4_ERROR_KSP_ERROR_2
+    - @ref OKL4_ERROR_KSP_ERROR_3
+    - @ref OKL4_ERROR_KSP_ERROR_4
+    - @ref OKL4_ERROR_KSP_ERROR_5
+    - @ref OKL4_ERROR_KSP_ERROR_6
+    - @ref OKL4_ERROR_KSP_ERROR_7
+    - @ref OKL4_ERROR_KSP_INVALID_ARG
+    - @ref OKL4_ERROR_KSP_NOT_IMPLEMENTED
+    - @ref OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS
+    - @ref OKL4_ERROR_KSP_INTERRUPT_REGISTERED
+    - @ref OKL4_ERROR_NOT_IMPLEMENTED
+    - @ref OKL4_ERROR_MAX
+*/
+
+typedef uint32_t okl4_error_t;
+
+/**
+    KSP returned OK
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_OK) */
+#define OKL4_ERROR_KSP_OK ((okl4_error_t)0x0U)
+/**
+    The operation succeeded
+*/
+/*lint -esym(621, OKL4_ERROR_OK) */
+#define OKL4_ERROR_OK ((okl4_error_t)0x0U)
+/**
+    The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STARTED) */
+#define OKL4_ERROR_ALREADY_STARTED ((okl4_error_t)0x1U)
+/**
+    The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ERROR_ALREADY_STOPPED) */
+#define OKL4_ERROR_ALREADY_STOPPED ((okl4_error_t)0x2U)
+/*lint -esym(621, OKL4_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ERROR_AXON_AREA_TOO_BIG ((okl4_error_t)0x3U)
+/*lint -esym(621, OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ERROR_AXON_BAD_MESSAGE_SIZE ((okl4_error_t)0x4U)
+/*lint -esym(621, OKL4_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ERROR_AXON_INVALID_OFFSET ((okl4_error_t)0x5U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_MAPPED ((okl4_error_t)0x6U)
+/*lint -esym(621, OKL4_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ERROR_AXON_QUEUE_NOT_READY ((okl4_error_t)0x7U)
+/*lint -esym(621, OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED ((okl4_error_t)0x8U)
+/**
+    A blocking operation was cancelled due to an abort of the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_CANCELLED) */
+#define OKL4_ERROR_CANCELLED ((okl4_error_t)0x9U)
+/**
+    The operation failed due to an existing mapping.  Mapping
+    operations must not overlap an existing mapping.  Unmapping
+    must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ERROR_EXISTING_MAPPING) */
+#define OKL4_ERROR_EXISTING_MAPPING ((okl4_error_t)0xaU)
+/**
+    The operation requested with a segment failed due to
+    insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS ((okl4_error_t)0xbU)
+/**
+    The operation did not complete because it was interrupted by a
+    preemption.  This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPTED) */
+#define OKL4_ERROR_INTERRUPTED ((okl4_error_t)0xcU)
+/**
+    Attempt to attach an interrupt to an IRQ number, when the
+    interrupt is already attached to an IRQ number
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED ((okl4_error_t)0xdU)
+/**
+    Attempt to use an IRQ number that is out of range, of
+    the wrong type, or not in the correct state
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ERROR_INTERRUPT_INVALID_IRQ ((okl4_error_t)0xeU)
+/**
+    Attempt to operate on an unknown IRQ number
+*/
+/*lint -esym(621, OKL4_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ERROR_INTERRUPT_NOT_ATTACHED ((okl4_error_t)0xfU)
+/**
+    An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ERROR_INVALID_ARGUMENT ((okl4_error_t)0x10U)
+/**
+    The operation failed because one of the arguments does not refer to a
+    valid object.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ERROR_INVALID_DESIGNATOR ((okl4_error_t)0x11U)
+/**
+    The operation failed because the power_state
+    argument is invalid.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ERROR_INVALID_POWER_STATE ((okl4_error_t)0x12U)
+/**
+    The operation failed because the given segment index does
+    not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ERROR_INVALID_SEGMENT_INDEX ((okl4_error_t)0x13U)
+/**
+    A user provided address produced a read or write fault in the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_MEMORY_FAULT) */
+#define OKL4_ERROR_MEMORY_FAULT ((okl4_error_t)0x14U)
+/**
+    The operation failed because there is no mapping at the
+    specified location.
+*/
+/*lint -esym(621, OKL4_ERROR_MISSING_MAPPING) */
+#define OKL4_ERROR_MISSING_MAPPING ((okl4_error_t)0x15U)
+/**
+    The delete operation failed because the KMMU context is not
+    empty.
+*/
+/*lint -esym(621, OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ERROR_NON_EMPTY_MMU_CONTEXT ((okl4_error_t)0x16U)
+/**
+    The lookup operation failed because the given virtual address
+    of the given KMMU context is not mapped at the given physical
+    segment.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ERROR_NOT_IN_SEGMENT ((okl4_error_t)0x17U)
+/**
+    The operation failed because the caller is not on the last
+    online CPU.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_LAST_CPU) */
+#define OKL4_ERROR_NOT_LAST_CPU ((okl4_error_t)0x18U)
+/**
+    Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ERROR_NO_RESOURCES) */
+#define OKL4_ERROR_NO_RESOURCES ((okl4_error_t)0x19U)
+/**
+    Operation failed because the pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ERROR_PIPE_BAD_STATE ((okl4_error_t)0x1aU)
+/**
+    Operation failed because no messages are in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_EMPTY) */
+#define OKL4_ERROR_PIPE_EMPTY ((okl4_error_t)0x1bU)
+/**
+    Operation failed because no memory is available in the queue.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_FULL) */
+#define OKL4_ERROR_PIPE_FULL ((okl4_error_t)0x1cU)
+/**
+    Operation failed because the pipe is in reset or not ready.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_NOT_READY) */
+#define OKL4_ERROR_PIPE_NOT_READY ((okl4_error_t)0x1dU)
+/**
+    Message was truncated because the receive buffer size is too small.
+*/
+/*lint -esym(621, OKL4_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ERROR_PIPE_RECV_OVERFLOW ((okl4_error_t)0x1eU)
+/**
+    The operation failed because at least one VCPU has a monitored
+    power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ERROR_POWER_VCPU_RESUMED ((okl4_error_t)0x1fU)
+/**
+    The operation requires a segment to be unused, or not attached
+    to an MMU context.
+*/
+/*lint -esym(621, OKL4_ERROR_SEGMENT_USED) */
+#define OKL4_ERROR_SEGMENT_USED ((okl4_error_t)0x20U)
+/*lint -esym(621, OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED ((okl4_error_t)0x21U)
+/**
+    The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_ACTIVE) */
+#define OKL4_ERROR_TIMER_ACTIVE ((okl4_error_t)0x22U)
+/**
+    The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ERROR_TIMER_CANCELLED) */
+#define OKL4_ERROR_TIMER_CANCELLED ((okl4_error_t)0x23U)
+/**
+    Operation failed due to a temporary condition, and may be retried.
+*/
+/*lint -esym(621, OKL4_ERROR_TRY_AGAIN) */
+#define OKL4_ERROR_TRY_AGAIN ((okl4_error_t)0x24U)
+/**
+    The non-blocking operation failed because it would
+    block on a resource.
+*/
+/*lint -esym(621, OKL4_ERROR_WOULD_BLOCK) */
+#define OKL4_ERROR_WOULD_BLOCK ((okl4_error_t)0x25U)
+/**
+    Insufficient resources
+*/
+/*lint -esym(621, OKL4_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ERROR_ALLOC_EXHAUSTED ((okl4_error_t)0x26U)
+/**
+    KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_0) */
+#define OKL4_ERROR_KSP_ERROR_0 ((okl4_error_t)0x10000010U)
+/**
+    KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_1) */
+#define OKL4_ERROR_KSP_ERROR_1 ((okl4_error_t)0x10000011U)
+/**
+    KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_2) */
+#define OKL4_ERROR_KSP_ERROR_2 ((okl4_error_t)0x10000012U)
+/**
+    KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_3) */
+#define OKL4_ERROR_KSP_ERROR_3 ((okl4_error_t)0x10000013U)
+/**
+    KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_4) */
+#define OKL4_ERROR_KSP_ERROR_4 ((okl4_error_t)0x10000014U)
+/**
+    KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_5) */
+#define OKL4_ERROR_KSP_ERROR_5 ((okl4_error_t)0x10000015U)
+/**
+    KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_6) */
+#define OKL4_ERROR_KSP_ERROR_6 ((okl4_error_t)0x10000016U)
+/**
+    KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_ERROR_7) */
+#define OKL4_ERROR_KSP_ERROR_7 ((okl4_error_t)0x10000017U)
+/**
+    Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ERROR_KSP_INVALID_ARG ((okl4_error_t)0x80000001U)
+/**
+    KSP doesn't implement requested feature
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_KSP_NOT_IMPLEMENTED ((okl4_error_t)0x80000002U)
+/**
+    User didn't supply rights for requested feature
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS ((okl4_error_t)0x80000003U)
+/**
+    Interrupt already registered
+*/
+/*lint -esym(621, OKL4_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ERROR_KSP_INTERRUPT_REGISTERED ((okl4_error_t)0x80000004U)
+/**
+    Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ERROR_NOT_IMPLEMENTED ((okl4_error_t)0xffffffffU)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ERROR_MAX) */
+#define OKL4_ERROR_MAX ((okl4_error_t)0xffffffffU)
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var);
+
+
+/*lint -esym(714, okl4_error_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_error_is_element_of(okl4_error_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_ERROR_ALREADY_STARTED) ||
+            (var == OKL4_ERROR_ALREADY_STOPPED) ||
+            (var == OKL4_ERROR_AXON_AREA_TOO_BIG) ||
+            (var == OKL4_ERROR_AXON_BAD_MESSAGE_SIZE) ||
+            (var == OKL4_ERROR_AXON_INVALID_OFFSET) ||
+            (var == OKL4_ERROR_AXON_QUEUE_NOT_MAPPED) ||
+            (var == OKL4_ERROR_AXON_QUEUE_NOT_READY) ||
+            (var == OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) ||
+            (var == OKL4_ERROR_CANCELLED) ||
+            (var == OKL4_ERROR_EXISTING_MAPPING) ||
+            (var == OKL4_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) ||
+            (var == OKL4_ERROR_INTERRUPTED) ||
+            (var == OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) ||
+            (var == OKL4_ERROR_INTERRUPT_INVALID_IRQ) ||
+            (var == OKL4_ERROR_INTERRUPT_NOT_ATTACHED) ||
+            (var == OKL4_ERROR_INVALID_ARGUMENT) ||
+            (var == OKL4_ERROR_INVALID_DESIGNATOR) ||
+            (var == OKL4_ERROR_INVALID_POWER_STATE) ||
+            (var == OKL4_ERROR_INVALID_SEGMENT_INDEX) ||
+            (var == OKL4_ERROR_KSP_ERROR_0) ||
+            (var == OKL4_ERROR_KSP_ERROR_1) ||
+            (var == OKL4_ERROR_KSP_ERROR_2) ||
+            (var == OKL4_ERROR_KSP_ERROR_3) ||
+            (var == OKL4_ERROR_KSP_ERROR_4) ||
+            (var == OKL4_ERROR_KSP_ERROR_5) ||
+            (var == OKL4_ERROR_KSP_ERROR_6) ||
+            (var == OKL4_ERROR_KSP_ERROR_7) ||
+            (var == OKL4_ERROR_KSP_INSUFFICIENT_RIGHTS) ||
+            (var == OKL4_ERROR_KSP_INTERRUPT_REGISTERED) ||
+            (var == OKL4_ERROR_KSP_INVALID_ARG) ||
+            (var == OKL4_ERROR_KSP_NOT_IMPLEMENTED) ||
+            (var == OKL4_ERROR_KSP_OK) ||
+            (var == OKL4_ERROR_MEMORY_FAULT) ||
+            (var == OKL4_ERROR_MISSING_MAPPING) ||
+            (var == OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) ||
+            (var == OKL4_ERROR_NOT_IMPLEMENTED) ||
+            (var == OKL4_ERROR_NOT_IN_SEGMENT) ||
+            (var == OKL4_ERROR_NOT_LAST_CPU) ||
+            (var == OKL4_ERROR_NO_RESOURCES) ||
+            (var == OKL4_ERROR_OK) ||
+            (var == OKL4_ERROR_PIPE_BAD_STATE) ||
+            (var == OKL4_ERROR_PIPE_EMPTY) ||
+            (var == OKL4_ERROR_PIPE_FULL) ||
+            (var == OKL4_ERROR_PIPE_NOT_READY) ||
+            (var == OKL4_ERROR_PIPE_RECV_OVERFLOW) ||
+            (var == OKL4_ERROR_POWER_VCPU_RESUMED) ||
+            (var == OKL4_ERROR_SEGMENT_USED) ||
+            (var == OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) ||
+            (var == OKL4_ERROR_TIMER_ACTIVE) ||
+            (var == OKL4_ERROR_TIMER_CANCELLED) ||
+            (var == OKL4_ERROR_TRY_AGAIN) ||
+            (var == OKL4_ERROR_WOULD_BLOCK) ||
+            (var == OKL4_ERROR_ALLOC_EXHAUSTED));
+}
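+
+/*
+ * Editor's note: illustrative sketch only. A typical caller compares the
+ * returned okl4_error_t against OKL4_ERROR_OK and treats OKL4_ERROR_TRY_AGAIN
+ * as a retryable condition; `do_operation()` is a hypothetical call.
+ *
+ *     okl4_error_t err;
+ *
+ *     do {
+ *         err = do_operation();
+ *     } while (err == OKL4_ERROR_TRY_AGAIN);
+ *
+ *     if (err != OKL4_ERROR_OK) {
+ *         // handle the failure (see the OKL4_ERROR_* descriptions above)
+ *     }
+ */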
+
+
+/**
+
+*/
+
+struct okl4_firmware_segment {
+    okl4_laddr_t copy_addr;
+    okl4_laddr_t exec_addr;
+    okl4_lsize_t filesz;
+    okl4_lsize_t memsz_diff;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_firmware_segments_info {
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_firmware_segment segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+/**
+    - BIT 1 -   @ref OKL4_MASK_EDGE_GICD_ICFGR
+*/
+
+/*lint -esym(621, okl4_gicd_icfgr_t) */
+typedef uint32_t okl4_gicd_icfgr_t;
+
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x);
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge);
+
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x);
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_ICFGR_EDGE_MASK) */
+#define OKL4_GICD_ICFGR_EDGE_MASK ((okl4_gicd_icfgr_t)1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_MASK_EDGE_GICD_ICFGR ((okl4_gicd_icfgr_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/*lint -sem(okl4_gicd_icfgr_getedge, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_icfgr_getedge) */
+/*lint -esym(714, okl4_gicd_icfgr_getedge) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_icfgr_getedge(const okl4_gicd_icfgr_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_icfgr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_icfgr_setedge, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_icfgr_setedge) */
+
+/*lint -esym(621, okl4_gicd_icfgr_setedge) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_setedge(okl4_gicd_icfgr_t *x, okl4_bool_t _edge)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_icfgr_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_edge;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_icfgr_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_icfgr_init(okl4_gicd_icfgr_t *x)
+{
+    *x = (okl4_gicd_icfgr_t)0U;
+}
+
+/*lint -esym(714, okl4_gicd_icfgr_cast) */
+OKL4_FORCE_INLINE okl4_gicd_icfgr_t
+okl4_gicd_icfgr_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_gicd_icfgr_t x = (okl4_gicd_icfgr_t)p;
+    (void)force;
+    return x;
+}
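+
+/*
+ * Editor's note: illustrative sketch only. The ICFGR accessor pair above
+ * manipulates the single edge/level configuration bit (bit 1); only
+ * declarations from this header are assumed.
+ *
+ *     okl4_gicd_icfgr_t icfgr;
+ *
+ *     okl4_gicd_icfgr_init(&icfgr);                     // all bits clear
+ *     okl4_gicd_icfgr_setedge(&icfgr, (okl4_bool_t)1);  // mark edge-triggered
+ *
+ *     if (okl4_gicd_icfgr_getedge(&icfgr)) {
+ *         // interrupt is configured as edge-triggered
+ *     }
+ */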
+
+
+
+
+typedef uint32_t okl4_sgi_target_t;
+
+/*lint -esym(621, OKL4_SGI_TARGET_LISTED) */
+#define OKL4_SGI_TARGET_LISTED ((okl4_sgi_target_t)0x0U)
+/*lint -esym(621, OKL4_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_SGI_TARGET_ALL_OTHERS ((okl4_sgi_target_t)0x1U)
+/*lint -esym(621, OKL4_SGI_TARGET_SELF) */
+#define OKL4_SGI_TARGET_SELF ((okl4_sgi_target_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_MAX) */
+#define OKL4_SGI_TARGET_MAX ((okl4_sgi_target_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_SGI_TARGET_INVALID) */
+#define OKL4_SGI_TARGET_INVALID ((okl4_sgi_target_t)0xffffffffU)
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var);
+
+
+/*lint -esym(714, okl4_sgi_target_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_sgi_target_is_element_of(okl4_sgi_target_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_SGI_TARGET_LISTED) ||
+            (var == OKL4_SGI_TARGET_ALL_OTHERS) ||
+            (var == OKL4_SGI_TARGET_SELF));
+}
+
+
+/**
+    - BITS 3..0 -   @ref OKL4_MASK_SGIINTID_GICD_SGIR
+    - BIT 15 -   @ref OKL4_MASK_NSATT_GICD_SGIR
+    - BITS 23..16 -   @ref OKL4_MASK_CPUTARGETLIST_GICD_SGIR
+    - BITS 25..24 -   @ref OKL4_MASK_TARGETLISTFILTER_GICD_SGIR
+*/
+
+/*lint -esym(621, okl4_gicd_sgir_t) */
+typedef uint32_t okl4_gicd_sgir_t;
+
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid);
+
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt);
+
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist);
+
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x);
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter);
+
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x);
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_GICD_SGIR_SGIINTID_MASK) */
+#define OKL4_GICD_SGIR_SGIINTID_MASK ((okl4_gicd_sgir_t)15U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_MASK_SGIINTID_GICD_SGIR ((okl4_gicd_sgir_t)15U)
+/*lint -esym(621, OKL4_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_GICD_SGIR_NSATT_MASK) */
+#define OKL4_GICD_SGIR_NSATT_MASK ((okl4_gicd_sgir_t)1U << 15) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_NSATT_GICD_SGIR) */
+#define OKL4_MASK_NSATT_GICD_SGIR ((okl4_gicd_sgir_t)1U << 15)
+/*lint -esym(621, OKL4_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_GICD_SGIR_CPUTARGETLIST_MASK) */
+#define OKL4_GICD_SGIR_CPUTARGETLIST_MASK ((okl4_gicd_sgir_t)255U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_MASK_CPUTARGETLIST_GICD_SGIR ((okl4_gicd_sgir_t)255U << 16)
+/*lint -esym(621, OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_GICD_SGIR_TARGETLISTFILTER_MASK) */
+#define OKL4_GICD_SGIR_TARGETLISTFILTER_MASK ((okl4_gicd_sgir_t)3U << 24) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_MASK_TARGETLISTFILTER_GICD_SGIR ((okl4_gicd_sgir_t)3U << 24)
+/*lint -esym(621, OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
+
+
+/*lint -sem(okl4_gicd_sgir_getsgiintid, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_gicd_sgir_getsgiintid) */
+/*lint -esym(714, okl4_gicd_sgir_getsgiintid) */
+OKL4_FORCE_INLINE okl4_interrupt_number_t
+okl4_gicd_sgir_getsgiintid(const okl4_gicd_sgir_t *x)
+{
+    okl4_interrupt_number_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 4;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_interrupt_number_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setsgiintid, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_gicd_sgir_setsgiintid) */
+
+/*lint -esym(621, okl4_gicd_sgir_setsgiintid) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setsgiintid(okl4_gicd_sgir_t *x, okl4_interrupt_number_t _sgiintid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 4;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_sgiintid;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getnsatt, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_gicd_sgir_getnsatt) */
+/*lint -esym(714, okl4_gicd_sgir_getnsatt) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_gicd_sgir_getnsatt(const okl4_gicd_sgir_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setnsatt, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_gicd_sgir_setnsatt) */
+
+/*lint -esym(621, okl4_gicd_sgir_setnsatt) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setnsatt(okl4_gicd_sgir_t *x, okl4_bool_t _nsatt)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 15;
+            _Bool field : 1;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_nsatt;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_getcputargetlist, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_gicd_sgir_getcputargetlist) */
+/*lint -esym(714, okl4_gicd_sgir_getcputargetlist) */
+OKL4_FORCE_INLINE uint8_t
+okl4_gicd_sgir_getcputargetlist(const okl4_gicd_sgir_t *x)
+{
+    uint8_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 8;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint8_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_setcputargetlist, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_gicd_sgir_setcputargetlist) */
+
+/*lint -esym(621, okl4_gicd_sgir_setcputargetlist) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_setcputargetlist(okl4_gicd_sgir_t *x, uint8_t _cputargetlist)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 8;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_cputargetlist;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_gicd_sgir_gettargetlistfilter, 1p, @n >= 0 && @n <= 3) */
+/*lint -esym(621, okl4_gicd_sgir_gettargetlistfilter) */
+/*lint -esym(714, okl4_gicd_sgir_gettargetlistfilter) */
+OKL4_FORCE_INLINE okl4_sgi_target_t
+okl4_gicd_sgir_gettargetlistfilter(const okl4_gicd_sgir_t *x)
+{
+    okl4_sgi_target_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_sgi_target_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_gicd_sgir_settargetlistfilter, 2n >= 0 && 2n <= 3) */
+/*lint -esym(714, okl4_gicd_sgir_settargetlistfilter) */
+
+/*lint -esym(621, okl4_gicd_sgir_settargetlistfilter) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_settargetlistfilter(okl4_gicd_sgir_t *x, okl4_sgi_target_t _targetlistfilter)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 2;
+        } bits;
+        okl4_gicd_sgir_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_targetlistfilter;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_gicd_sgir_init) */
+OKL4_FORCE_INLINE void
+okl4_gicd_sgir_init(okl4_gicd_sgir_t *x)
+{
+    *x = (okl4_gicd_sgir_t)32768U;
+}
+
+/*lint -esym(714, okl4_gicd_sgir_cast) */
+OKL4_FORCE_INLINE okl4_gicd_sgir_t
+okl4_gicd_sgir_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_gicd_sgir_t x = (okl4_gicd_sgir_t)p;
+    (void)force;
+    return x;
+}
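+
+/*
+ * Editor's note: illustrative sketch only. Composing a GICD_SGIR value with
+ * the accessors above; note that okl4_gicd_sgir_init() starts from raw value
+ * 32768 (NSATT set), which this sketch clears for illustration.
+ *
+ *     okl4_gicd_sgir_t sgir;
+ *
+ *     okl4_gicd_sgir_init(&sgir);
+ *     okl4_gicd_sgir_setnsatt(&sgir, (okl4_bool_t)0);
+ *     okl4_gicd_sgir_setsgiintid(&sgir, 7U);                       // SGI 0..15
+ *     okl4_gicd_sgir_settargetlistfilter(&sgir, OKL4_SGI_TARGET_SELF);
+ */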
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+    The okl4_kmmu_t structure is used to represent a kernel MMU
+    context.
+*/
+
+struct okl4_kmmu {
+    okl4_kcap_t kcap;
+};
+
+
+
+
+/**
+    The `okl4_ksp_arg_t` type represents an unsigned, machine-native
+    register-sized integer value used for KSP call arguments. Important: it is
+    truncated to guest register-size when guest register-size is smaller than
+    kernel register-size.
+*/
+
+typedef okl4_register_t okl4_ksp_arg_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_ksp_user_agent {
+    okl4_kcap_t kcap;
+    okl4_interrupt_number_t virq;
+};
+
+
+
+
+
+typedef uint32_t okl4_ksp_vdevice_class_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_laddr_tr_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_pipe_data {
+    okl4_kcap_t kcap;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_pipe_ep_data {
+    struct okl4_pipe_data rx;
+    struct okl4_pipe_data tx;
+};
+
+
+
+
+
+typedef uint32_t okl4_link_role_t;
+
+/*lint -esym(621, OKL4_LINK_ROLE_SYMMETRIC) */
+#define OKL4_LINK_ROLE_SYMMETRIC ((okl4_link_role_t)0x0U)
+/*lint -esym(621, OKL4_LINK_ROLE_SERVER) */
+#define OKL4_LINK_ROLE_SERVER ((okl4_link_role_t)0x1U)
+/*lint -esym(621, OKL4_LINK_ROLE_CLIENT) */
+#define OKL4_LINK_ROLE_CLIENT ((okl4_link_role_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_MAX) */
+#define OKL4_LINK_ROLE_MAX ((okl4_link_role_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_ROLE_INVALID) */
+#define OKL4_LINK_ROLE_INVALID ((okl4_link_role_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var);
+
+
+/*lint -esym(714, okl4_link_role_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_role_is_element_of(okl4_link_role_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_LINK_ROLE_SYMMETRIC) ||
+            (var == OKL4_LINK_ROLE_SERVER) ||
+            (var == OKL4_LINK_ROLE_CLIENT));
+}
+
+
+
+typedef uint32_t okl4_link_transport_type_t;
+
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_link_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_LINK_TRANSPORT_TYPE_AXONS ((okl4_link_transport_type_t)0x1U)
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_LINK_TRANSPORT_TYPE_PIPES ((okl4_link_transport_type_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_LINK_TRANSPORT_TYPE_MAX ((okl4_link_transport_type_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_LINK_TRANSPORT_TYPE_INVALID ((okl4_link_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var);
+
+
+/*lint -esym(714, okl4_link_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_link_transport_type_is_element_of(okl4_link_transport_type_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_LINK_TRANSPORT_TYPE_SHARED_BUFFER) ||
+            (var == OKL4_LINK_TRANSPORT_TYPE_AXONS) ||
+            (var == OKL4_LINK_TRANSPORT_TYPE_PIPES));
+}
+
+
+/**
+
+*/
+
+struct okl4_link {
+    __ptr64(okl4_string_t, name);
+    __ptr64(void *, opaque);
+    __ptr64(okl4_string_t, partner_name);
+    okl4_link_role_t role;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    union {
+        struct {
+            struct okl4_virtmem_item buffer;
+            okl4_irq_t virq_in;
+            okl4_kcap_t virq_out;
+        } shared_buffer;
+
+        struct {
+            struct okl4_axon_ep_data axon_ep;
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } axons;
+
+        struct {
+            okl4_ksize_t message_size;
+            struct okl4_pipe_ep_data pipe_ep;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } pipes;
+
+    } transport;
+
+    okl4_link_transport_type_t transport_type;
+    _okl4_padding_t __padding4_4;
+    _okl4_padding_t __padding5_5;
+    _okl4_padding_t __padding6_6;
+    _okl4_padding_t __padding7_7;
+};
+
+
+
+
+/**
+
+*/
+
+struct okl4_links {
+    okl4_count_t num_links;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64_array(struct okl4_link *, links)[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_lsize_tr_t;
+
+
+
+
+/**
+    The okl4_machine_info_t structure holds machine-specific
+    constants that are only known at weave-time. Objects of this
+    type are typically obtained from the OKL4 environment.
+*/
+
+struct okl4_machine_info {
+    okl4_ksize_t l1_cache_line_size;
+    okl4_ksize_t l2_cache_line_size;
+    okl4_count_t num_cpus;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
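+
+/*
+ * Editor's note: illustrative sketch only. The cache-line sizes reported here
+ * can be used to pad shared buffers; the rounding below assumes the line size
+ * is a power of two, which this header does not guarantee. The helper name is
+ * hypothetical.
+ *
+ *     static okl4_ksize_t
+ *     example_round_to_l1(const struct okl4_machine_info *info, okl4_ksize_t n)
+ *     {
+ *         okl4_ksize_t line = info->l1_cache_line_size;
+ *
+ *         return (n + line - 1U) & ~(line - 1U);  // round n up to a line boundary
+ *     }
+ */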
+
+
+
+
+/**
+
+*/
+
+struct okl4_merged_physpool {
+    okl4_paddr_t phys_addr;
+    okl4_count_t num_segments;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    struct okl4_virtmem_item segments[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+typedef uint32_t okl4_microseconds_t;
+
+
+
+
+/**
+
+*/
+
+struct okl4_microvisor_timer {
+    okl4_kcap_t kcap;
+    okl4_irq_t virq;
+};
+
+
+
+
+/**
+    - BITS 15..0 -   @ref OKL4_MASK_ERROR_MMU_LOOKUP_INDEX
+    - BITS 31..16 -   @ref OKL4_MASK_INDEX_MMU_LOOKUP_INDEX
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_index_t) */
+typedef uint32_t okl4_mmu_lookup_index_t;
+
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error);
+
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index);
+
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_ERROR_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_ERROR_MASK ((okl4_mmu_lookup_index_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_ERROR_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_MMU_LOOKUP_INDEX_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_INDEX_INDEX_MASK ((okl4_mmu_lookup_index_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_MASK_INDEX_MMU_LOOKUP_INDEX ((okl4_mmu_lookup_index_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
+
+
+/*lint -sem(okl4_mmu_lookup_index_geterror, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_geterror) */
+/*lint -esym(714, okl4_mmu_lookup_index_geterror) */
+OKL4_FORCE_INLINE okl4_error_t
+okl4_mmu_lookup_index_geterror(const okl4_mmu_lookup_index_t *x)
+{
+    okl4_error_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_error_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_seterror, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_seterror) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_seterror) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_seterror(okl4_mmu_lookup_index_t *x, okl4_error_t _error)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_error;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_index_getindex, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_mmu_lookup_index_getindex) */
+/*lint -esym(714, okl4_mmu_lookup_index_getindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_index_getindex(const okl4_mmu_lookup_index_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_index_setindex, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_mmu_lookup_index_setindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_index_setindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_setindex(okl4_mmu_lookup_index_t *x, okl4_count_t _index)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_mmu_lookup_index_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_index;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_index_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_index_init(okl4_mmu_lookup_index_t *x)
+{
+    *x = (okl4_mmu_lookup_index_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_index_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_index_t
+okl4_mmu_lookup_index_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_mmu_lookup_index_t x = (okl4_mmu_lookup_index_t)p;
+    (void)force;
+    return x;
+}
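+
+/*
+ * Editor's note: illustrative sketch only. The packed error/index word
+ * produced by an MMU lookup is decoded with the accessors above; the helper
+ * name is hypothetical.
+ *
+ *     static okl4_bool_t
+ *     example_lookup_ok(const okl4_mmu_lookup_index_t *result,
+ *                       okl4_count_t *index_out)
+ *     {
+ *         if (okl4_mmu_lookup_index_geterror(result) != OKL4_ERROR_OK) {
+ *             return (okl4_bool_t)0;
+ *         }
+ *         *index_out = okl4_mmu_lookup_index_getindex(result);
+ *         return (okl4_bool_t)1;
+ *     }
+ */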
+
+
+
+/**
+    - BITS 9..0 -   @ref OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE
+    - BITS 63..10 -   @ref OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE
+*/
+
+/*lint -esym(621, okl4_mmu_lookup_size_t) */
+typedef okl4_register_t okl4_mmu_lookup_size_t;
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index);
+
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10);
+
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x);
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SEG_INDEX_MASK ((okl4_mmu_lookup_size_t)1023U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SEG_INDEX_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)1023U)
+/*lint -esym(621, OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK) */
+#define OKL4_MMU_LOOKUP_SIZE_SIZE_10_MASK ((okl4_mmu_lookup_size_t)18014398509481983U << 10) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_MASK_SIZE_10_MMU_LOOKUP_SIZE ((okl4_mmu_lookup_size_t)18014398509481983U << 10)
+/*lint -esym(621, OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/*lint -sem(okl4_mmu_lookup_size_getsegindex, 1p, @n >= 0 && @n <= 1023) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsegindex) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsegindex) */
+OKL4_FORCE_INLINE okl4_count_t
+okl4_mmu_lookup_size_getsegindex(const okl4_mmu_lookup_size_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 10;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsegindex, 2n >= 0 && 2n <= 1023) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsegindex) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsegindex) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsegindex(okl4_mmu_lookup_size_t *x, okl4_count_t _seg_index)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t field : 10;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_seg_index;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_mmu_lookup_size_getsize10, 1p, @n >= 0 && @n <= 18014398509481983) */
+/*lint -esym(621, okl4_mmu_lookup_size_getsize10) */
+/*lint -esym(714, okl4_mmu_lookup_size_getsize10) */
+OKL4_FORCE_INLINE okl4_register_t
+okl4_mmu_lookup_size_getsize10(const okl4_mmu_lookup_size_t *x)
+{
+    okl4_register_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 10;
+            uint64_t field : 54;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_register_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_mmu_lookup_size_setsize10, 2n >= 0 && 2n <= 18014398509481983) */
+/*lint -esym(714, okl4_mmu_lookup_size_setsize10) */
+
+/*lint -esym(621, okl4_mmu_lookup_size_setsize10) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_setsize10(okl4_mmu_lookup_size_t *x, okl4_register_t _size_10)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint64_t _skip : 10;
+            uint64_t field : 54;
+        } bits;
+        okl4_mmu_lookup_size_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint64_t)_size_10;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_mmu_lookup_size_init) */
+OKL4_FORCE_INLINE void
+okl4_mmu_lookup_size_init(okl4_mmu_lookup_size_t *x)
+{
+    *x = (okl4_mmu_lookup_size_t)0U;
+}
+
+/*lint -esym(714, okl4_mmu_lookup_size_cast) */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+okl4_mmu_lookup_size_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_mmu_lookup_size_t x = (okl4_mmu_lookup_size_t)p;
+    (void)force;
+    return x;
+}
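+
+/*
+ * Illustrative usage sketch, not part of the generated API: pack a segment
+ * index and a size value (pre-shifted right by 10 bits, as the "size_10"
+ * field name suggests) into a single okl4_mmu_lookup_size_t word.
+ */
+OKL4_FORCE_INLINE okl4_mmu_lookup_size_t
+example_okl4_make_mmu_lookup_size(okl4_count_t seg_index,
+        okl4_register_t size_10)
+{
+    okl4_mmu_lookup_size_t arg;
+
+    okl4_mmu_lookup_size_init(&arg);
+    okl4_mmu_lookup_size_setsegindex(&arg, seg_index); /* bits 9..0   */
+    okl4_mmu_lookup_size_setsize10(&arg, size_10);     /* bits 63..10 */
+    return arg;
+}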
+
+
+
+
+typedef uint64_t okl4_nanoseconds_t;
+
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS ((okl4_nanoseconds_t)(36028797018963968U))
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS ((okl4_nanoseconds_t)(1000000U))
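+
+/*
+ * Illustrative sketch, not part of the generated API: clamp a requested timer
+ * period to the documented bounds before passing it to a timer operation.
+ * Whether the bounds themselves are inclusive is an assumption here.
+ */
+OKL4_FORCE_INLINE okl4_nanoseconds_t
+example_okl4_clamp_timer_period(okl4_nanoseconds_t requested)
+{
+    if (requested < OKL4_TIMER_MIN_PERIOD_NS) {
+        return OKL4_TIMER_MIN_PERIOD_NS;
+    }
+    if (requested > OKL4_TIMER_MAX_PERIOD_NS) {
+        return OKL4_TIMER_MAX_PERIOD_NS;
+    }
+    return requested;
+}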
+
+
+
+/**
+    - BITS 2..0 -   @ref _OKL4_MASK_RWX_PAGE_ATTRIBUTE
+    - BITS 31..4 -   @ref _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE
+*/
+
+/*lint -esym(621, _okl4_page_attribute_t) */
+typedef uint32_t _okl4_page_attribute_t;
+
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx);
+
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x);
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib);
+
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x);
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_RWX_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_RWX_MASK ((_okl4_page_attribute_t)7U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_RWX_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)7U)
+/*lint -esym(621, _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK) */
+#define _OKL4_PAGE_ATTRIBUTE_ATTRIB_MASK ((_okl4_page_attribute_t)268435455U << 4) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_MASK_ATTRIB_PAGE_ATTRIBUTE ((_okl4_page_attribute_t)268435455U << 4)
+/*lint -esym(621, _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/*lint -sem(_okl4_page_attribute_getrwx, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, _okl4_page_attribute_getrwx) */
+/*lint -esym(714, _okl4_page_attribute_getrwx) */
+OKL4_FORCE_INLINE okl4_page_perms_t
+_okl4_page_attribute_getrwx(const _okl4_page_attribute_t *x)
+{
+    okl4_page_perms_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_page_perms_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setrwx, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, _okl4_page_attribute_setrwx) */
+
+/*lint -esym(621, _okl4_page_attribute_setrwx) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setrwx(_okl4_page_attribute_t *x, okl4_page_perms_t _rwx)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 3;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_rwx;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_page_attribute_getattrib, 1p, @n >= 0 && @n <= 268435455) */
+/*lint -esym(621, _okl4_page_attribute_getattrib) */
+/*lint -esym(714, _okl4_page_attribute_getattrib) */
+OKL4_FORCE_INLINE okl4_page_cache_t
+_okl4_page_attribute_getattrib(const _okl4_page_attribute_t *x)
+{
+    okl4_page_cache_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            uint32_t field : 28;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_page_cache_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_page_attribute_setattrib, 2n >= 0 && 2n <= 268435455) */
+/*lint -esym(714, _okl4_page_attribute_setattrib) */
+
+/*lint -esym(621, _okl4_page_attribute_setattrib) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_setattrib(_okl4_page_attribute_t *x, okl4_page_cache_t _attrib)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            uint32_t field : 28;
+        } bits;
+        _okl4_page_attribute_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_attrib;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_page_attribute_init) */
+OKL4_FORCE_INLINE void
+_okl4_page_attribute_init(_okl4_page_attribute_t *x)
+{
+    *x = (_okl4_page_attribute_t)0U;
+}
+
+/*lint -esym(714, _okl4_page_attribute_cast) */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+_okl4_page_attribute_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_page_attribute_t x = (_okl4_page_attribute_t)p;
+    (void)force;
+    return x;
+}
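+
+/*
+ * Illustrative sketch, not part of the generated API: combine a permission
+ * value and a cache attribute into a packed _okl4_page_attribute_t word.
+ * The okl4_page_perms_t and okl4_page_cache_t values are supplied by the
+ * caller and defined earlier in this header.
+ */
+OKL4_FORCE_INLINE _okl4_page_attribute_t
+example_okl4_make_page_attribute(okl4_page_perms_t rwx,
+        okl4_page_cache_t attrib)
+{
+    _okl4_page_attribute_t attr;
+
+    _okl4_page_attribute_init(&attr);
+    _okl4_page_attribute_setrwx(&attr, rwx);       /* bits 2..0  */
+    _okl4_page_attribute_setattrib(&attr, attrib); /* bits 31..4 */
+    return attr;
+}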
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_DO_OP_PIPE_CONTROL
+    - BITS 3..1 -   @ref OKL4_MASK_OPERATION_PIPE_CONTROL
+*/
+
+/*lint -esym(621, okl4_pipe_control_t) */
+typedef uint8_t okl4_pipe_control_t;
+
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op);
+
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x);
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation);
+
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x);
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force);
+
+
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED ((okl4_pipe_control_t)(4U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET ((okl4_pipe_control_t)(0U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED ((okl4_pipe_control_t)(3U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY ((okl4_pipe_control_t)(2U))
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY ((okl4_pipe_control_t)(1U))
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_DO_OP_MASK) */
+#define OKL4_PIPE_CONTROL_DO_OP_MASK (okl4_pipe_control_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_MASK_DO_OP_PIPE_CONTROL (okl4_pipe_control_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OPERATION_MASK) */
+#define OKL4_PIPE_CONTROL_OPERATION_MASK (okl4_pipe_control_t)(7U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_MASK_OPERATION_PIPE_CONTROL (okl4_pipe_control_t)(7U << 1)
+/*lint -esym(621, OKL4_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_WIDTH_OPERATION_PIPE_CONTROL (3)
+
+
+/*lint -sem(okl4_pipe_control_getdoop, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_control_getdoop) */
+/*lint -esym(714, okl4_pipe_control_getdoop) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_control_getdoop(const okl4_pipe_control_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_control_setdoop, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_control_setdoop) */
+
+/*lint -esym(621, okl4_pipe_control_setdoop) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setdoop(okl4_pipe_control_t *x, okl4_bool_t _do_op)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_do_op;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_control_getoperation, 1p, @n >= 0 && @n <= 7) */
+/*lint -esym(621, okl4_pipe_control_getoperation) */
+/*lint -esym(714, okl4_pipe_control_getoperation) */
+OKL4_FORCE_INLINE uint8_t
+okl4_pipe_control_getoperation(const okl4_pipe_control_t *x)
+{
+    uint8_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            uint32_t field : 3;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint8_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_control_setoperation, 2n >= 0 && 2n <= 7) */
+/*lint -esym(714, okl4_pipe_control_setoperation) */
+
+/*lint -esym(621, okl4_pipe_control_setoperation) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_setoperation(okl4_pipe_control_t *x, uint8_t _operation)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            uint32_t field : 3;
+        } bits;
+        okl4_pipe_control_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_operation;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_control_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_control_init(okl4_pipe_control_t *x)
+{
+    *x = (okl4_pipe_control_t)0U;
+}
+
+/*lint -esym(714, okl4_pipe_control_cast) */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+okl4_pipe_control_cast(uint8_t p, okl4_bool_t force)
+{
+    okl4_pipe_control_t x = (okl4_pipe_control_t)p;
+    (void)force;
+    return x;
+}
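+
+/*
+ * Illustrative sketch, not part of the generated API: build a pipe control
+ * word that asserts the do_op bit and selects one of the
+ * OKL4_PIPE_CONTROL_OP_* operations, e.g. OKL4_PIPE_CONTROL_OP_SET_TX_READY.
+ */
+OKL4_FORCE_INLINE okl4_pipe_control_t
+example_okl4_make_pipe_control(uint8_t operation)
+{
+    okl4_pipe_control_t control;
+
+    okl4_pipe_control_init(&control);
+    okl4_pipe_control_setdoop(&control, (okl4_bool_t)1U);  /* bit 0     */
+    okl4_pipe_control_setoperation(&control, operation);   /* bits 3..1 */
+    return control;
+}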
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_RESET_PIPE_STATE
+    - BIT 1 -   @ref OKL4_MASK_HALTED_PIPE_STATE
+    - BIT 2 -   @ref OKL4_MASK_RX_READY_PIPE_STATE
+    - BIT 3 -   @ref OKL4_MASK_TX_READY_PIPE_STATE
+    - BIT 4 -   @ref OKL4_MASK_RX_AVAILABLE_PIPE_STATE
+    - BIT 5 -   @ref OKL4_MASK_TX_AVAILABLE_PIPE_STATE
+    - BIT 6 -   @ref OKL4_MASK_WAITING_PIPE_STATE
+    - BIT 7 -   @ref OKL4_MASK_OVERQUOTA_PIPE_STATE
+*/
+
+/*lint -esym(621, okl4_pipe_state_t) */
+typedef uint8_t okl4_pipe_state_t;
+
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset);
+
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted);
+
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready);
+
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready);
+
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available);
+
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available);
+
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting);
+
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x);
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota);
+
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x);
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_PIPE_STATE_RESET_MASK) */
+#define OKL4_PIPE_STATE_RESET_MASK (okl4_pipe_state_t)(1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RESET_PIPE_STATE) */
+#define OKL4_MASK_RESET_PIPE_STATE (okl4_pipe_state_t)(1U)
+/*lint -esym(621, OKL4_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_HALTED_MASK) */
+#define OKL4_PIPE_STATE_HALTED_MASK (okl4_pipe_state_t)(1U << 1) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_HALTED_PIPE_STATE) */
+#define OKL4_MASK_HALTED_PIPE_STATE (okl4_pipe_state_t)(1U << 1)
+/*lint -esym(621, OKL4_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_READY_MASK) */
+#define OKL4_PIPE_STATE_RX_READY_MASK (okl4_pipe_state_t)(1U << 2) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_MASK_RX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 2)
+/*lint -esym(621, OKL4_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_READY_MASK) */
+#define OKL4_PIPE_STATE_TX_READY_MASK (okl4_pipe_state_t)(1U << 3) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_MASK_TX_READY_PIPE_STATE (okl4_pipe_state_t)(1U << 3)
+/*lint -esym(621, OKL4_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_RX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_RX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 4) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_RX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 4)
+/*lint -esym(621, OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_TX_AVAILABLE_MASK) */
+#define OKL4_PIPE_STATE_TX_AVAILABLE_MASK (okl4_pipe_state_t)(1U << 5) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_MASK_TX_AVAILABLE_PIPE_STATE (okl4_pipe_state_t)(1U << 5)
+/*lint -esym(621, OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_WAITING_MASK) */
+#define OKL4_PIPE_STATE_WAITING_MASK (okl4_pipe_state_t)(1U << 6) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_WAITING_PIPE_STATE) */
+#define OKL4_MASK_WAITING_PIPE_STATE (okl4_pipe_state_t)(1U << 6)
+/*lint -esym(621, OKL4_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_PIPE_STATE_OVERQUOTA_MASK) */
+#define OKL4_PIPE_STATE_OVERQUOTA_MASK (okl4_pipe_state_t)(1U << 7) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_MASK_OVERQUOTA_PIPE_STATE (okl4_pipe_state_t)(1U << 7)
+/*lint -esym(621, OKL4_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_WIDTH_OVERQUOTA_PIPE_STATE (1)
+
+
+/*lint -sem(okl4_pipe_state_getreset, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getreset) */
+/*lint -esym(714, okl4_pipe_state_getreset) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getreset(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setreset, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setreset) */
+
+/*lint -esym(621, okl4_pipe_state_setreset) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setreset(okl4_pipe_state_t *x, okl4_bool_t _reset)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_reset;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gethalted, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gethalted) */
+/*lint -esym(714, okl4_pipe_state_gethalted) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gethalted(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_sethalted, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_sethalted) */
+
+/*lint -esym(621, okl4_pipe_state_sethalted) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_sethalted(okl4_pipe_state_t *x, okl4_bool_t _halted)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_halted;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxready) */
+/*lint -esym(714, okl4_pipe_state_getrxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxready(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxready) */
+
+/*lint -esym(621, okl4_pipe_state_setrxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxready(okl4_pipe_state_t *x, okl4_bool_t _rx_ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_rx_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxready, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxready) */
+/*lint -esym(714, okl4_pipe_state_gettxready) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxready(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxready, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxready) */
+
+/*lint -esym(621, okl4_pipe_state_settxready) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxready(okl4_pipe_state_t *x, okl4_bool_t _tx_ready)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tx_ready;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getrxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getrxavailable) */
+/*lint -esym(714, okl4_pipe_state_getrxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getrxavailable(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setrxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setrxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_setrxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setrxavailable(okl4_pipe_state_t *x, okl4_bool_t _rx_available)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_rx_available;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_gettxavailable, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_gettxavailable) */
+/*lint -esym(714, okl4_pipe_state_gettxavailable) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_gettxavailable(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_settxavailable, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_settxavailable) */
+
+/*lint -esym(621, okl4_pipe_state_settxavailable) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_settxavailable(okl4_pipe_state_t *x, okl4_bool_t _tx_available)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_tx_available;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getwaiting, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getwaiting) */
+/*lint -esym(714, okl4_pipe_state_getwaiting) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getwaiting(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setwaiting, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setwaiting) */
+
+/*lint -esym(621, okl4_pipe_state_setwaiting) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setwaiting(okl4_pipe_state_t *x, okl4_bool_t _waiting)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 6;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_waiting;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_pipe_state_getoverquota, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_pipe_state_getoverquota) */
+/*lint -esym(714, okl4_pipe_state_getoverquota) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_pipe_state_getoverquota(const okl4_pipe_state_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_pipe_state_setoverquota, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_pipe_state_setoverquota) */
+
+/*lint -esym(621, okl4_pipe_state_setoverquota) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_setoverquota(okl4_pipe_state_t *x, okl4_bool_t _overquota)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 7;
+            _Bool field : 1;
+        } bits;
+        okl4_pipe_state_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_overquota;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_pipe_state_init) */
+OKL4_FORCE_INLINE void
+okl4_pipe_state_init(okl4_pipe_state_t *x)
+{
+    *x = (okl4_pipe_state_t)1U;
+}
+
+/*lint -esym(714, okl4_pipe_state_cast) */
+OKL4_FORCE_INLINE okl4_pipe_state_t
+okl4_pipe_state_cast(uint8_t p, okl4_bool_t force)
+{
+    okl4_pipe_state_t x = (okl4_pipe_state_t)p;
+    (void)force;
+    return x;
+}
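+
+/*
+ * Illustrative sketch, not part of the generated API: one plausible reading
+ * of the tx_ready and tx_available bits, treating a pipe as able to accept
+ * data when both are set. The exact protocol meaning of these bits is
+ * defined by the pipe interface, not by this example.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+example_okl4_pipe_can_send(const okl4_pipe_state_t *state)
+{
+    return (okl4_bool_t)(okl4_pipe_state_gettxready(state) &&
+            okl4_pipe_state_gettxavailable(state));
+}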
+
+
+
+
+typedef uint32_t okl4_power_state_t;
+
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE ((okl4_power_state_t)(0U))
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE ((okl4_power_state_t)(256U))
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF ((okl4_power_state_t)(1U))
+
+
+
+/**
+    The okl4_priority_t type represents a thread scheduling priority.
+    Valid priorities range over [0, CONFIG\_SCHEDULER\_NUM\_PRIOS).
+*/
+
+typedef int8_t okl4_priority_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_pn_t;
+
+
+
+
+
+typedef okl4_register_t okl4_psize_tr_t;
+
+
+
+
+/**
+    The okl4_register_set_t type is an enumeration identifying one of
+    the register sets supported by the host machine. This includes the
+    general-purpose registers, along with other CPU-specific register
+    sets such as floating point or vector registers.
+
+    - @ref OKL4_REGISTER_SET_CPU_REGS
+    - @ref OKL4_REGISTER_SET_VFP_REGS
+    - @ref OKL4_REGISTER_SET_VFP_CTRL_REGS
+    - @ref OKL4_REGISTER_SET_VFP64_REGS
+    - @ref OKL4_REGISTER_SET_VFP128_REGS
+    - @ref OKL4_REGISTER_SET_MAX
+    - @ref OKL4_REGISTER_SET_INVALID
+*/
+
+typedef uint32_t okl4_register_set_t;
+
+/*lint -esym(621, OKL4_REGISTER_SET_CPU_REGS) */
+#define OKL4_REGISTER_SET_CPU_REGS ((okl4_register_set_t)0x0U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_REGS) */
+#define OKL4_REGISTER_SET_VFP_REGS ((okl4_register_set_t)0x1U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_REGISTER_SET_VFP_CTRL_REGS ((okl4_register_set_t)0x2U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP64_REGS) */
+#define OKL4_REGISTER_SET_VFP64_REGS ((okl4_register_set_t)0x3U)
+/*lint -esym(621, OKL4_REGISTER_SET_VFP128_REGS) */
+#define OKL4_REGISTER_SET_VFP128_REGS ((okl4_register_set_t)0x4U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_MAX) */
+#define OKL4_REGISTER_SET_MAX ((okl4_register_set_t)0x4U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_REGISTER_SET_INVALID) */
+#define OKL4_REGISTER_SET_INVALID ((okl4_register_set_t)0xffffffffU)
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var);
+
+
+/*lint -esym(714, okl4_register_set_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_register_set_is_element_of(okl4_register_set_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_REGISTER_SET_CPU_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP_CTRL_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP64_REGS) ||
+            (var == OKL4_REGISTER_SET_VFP128_REGS));
+}
+
+
+
+typedef okl4_psize_t okl4_vsize_t;
+
+
+
+
+/**
+    The okl4_register_and_set_t type is a bitfield containing a register
+    set identifier of type okl4_register_set_t, and an index into that
+    register set.
+
+    - BITS 15..0 -   @ref OKL4_MASK_OFFSET_REGISTER_AND_SET
+    - BITS 31..16 -   @ref OKL4_MASK_SET_REGISTER_AND_SET
+*/
+
+/*lint -esym(621, okl4_register_and_set_t) */
+typedef uint32_t okl4_register_and_set_t;
+
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset);
+
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x);
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set);
+
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x);
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_REGISTER_AND_SET_OFFSET_MASK) */
+#define OKL4_REGISTER_AND_SET_OFFSET_MASK ((okl4_register_and_set_t)65535U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_MASK_OFFSET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U)
+/*lint -esym(621, OKL4_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_REGISTER_AND_SET_SET_MASK) */
+#define OKL4_REGISTER_AND_SET_SET_MASK ((okl4_register_and_set_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_MASK_SET_REGISTER_AND_SET ((okl4_register_and_set_t)65535U << 16)
+/*lint -esym(621, OKL4_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/*lint -sem(okl4_register_and_set_getoffset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getoffset) */
+/*lint -esym(714, okl4_register_and_set_getoffset) */
+OKL4_FORCE_INLINE okl4_vsize_t
+okl4_register_and_set_getoffset(const okl4_register_and_set_t *x)
+{
+    okl4_vsize_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_vsize_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_register_and_set_setoffset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setoffset) */
+
+/*lint -esym(621, okl4_register_and_set_setoffset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setoffset(okl4_register_and_set_t *x, okl4_vsize_t _offset)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_offset;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_register_and_set_getset, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, okl4_register_and_set_getset) */
+/*lint -esym(714, okl4_register_and_set_getset) */
+OKL4_FORCE_INLINE okl4_register_set_t
+okl4_register_and_set_getset(const okl4_register_and_set_t *x)
+{
+    okl4_register_set_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_register_set_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_register_and_set_setset, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, okl4_register_and_set_setset) */
+
+/*lint -esym(621, okl4_register_and_set_setset) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_setset(okl4_register_and_set_t *x, okl4_register_set_t _set)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        okl4_register_and_set_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_set;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_register_and_set_init) */
+OKL4_FORCE_INLINE void
+okl4_register_and_set_init(okl4_register_and_set_t *x)
+{
+    *x = (okl4_register_and_set_t)0U;
+}
+
+/*lint -esym(714, okl4_register_and_set_cast) */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+okl4_register_and_set_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_register_and_set_t x = (okl4_register_and_set_t)p;
+    (void)force;
+    return x;
+}
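+
+/*
+ * Illustrative sketch, not part of the generated API: pack a register set
+ * identifier and a register offset into an okl4_register_and_set_t selector,
+ * rejecting set values that okl4_register_set_is_element_of() does not
+ * recognise. Whether "offset" is a register index or a byte offset is an
+ * assumption left to the real interface.
+ */
+OKL4_FORCE_INLINE okl4_register_and_set_t
+example_okl4_make_register_selector(okl4_register_set_t set,
+        okl4_vsize_t offset)
+{
+    okl4_register_and_set_t sel;
+
+    okl4_register_and_set_init(&sel);
+    if (okl4_register_set_is_element_of(set)) {
+        okl4_register_and_set_setoffset(&sel, offset); /* bits 15..0  */
+        okl4_register_and_set_setset(&sel, set);       /* bits 31..16 */
+    }
+    return sel;
+}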
+
+
+
+
+struct okl4_cpu_registers {
+    okl4_register_t x[31];
+    okl4_register_t sp_el0;
+    okl4_register_t ip;
+    uint32_t cpsr;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+    okl4_register_t sp_EL1;
+    okl4_register_t elr_EL1;
+    uint32_t spsr_EL1;
+    uint32_t spsr_abt;
+    uint32_t spsr_und;
+    uint32_t spsr_irq;
+    uint32_t spsr_fiq;
+    uint32_t csselr_EL1;
+    okl4_arm_sctlr_t sctlr_EL1;
+    uint32_t cpacr_EL1;
+    uint64_t ttbr0_EL1;
+    uint64_t ttbr1_EL1;
+    uint64_t tcr_EL1;
+    uint32_t dacr32_EL2;
+    uint32_t ifsr32_EL2;
+    uint32_t esr_EL1;
+    _okl4_padding_t __padding4_4; /**< Padding 8 */
+    _okl4_padding_t __padding5_5; /**< Padding 8 */
+    _okl4_padding_t __padding6_6; /**< Padding 8 */
+    _okl4_padding_t __padding7_7; /**< Padding 8 */
+    uint64_t far_EL1;
+    uint64_t par_EL1;
+    uint64_t mair_EL1;
+    uint64_t vbar_EL1;
+    uint32_t contextidr_EL1;
+    _okl4_padding_t __padding8_4; /**< Padding 8 */
+    _okl4_padding_t __padding9_5; /**< Padding 8 */
+    _okl4_padding_t __padding10_6; /**< Padding 8 */
+    _okl4_padding_t __padding11_7; /**< Padding 8 */
+    uint64_t tpidr_EL1;
+    uint64_t tpidrro_EL0;
+    uint64_t tpidr_EL0;
+    uint32_t pmcr_EL0;
+    _okl4_padding_t __padding12_4; /**< Padding 8 */
+    _okl4_padding_t __padding13_5; /**< Padding 8 */
+    _okl4_padding_t __padding14_6; /**< Padding 8 */
+    _okl4_padding_t __padding15_7; /**< Padding 8 */
+    uint64_t pmccntr_EL0;
+    uint32_t fpexc32_EL2;
+    uint32_t cntkctl_EL1;
+};
+
+
+
+
+
+
+/**
+    The okl4_cpu_registers_t type represents a CPU register context on the
+    native machine, covering the general-purpose registers together with the
+    system and banked registers listed in struct okl4_cpu_registers above.
+*/
+
+typedef struct okl4_cpu_registers okl4_cpu_registers_t;
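+
+/*
+ * Illustrative sketch, not part of the generated API: zero-initialise a
+ * register context and fill in the fields of interest. Treating "ip" as the
+ * entry point (program counter) and sp_el0 as the initial stack pointer is
+ * an assumption of this example.
+ */
+OKL4_FORCE_INLINE void
+example_okl4_init_cpu_registers(okl4_cpu_registers_t *regs,
+        okl4_register_t entry, okl4_register_t stack)
+{
+    okl4_cpu_registers_t zero = { { 0 } };
+
+    *regs = zero;          /* aggregate init zeroes every named member */
+    regs->ip = entry;
+    regs->sp_el0 = stack;
+}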
+
+
+
+
+/**
+    The `okl4_rights_t` type represents a set of operations that are allowed to
+    be performed using a given cap.
+*/
+
+typedef uint32_t okl4_rights_t;
+
+
+
+
+
+typedef uint64_t okl4_soc_time_t;
+
+
+
+
+/**
+    Scheduling profile counters: a sample timestamp, accumulated CPU time,
+    and counts of context switches, CPU migrations, hardware interrupts and
+    virtual interrupts.
+*/
+
+struct okl4_schedule_profile_data {
+    okl4_soc_time_t timestamp;
+    okl4_soc_time_t cpu_time;
+    okl4_count_t context_switches;
+    okl4_count_t cpu_migrations;
+    okl4_count_t cpu_hwirqs;
+    okl4_count_t cpu_virqs;
+};
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS
+*/
+
+/*lint -esym(621, okl4_scheduler_virq_flags_t) */
+typedef okl4_virq_flags_t okl4_scheduler_virq_flags_t;
+
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x);
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK) */
+#define OKL4_SCHEDULER_VIRQ_FLAGS_POWER_SUSPENDED_MASK ((okl4_scheduler_virq_flags_t)1U) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS ((okl4_scheduler_virq_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/*lint -sem(okl4_scheduler_virq_flags_getpowersuspended, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_scheduler_virq_flags_getpowersuspended) */
+/*lint -esym(714, okl4_scheduler_virq_flags_getpowersuspended) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_scheduler_virq_flags_getpowersuspended(const okl4_scheduler_virq_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_scheduler_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_scheduler_virq_flags_setpowersuspended, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_scheduler_virq_flags_setpowersuspended) */
+
+/*lint -esym(621, okl4_scheduler_virq_flags_setpowersuspended) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_setpowersuspended(okl4_scheduler_virq_flags_t *x, okl4_bool_t _power_suspended)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_scheduler_virq_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_power_suspended;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_scheduler_virq_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_scheduler_virq_flags_init(okl4_scheduler_virq_flags_t *x)
+{
+    *x = (okl4_scheduler_virq_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_scheduler_virq_flags_cast) */
+OKL4_FORCE_INLINE okl4_scheduler_virq_flags_t
+okl4_scheduler_virq_flags_cast(uint64_t p, okl4_bool_t force)
+{
+    okl4_scheduler_virq_flags_t x = (okl4_scheduler_virq_flags_t)p;
+    (void)force;
+    return x;
+}
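+
+/*
+ * Illustrative sketch, not part of the generated API: interpret a raw
+ * scheduler vIRQ payload by casting it to okl4_scheduler_virq_flags_t and
+ * reading the power_suspended bit.
+ */
+OKL4_FORCE_INLINE okl4_bool_t
+example_okl4_virq_power_suspended(uint64_t payload)
+{
+    okl4_scheduler_virq_flags_t flags =
+            okl4_scheduler_virq_flags_cast(payload, (okl4_bool_t)0U);
+
+    return okl4_scheduler_virq_flags_getpowersuspended(&flags);
+}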
+
+
+
+/**
+    The `okl4_scount_t` type represents a count of items or iterations.
+    Non-negative values are counts; negative values represent errors. Use
+    `okl4_count_t` if error values are not required.
+*/
+
+typedef int32_t okl4_scount_t;
+
+
+
+
+/**
+    The SDK_VERSION value carries the global, SDK-wide software version.
+
+    - BITS 5..0 -   @ref OKL4_MASK_MAINTENANCE_SDK_VERSION
+    - BITS 15..8 -   @ref OKL4_MASK_RELEASE_SDK_VERSION
+    - BITS 21..16 -   @ref OKL4_MASK_MINOR_SDK_VERSION
+    - BITS 27..24 -   @ref OKL4_MASK_MAJOR_SDK_VERSION
+    - BIT 28 -   @ref OKL4_MASK_RES0_FLAG_SDK_VERSION
+    - BIT 30 -   @ref OKL4_MASK_DEV_FLAG_SDK_VERSION
+    - BIT 31 -   @ref OKL4_MASK_FORMAT_FLAG_SDK_VERSION
+*/
+
+/*lint -esym(621, okl4_sdk_version_t) */
+typedef uint32_t okl4_sdk_version_t;
+
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag);
+
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag);
+
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag);
+
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major);
+
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor);
+
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release);
+
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x);
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance);
+
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x);
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_SDK_VERSION_MAINTENANCE_MASK) */
+#define OKL4_SDK_VERSION_MAINTENANCE_MASK ((okl4_sdk_version_t)63U) /* Deprecated */
+/** Maintenance number */
+/*lint -esym(621, OKL4_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_MASK_MAINTENANCE_SDK_VERSION ((okl4_sdk_version_t)63U)
+/*lint -esym(621, OKL4_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_RELEASE_MASK) */
+#define OKL4_SDK_VERSION_RELEASE_MASK ((okl4_sdk_version_t)255U << 8) /* Deprecated */
+/** SDK Release Number */
+/*lint -esym(621, OKL4_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_MASK_RELEASE_SDK_VERSION ((okl4_sdk_version_t)255U << 8)
+/*lint -esym(621, OKL4_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_SDK_VERSION_MINOR_MASK) */
+#define OKL4_SDK_VERSION_MINOR_MASK ((okl4_sdk_version_t)63U << 16) /* Deprecated */
+/** SDK Minor Number */
+/*lint -esym(621, OKL4_MASK_MINOR_SDK_VERSION) */
+#define OKL4_MASK_MINOR_SDK_VERSION ((okl4_sdk_version_t)63U << 16)
+/*lint -esym(621, OKL4_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_SDK_VERSION_MAJOR_MASK) */
+#define OKL4_SDK_VERSION_MAJOR_MASK ((okl4_sdk_version_t)15U << 24) /* Deprecated */
+/** SDK Major Number */
+/*lint -esym(621, OKL4_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_MASK_MAJOR_SDK_VERSION ((okl4_sdk_version_t)15U << 24)
+/*lint -esym(621, OKL4_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_SDK_VERSION_RES0_FLAG_MASK) */
+#define OKL4_SDK_VERSION_RES0_FLAG_MASK ((okl4_sdk_version_t)1U << 28) /* Deprecated */
+/** Reserved */
+/*lint -esym(621, OKL4_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_MASK_RES0_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 28)
+/*lint -esym(621, OKL4_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_DEV_FLAG_MASK) */
+#define OKL4_SDK_VERSION_DEV_FLAG_MASK ((okl4_sdk_version_t)1U << 30) /* Deprecated */
+/** Unreleased internal development version */
+/*lint -esym(621, OKL4_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_MASK_DEV_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_SDK_VERSION_FORMAT_FLAG_MASK) */
+#define OKL4_SDK_VERSION_FORMAT_FLAG_MASK ((okl4_sdk_version_t)1U << 31) /* Deprecated */
+/** Format flag: 0 = version format 1; 1 = reserved */
+/*lint -esym(621, OKL4_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_MASK_FORMAT_FLAG_SDK_VERSION ((okl4_sdk_version_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
+
+
+/*lint -sem(okl4_sdk_version_getmaintenance, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getmaintenance) */
+/*lint -esym(714, okl4_sdk_version_getmaintenance) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmaintenance(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmaintenance, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setmaintenance) */
+
+/*lint -esym(621, okl4_sdk_version_setmaintenance) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmaintenance(okl4_sdk_version_t *x, uint32_t _maintenance)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_maintenance;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getrelease, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, okl4_sdk_version_getrelease) */
+/*lint -esym(714, okl4_sdk_version_getrelease) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getrelease(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 8;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setrelease, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, okl4_sdk_version_setrelease) */
+
+/*lint -esym(621, okl4_sdk_version_setrelease) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setrelease(okl4_sdk_version_t *x, uint32_t _release)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            uint32_t field : 8;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_release;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getminor, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, okl4_sdk_version_getminor) */
+/*lint -esym(714, okl4_sdk_version_getminor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getminor(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setminor, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, okl4_sdk_version_setminor) */
+
+/*lint -esym(621, okl4_sdk_version_setminor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setminor(okl4_sdk_version_t *x, uint32_t _minor)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_minor;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getmajor, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, okl4_sdk_version_getmajor) */
+/*lint -esym(714, okl4_sdk_version_getmajor) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getmajor(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 4;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setmajor, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, okl4_sdk_version_setmajor) */
+
+/*lint -esym(621, okl4_sdk_version_setmajor) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setmajor(okl4_sdk_version_t *x, uint32_t _major)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 24;
+            uint32_t field : 4;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_major;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getres0flag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getres0flag) */
+/*lint -esym(714, okl4_sdk_version_getres0flag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getres0flag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setres0flag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setres0flag) */
+
+/*lint -esym(621, okl4_sdk_version_setres0flag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setres0flag(okl4_sdk_version_t *x, uint32_t _res0_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_res0_flag;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getdevflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getdevflag) */
+/*lint -esym(714, okl4_sdk_version_getdevflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getdevflag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setdevflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setdevflag) */
+
+/*lint -esym(621, okl4_sdk_version_setdevflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setdevflag(okl4_sdk_version_t *x, uint32_t _dev_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_dev_flag;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_sdk_version_getformatflag, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_sdk_version_getformatflag) */
+/*lint -esym(714, okl4_sdk_version_getformatflag) */
+OKL4_FORCE_INLINE uint32_t
+okl4_sdk_version_getformatflag(const okl4_sdk_version_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_sdk_version_setformatflag, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_sdk_version_setformatflag) */
+
+/*lint -esym(621, okl4_sdk_version_setformatflag) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_setformatflag(okl4_sdk_version_t *x, uint32_t _format_flag)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            uint32_t field : 1;
+        } bits;
+        okl4_sdk_version_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_format_flag;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_sdk_version_init) */
+OKL4_FORCE_INLINE void
+okl4_sdk_version_init(okl4_sdk_version_t *x)
+{
+    *x = (okl4_sdk_version_t)0U;
+}
+
+/*lint -esym(714, okl4_sdk_version_cast) */
+OKL4_FORCE_INLINE okl4_sdk_version_t
+okl4_sdk_version_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_sdk_version_t x = (okl4_sdk_version_t)p;
+    (void)force;
+    return x;
+}
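+
+/*
+ * Usage sketch (illustrative only; the values shown are examples): build
+ * an SDK version word with the accessors above.
+ *
+ *     okl4_sdk_version_t ver;
+ *
+ *     okl4_sdk_version_init(&ver);
+ *     okl4_sdk_version_setmajor(&ver, 5U);
+ *     okl4_sdk_version_setminor(&ver, 1U);
+ */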
+
+
+
+/**
+    The `okl4_shared_buffer` structure describes a single shared memory
+    buffer: its physical base address, the virtual memory item that maps
+    it, and the capability used to refer to it.
+*/
+
+struct okl4_shared_buffer {
+    okl4_paddr_t physical_base;
+    struct okl4_virtmem_item virtmem_item;
+    okl4_kcap_t cap;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+    The `okl4_shared_buffers_array` structure describes an array of shared
+    buffers: a pointer to the first `okl4_shared_buffer` element and the
+    number of buffers in the array.
+*/
+
+struct okl4_shared_buffers_array {
+    __ptr64(struct okl4_shared_buffer *, buffers);
+    okl4_count_t num_buffers;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+
+typedef okl4_kcap_t okl4_signal_t;
+
+
+
+
+
+
+
+
+/**
+    The `okl4_sregister_t` type represents a signed, machine-native
+    register-sized integer value.
+*/
+
+typedef int64_t okl4_sregister_t;
+
+
+
+
+
+typedef uint64_t okl4_ticks_t;
+
+
+
+
+/**
+    - BIT 0 -   @ref OKL4_MASK_ACTIVE_TIMER_FLAGS
+    - BIT 1 -   @ref OKL4_MASK_PERIODIC_TIMER_FLAGS
+    - BIT 2 -   @ref OKL4_MASK_ABSOLUTE_TIMER_FLAGS
+    - BIT 3 -   @ref OKL4_MASK_UNITS_TIMER_FLAGS
+    - BIT 4 -   @ref OKL4_MASK_ALIGN_TIMER_FLAGS
+    - BIT 5 -   @ref OKL4_MASK_WATCHDOG_TIMER_FLAGS
+    - BIT 30 -   @ref OKL4_MASK_RELOAD_TIMER_FLAGS
+    - BIT 31 -   @ref OKL4_MASK_TIMESLICE_TIMER_FLAGS
+*/
+
+/*lint -esym(621, okl4_timer_flags_t) */
+typedef uint32_t okl4_timer_flags_t;
+
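+/*
+ * Usage sketch (illustrative only): compose a flags word for an active,
+ * periodic, relative timer using the accessors declared below.
+ *
+ *     okl4_timer_flags_t flags;
+ *
+ *     okl4_timer_flags_init(&flags);
+ *     okl4_timer_flags_setactive(&flags, (okl4_bool_t)1U);
+ *     okl4_timer_flags_setperiodic(&flags, (okl4_bool_t)1U);
+ *     okl4_timer_flags_setabsolute(&flags, (okl4_bool_t)0U);
+ */
+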
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active);
+
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic);
+
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute);
+
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units);
+
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align);
+
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog);
+
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload);
+
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x);
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice);
+
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x);
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, OKL4_TIMER_FLAGS_ACTIVE_MASK) */
+#define OKL4_TIMER_FLAGS_ACTIVE_MASK ((okl4_timer_flags_t)1U) /* Deprecated */
+/** Indicates that the timer has a timeout set */
+/*lint -esym(621, OKL4_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_MASK_ACTIVE_TIMER_FLAGS ((okl4_timer_flags_t)1U)
+/*lint -esym(621, OKL4_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_PERIODIC_MASK) */
+#define OKL4_TIMER_FLAGS_PERIODIC_MASK ((okl4_timer_flags_t)1U << 1) /* Deprecated */
+/** Indicates that the timer is periodic, otherwise it is one-shot */
+/*lint -esym(621, OKL4_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_MASK_PERIODIC_TIMER_FLAGS ((okl4_timer_flags_t)1U << 1)
+/*lint -esym(621, OKL4_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ABSOLUTE_MASK) */
+#define OKL4_TIMER_FLAGS_ABSOLUTE_MASK ((okl4_timer_flags_t)1U << 2) /* Deprecated */
+/** Indicates that the timeout value is absolute, otherwise it is relative */
+/*lint -esym(621, OKL4_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_MASK_ABSOLUTE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 2)
+/*lint -esym(621, OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_UNITS_MASK) */
+#define OKL4_TIMER_FLAGS_UNITS_MASK ((okl4_timer_flags_t)1U << 3) /* Deprecated */
+/** Indicates that timeout values are specified in units of raw ticks */
+/*lint -esym(621, OKL4_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_MASK_UNITS_TIMER_FLAGS ((okl4_timer_flags_t)1U << 3)
+/*lint -esym(621, OKL4_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_ALIGN_MASK) */
+#define OKL4_TIMER_FLAGS_ALIGN_MASK ((okl4_timer_flags_t)1U << 4) /* Deprecated */
+/** Align first timeout of a periodic timer to a multiple of the timeout length */
+/*lint -esym(621, OKL4_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_MASK_ALIGN_TIMER_FLAGS ((okl4_timer_flags_t)1U << 4)
+/*lint -esym(621, OKL4_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_WATCHDOG_MASK) */
+#define OKL4_TIMER_FLAGS_WATCHDOG_MASK ((okl4_timer_flags_t)1U << 5) /* Deprecated */
+/** Enter the kernel interactive debugger on timer expiry (no effect for production builds of the kernel) */
+/*lint -esym(621, OKL4_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_MASK_WATCHDOG_TIMER_FLAGS ((okl4_timer_flags_t)1U << 5)
+/*lint -esym(621, OKL4_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_RELOAD_MASK) */
+#define OKL4_TIMER_FLAGS_RELOAD_MASK ((okl4_timer_flags_t)1U << 30) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_MASK_RELOAD_TIMER_FLAGS ((okl4_timer_flags_t)1U << 30)
+/*lint -esym(621, OKL4_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_TIMER_FLAGS_TIMESLICE_MASK) */
+#define OKL4_TIMER_FLAGS_TIMESLICE_MASK ((okl4_timer_flags_t)1U << 31) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_MASK_TIMESLICE_TIMER_FLAGS ((okl4_timer_flags_t)1U << 31)
+/*lint -esym(621, OKL4_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_WIDTH_TIMESLICE_TIMER_FLAGS (1)
+
+
+/*lint -sem(okl4_timer_flags_getactive, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getactive) */
+/*lint -esym(714, okl4_timer_flags_getactive) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getactive(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setactive, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setactive) */
+
+/*lint -esym(621, okl4_timer_flags_setactive) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setactive(okl4_timer_flags_t *x, okl4_bool_t _active)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_active;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getperiodic, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getperiodic) */
+/*lint -esym(714, okl4_timer_flags_getperiodic) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getperiodic(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setperiodic, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setperiodic) */
+
+/*lint -esym(621, okl4_timer_flags_setperiodic) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setperiodic(okl4_timer_flags_t *x, okl4_bool_t _periodic)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 1;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_periodic;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getabsolute, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getabsolute) */
+/*lint -esym(714, okl4_timer_flags_getabsolute) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getabsolute(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setabsolute, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setabsolute) */
+
+/*lint -esym(621, okl4_timer_flags_setabsolute) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setabsolute(okl4_timer_flags_t *x, okl4_bool_t _absolute)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 2;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_absolute;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getunits, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getunits) */
+/*lint -esym(714, okl4_timer_flags_getunits) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getunits(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setunits, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setunits) */
+
+/*lint -esym(621, okl4_timer_flags_setunits) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setunits(okl4_timer_flags_t *x, okl4_bool_t _units)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 3;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_units;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getalign, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getalign) */
+/*lint -esym(714, okl4_timer_flags_getalign) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getalign(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setalign, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setalign) */
+
+/*lint -esym(621, okl4_timer_flags_setalign) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setalign(okl4_timer_flags_t *x, okl4_bool_t _align)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 4;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_align;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getwatchdog, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getwatchdog) */
+/*lint -esym(714, okl4_timer_flags_getwatchdog) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getwatchdog(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setwatchdog, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setwatchdog) */
+
+/*lint -esym(621, okl4_timer_flags_setwatchdog) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setwatchdog(okl4_timer_flags_t *x, okl4_bool_t _watchdog)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 5;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_watchdog;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_getreload, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_getreload) */
+/*lint -esym(714, okl4_timer_flags_getreload) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_getreload(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_setreload, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_setreload) */
+
+/*lint -esym(621, okl4_timer_flags_setreload) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_setreload(okl4_timer_flags_t *x, okl4_bool_t _reload)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 30;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_reload;
+    *x = _conv.raw;
+}
+/*lint -sem(okl4_timer_flags_gettimeslice, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_timer_flags_gettimeslice) */
+/*lint -esym(714, okl4_timer_flags_gettimeslice) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_timer_flags_gettimeslice(const okl4_timer_flags_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(okl4_timer_flags_settimeslice, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_timer_flags_settimeslice) */
+
+/*lint -esym(621, okl4_timer_flags_settimeslice) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_settimeslice(okl4_timer_flags_t *x, okl4_bool_t _timeslice)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 31;
+            _Bool field : 1;
+        } bits;
+        okl4_timer_flags_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_timeslice;
+    *x = _conv.raw;
+}
+/*lint -esym(714, okl4_timer_flags_init) */
+OKL4_FORCE_INLINE void
+okl4_timer_flags_init(okl4_timer_flags_t *x)
+{
+    *x = (okl4_timer_flags_t)0U;
+}
+
+/*lint -esym(714, okl4_timer_flags_cast) */
+OKL4_FORCE_INLINE okl4_timer_flags_t
+okl4_timer_flags_cast(uint32_t p, okl4_bool_t force)
+{
+    okl4_timer_flags_t x = (okl4_timer_flags_t)p;
+    (void)force;
+    return x;
+}
+
+
+
+
+struct _okl4_tracebuffer_buffer_header {
+    okl4_soc_time_t timestamp;
+    okl4_count_t wrap;
+    _okl4_padding_t __padding0_4; /**< Padding 8 */
+    _okl4_padding_t __padding1_5; /**< Padding 8 */
+    _okl4_padding_t __padding2_6; /**< Padding 8 */
+    _okl4_padding_t __padding3_7; /**< Padding 8 */
+    okl4_ksize_t size;
+    okl4_ksize_t head;
+    okl4_ksize_t offset;
+};
+
+
+
+
+
+
+/**
+    The `okl4_tracebuffer_env` structure describes the trace buffer
+    environment: the virtual memory item that maps the trace buffer and
+    the virtual interrupt number associated with it.
+*/
+
+struct okl4_tracebuffer_env {
+    struct okl4_virtmem_item virt;
+    okl4_interrupt_number_t virq;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+
+struct _okl4_tracebuffer_header {
+    uint32_t magic;
+    uint32_t version;
+    uint32_t id;
+    okl4_count_t num_buffers;
+    okl4_ksize_t buffer_size;
+    okl4_atomic_uint32_t log_mask;
+    okl4_atomic_uint32_t active_buffer;
+    okl4_atomic_uint32_t grabbed_buffer;
+    okl4_atomic_uint32_t empty_buffers;
+    struct _okl4_tracebuffer_buffer_header buffers[]; /*lint --e{9038} flex array */
+};
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_class_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_TRACEPOINT_CLASS_THREAD_STATE ((okl4_tracepoint_class_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_TRACEPOINT_CLASS_SYSCALLS ((okl4_tracepoint_class_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_TRACEPOINT_CLASS_PRIMARY ((okl4_tracepoint_class_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_TRACEPOINT_CLASS_SECONDARY ((okl4_tracepoint_class_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_TRACEPOINT_CLASS_TERTIARY ((okl4_tracepoint_class_t)0x4U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_MAX) */
+#define OKL4_TRACEPOINT_CLASS_MAX ((okl4_tracepoint_class_t)0x4U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_TRACEPOINT_CLASS_INVALID ((okl4_tracepoint_class_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_class_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_class_is_element_of(okl4_tracepoint_class_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_CLASS_THREAD_STATE) ||
+            (var == OKL4_TRACEPOINT_CLASS_SYSCALLS) ||
+            (var == OKL4_TRACEPOINT_CLASS_PRIMARY) ||
+            (var == OKL4_TRACEPOINT_CLASS_SECONDARY) ||
+            (var == OKL4_TRACEPOINT_CLASS_TERTIARY));
+}
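+
+/*
+ * Usage sketch (illustrative only; `cls` is a hypothetical local): reject
+ * values that are not members of the enumeration before further use.
+ *
+ *     okl4_tracepoint_class_t cls = OKL4_TRACEPOINT_CLASS_PRIMARY;
+ *
+ *     if (!okl4_tracepoint_class_is_element_of(cls)) {
+ *         cls = OKL4_TRACEPOINT_CLASS_INVALID;
+ *     }
+ */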
+
+
+/**
+    - BITS 7..0 -   @ref _OKL4_MASK_ID_TRACEPOINT_DESC
+    - BIT 8 -   @ref _OKL4_MASK_USER_TRACEPOINT_DESC
+    - BIT 9 -   @ref _OKL4_MASK_BIN_TRACEPOINT_DESC
+    - BITS 15..10 -   @ref _OKL4_MASK_RECLEN_TRACEPOINT_DESC
+    - BITS 21..16 -   @ref _OKL4_MASK_CPUID_TRACEPOINT_DESC
+    - BITS 27..22 -   @ref _OKL4_MASK_THREADID_TRACEPOINT_DESC
+    - BITS 31..28 -   @ref _OKL4_MASK__R1_TRACEPOINT_DESC
+*/
+
+/*lint -esym(621, _okl4_tracepoint_desc_t) */
+typedef uint32_t _okl4_tracepoint_desc_t;
+
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid);
+
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1);
+
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_ID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_ID_MASK ((_okl4_tracepoint_desc_t)255U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_ID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)255U)
+/*lint -esym(621, _OKL4_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_USER_MASK) */
+#define _OKL4_TRACEPOINT_DESC_USER_MASK ((_okl4_tracepoint_desc_t)1U << 8) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_MASK_USER_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 8)
+/*lint -esym(621, _OKL4_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_BIN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_BIN_MASK ((_okl4_tracepoint_desc_t)1U << 9) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_BIN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)1U << 9)
+/*lint -esym(621, _OKL4_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_RECLEN_MASK) */
+#define _OKL4_TRACEPOINT_DESC_RECLEN_MASK ((_okl4_tracepoint_desc_t)63U << 10) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_MASK_RECLEN_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 10)
+/*lint -esym(621, _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_CPUID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_CPUID_MASK ((_okl4_tracepoint_desc_t)63U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_CPUID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 16)
+/*lint -esym(621, _OKL4_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC_THREADID_MASK) */
+#define _OKL4_TRACEPOINT_DESC_THREADID_MASK ((_okl4_tracepoint_desc_t)63U << 22) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_MASK_THREADID_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)63U << 22)
+/*lint -esym(621, _OKL4_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_TRACEPOINT_DESC__R1_MASK) */
+#define _OKL4_TRACEPOINT_DESC__R1_MASK ((_okl4_tracepoint_desc_t)15U << 28) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_MASK__R1_TRACEPOINT_DESC ((_okl4_tracepoint_desc_t)15U << 28)
+/*lint -esym(621, _OKL4_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_WIDTH__R1_TRACEPOINT_DESC (4)
+
+
+/*lint -sem(_okl4_tracepoint_desc_getid, 1p, @n >= 0 && @n <= 255) */
+/*lint -esym(621, _okl4_tracepoint_desc_getid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getid(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 8;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setid, 2n >= 0 && 2n <= 255) */
+/*lint -esym(714, _okl4_tracepoint_desc_setid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setid(_okl4_tracepoint_desc_t *x, uint32_t _id)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 8;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_id;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getuser, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getuser) */
+/*lint -esym(714, _okl4_tracepoint_desc_getuser) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getuser(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setuser, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setuser) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setuser) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setuser(_okl4_tracepoint_desc_t *x, okl4_bool_t _user)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 8;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_user;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getbin, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, _okl4_tracepoint_desc_getbin) */
+/*lint -esym(714, _okl4_tracepoint_desc_getbin) */
+OKL4_FORCE_INLINE okl4_bool_t
+_okl4_tracepoint_desc_getbin(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_bool_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_bool_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setbin, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, _okl4_tracepoint_desc_setbin) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setbin) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setbin(_okl4_tracepoint_desc_t *x, okl4_bool_t _bin)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 9;
+            _Bool field : 1;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (_Bool)_bin;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getreclen, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getreclen) */
+/*lint -esym(714, _okl4_tracepoint_desc_getreclen) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getreclen(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 10;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setreclen, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setreclen) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setreclen) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setreclen(_okl4_tracepoint_desc_t *x, uint32_t _reclen)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 10;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_reclen;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getcpuid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getcpuid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getcpuid) */
+OKL4_FORCE_INLINE okl4_count_t
+_okl4_tracepoint_desc_getcpuid(const _okl4_tracepoint_desc_t *x)
+{
+    okl4_count_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (okl4_count_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setcpuid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setcpuid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setcpuid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setcpuid(_okl4_tracepoint_desc_t *x, okl4_count_t _cpuid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_cpuid;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getthreadid, 1p, @n >= 0 && @n <= 63) */
+/*lint -esym(621, _okl4_tracepoint_desc_getthreadid) */
+/*lint -esym(714, _okl4_tracepoint_desc_getthreadid) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getthreadid(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setthreadid, 2n >= 0 && 2n <= 63) */
+/*lint -esym(714, _okl4_tracepoint_desc_setthreadid) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setthreadid) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setthreadid(_okl4_tracepoint_desc_t *x, uint32_t _threadid)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 22;
+            uint32_t field : 6;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_threadid;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_desc_getr1, 1p, @n >= 0 && @n <= 15) */
+/*lint -esym(621, _okl4_tracepoint_desc_getr1) */
+/*lint -esym(714, _okl4_tracepoint_desc_getr1) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_desc_getr1(const _okl4_tracepoint_desc_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 4;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_desc_setr1, 2n >= 0 && 2n <= 15) */
+/*lint -esym(714, _okl4_tracepoint_desc_setr1) */
+
+/*lint -esym(621, _okl4_tracepoint_desc_setr1) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_setr1(_okl4_tracepoint_desc_t *x, uint32_t __r1)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 28;
+            uint32_t field : 4;
+        } bits;
+        _okl4_tracepoint_desc_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)__r1;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_desc_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_desc_init(_okl4_tracepoint_desc_t *x)
+{
+    *x = (_okl4_tracepoint_desc_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_desc_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_desc_t
+_okl4_tracepoint_desc_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_tracepoint_desc_t x = (_okl4_tracepoint_desc_t)p;
+    (void)force;
+    return x;
+}
+
+
+
+/**
+    - BITS 15..0 -   @ref _OKL4_MASK_CLASS_TRACEPOINT_MASKS
+    - BITS 31..16 -   @ref _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS
+*/
+
+/*lint -esym(621, _okl4_tracepoint_masks_t) */
+typedef uint32_t _okl4_tracepoint_masks_t;
+
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class);
+
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x);
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem);
+
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x);
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force);
+
+
+
+
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_CLASS_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_CLASS_MASK ((_okl4_tracepoint_masks_t)65535U) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_CLASS_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U)
+/*lint -esym(621, _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK) */
+#define _OKL4_TRACEPOINT_MASKS_SUBSYSTEM_MASK ((_okl4_tracepoint_masks_t)65535U << 16) /* Deprecated */
+/*lint -esym(621, _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_MASK_SUBSYSTEM_TRACEPOINT_MASKS ((_okl4_tracepoint_masks_t)65535U << 16)
+/*lint -esym(621, _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/*lint -sem(_okl4_tracepoint_masks_getclass, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getclass) */
+/*lint -esym(714, _okl4_tracepoint_masks_getclass) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getclass(const _okl4_tracepoint_masks_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setclass, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setclass) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setclass) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setclass(_okl4_tracepoint_masks_t *x, uint32_t _class)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_class;
+    *x = _conv.raw;
+}
+/*lint -sem(_okl4_tracepoint_masks_getsubsystem, 1p, @n >= 0 && @n <= 65535) */
+/*lint -esym(621, _okl4_tracepoint_masks_getsubsystem) */
+/*lint -esym(714, _okl4_tracepoint_masks_getsubsystem) */
+OKL4_FORCE_INLINE uint32_t
+_okl4_tracepoint_masks_getsubsystem(const _okl4_tracepoint_masks_t *x)
+{
+    uint32_t field;
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    field = (uint32_t)_conv.bits.field;
+    return field;
+}
+
+/*lint -sem(_okl4_tracepoint_masks_setsubsystem, 2n >= 0 && 2n <= 65535) */
+/*lint -esym(714, _okl4_tracepoint_masks_setsubsystem) */
+
+/*lint -esym(621, _okl4_tracepoint_masks_setsubsystem) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_setsubsystem(_okl4_tracepoint_masks_t *x, uint32_t _subsystem)
+{
+    union {
+        /*lint -e{806} -e{958} -e{959} */
+        struct {
+            uint32_t _skip : 16;
+            uint32_t field : 16;
+        } bits;
+        _okl4_tracepoint_masks_t raw;
+    } _conv;
+
+    _conv.raw = *x;
+    _conv.bits.field = (uint32_t)_subsystem;
+    *x = _conv.raw;
+}
+/*lint -esym(714, _okl4_tracepoint_masks_init) */
+OKL4_FORCE_INLINE void
+_okl4_tracepoint_masks_init(_okl4_tracepoint_masks_t *x)
+{
+    *x = (_okl4_tracepoint_masks_t)0U;
+}
+
+/*lint -esym(714, _okl4_tracepoint_masks_cast) */
+OKL4_FORCE_INLINE _okl4_tracepoint_masks_t
+_okl4_tracepoint_masks_cast(uint32_t p, okl4_bool_t force)
+{
+    _okl4_tracepoint_masks_t x = (_okl4_tracepoint_masks_t)p;
+    (void)force;
+    return x;
+}
+
+
+
+
+struct okl4_tracepoint_entry_base {
+    uint32_t time_offset;
+    _okl4_tracepoint_masks_t masks;
+    _okl4_tracepoint_desc_t description;
+};
+
+
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_evt_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE ((okl4_tracepoint_evt_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE ((okl4_tracepoint_evt_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH ((okl4_tracepoint_evt_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x3U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV ((okl4_tracepoint_evt_t)0x4U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED ((okl4_tracepoint_evt_t)0x5U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA ((okl4_tracepoint_evt_t)0x6U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE ((okl4_tracepoint_evt_t)0x7U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT ((okl4_tracepoint_evt_t)0x8U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA ((okl4_tracepoint_evt_t)0x9U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE ((okl4_tracepoint_evt_t)0xaU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT ((okl4_tracepoint_evt_t)0xbU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND ((okl4_tracepoint_evt_t)0xcU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK ((okl4_tracepoint_evt_t)0xdU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE ((okl4_tracepoint_evt_t)0xeU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED ((okl4_tracepoint_evt_t)0xfU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH ((okl4_tracepoint_evt_t)0x10U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE ((okl4_tracepoint_evt_t)0x11U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI ((okl4_tracepoint_evt_t)0x12U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING ((okl4_tracepoint_evt_t)0x13U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD ((okl4_tracepoint_evt_t)0x14U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS ((okl4_tracepoint_evt_t)0x15U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK ((okl4_tracepoint_evt_t)0x16U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x17U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT ((okl4_tracepoint_evt_t)0x18U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG ((okl4_tracepoint_evt_t)0x19U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL ((okl4_tracepoint_evt_t)0x1aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY ((okl4_tracepoint_evt_t)0x1bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK ((okl4_tracepoint_evt_t)0x1cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS ((okl4_tracepoint_evt_t)0x1dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK ((okl4_tracepoint_evt_t)0x1eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT ((okl4_tracepoint_evt_t)0x1fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x20U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL ((okl4_tracepoint_evt_t)0x21U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT ((okl4_tracepoint_evt_t)0x22U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT ((okl4_tracepoint_evt_t)0x23U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE ((okl4_tracepoint_evt_t)0x24U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN ((okl4_tracepoint_evt_t)0x25U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE ((okl4_tracepoint_evt_t)0x26U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN ((okl4_tracepoint_evt_t)0x27U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE ((okl4_tracepoint_evt_t)0x28U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN ((okl4_tracepoint_evt_t)0x29U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE ((okl4_tracepoint_evt_t)0x2aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN ((okl4_tracepoint_evt_t)0x2bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS ((okl4_tracepoint_evt_t)0x2cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS ((okl4_tracepoint_evt_t)0x2dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS ((okl4_tracepoint_evt_t)0x2eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS ((okl4_tracepoint_evt_t)0x2fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL ((okl4_tracepoint_evt_t)0x30U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL ((okl4_tracepoint_evt_t)0x31U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV ((okl4_tracepoint_evt_t)0x32U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND ((okl4_tracepoint_evt_t)0x33U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE ((okl4_tracepoint_evt_t)0x34U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER ((okl4_tracepoint_evt_t)0x35U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS ((okl4_tracepoint_evt_t)0x36U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 ((okl4_tracepoint_evt_t)0x37U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER ((okl4_tracepoint_evt_t)0x38U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS ((okl4_tracepoint_evt_t)0x39U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 ((okl4_tracepoint_evt_t)0x3aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED ((okl4_tracepoint_evt_t)0x3bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED ((okl4_tracepoint_evt_t)0x3cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE ((okl4_tracepoint_evt_t)0x3dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE ((okl4_tracepoint_evt_t)0x3eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA ((okl4_tracepoint_evt_t)0x3fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE ((okl4_tracepoint_evt_t)0x40U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE ((okl4_tracepoint_evt_t)0x41U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA ((okl4_tracepoint_evt_t)0x42U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND ((okl4_tracepoint_evt_t)0x43U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL ((okl4_tracepoint_evt_t)0x44U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION ((okl4_tracepoint_evt_t)0x45U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME ((okl4_tracepoint_evt_t)0x46U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY ((okl4_tracepoint_evt_t)0x47U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_START ((okl4_tracepoint_evt_t)0x48U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC ((okl4_tracepoint_evt_t)0x49U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET ((okl4_tracepoint_evt_t)0x4aU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_START ((okl4_tracepoint_evt_t)0x4bU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP ((okl4_tracepoint_evt_t)0x4cU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE ((okl4_tracepoint_evt_t)0x4dU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV ((okl4_tracepoint_evt_t)0x4eU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE ((okl4_tracepoint_evt_t)0x4fU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE ((okl4_tracepoint_evt_t)0x50U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY ((okl4_tracepoint_evt_t)0x51U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x52U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_MAX) */
+#define OKL4_TRACEPOINT_EVT_MAX ((okl4_tracepoint_evt_t)0x52U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_INVALID) */
+#define OKL4_TRACEPOINT_EVT_INVALID ((okl4_tracepoint_evt_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_evt_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) ||
+            (var == OKL4_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TIMER_START) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_START) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) ||
+            (var == OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE));
+}
+
+
+
+typedef uint32_t okl4_tracepoint_level_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_TRACEPOINT_LEVEL_DEBUG ((okl4_tracepoint_level_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_TRACEPOINT_LEVEL_INFO ((okl4_tracepoint_level_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_TRACEPOINT_LEVEL_WARN ((okl4_tracepoint_level_t)0x2U)
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_TRACEPOINT_LEVEL_CRITICAL ((okl4_tracepoint_level_t)0x3U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_TRACEPOINT_LEVEL_MAX ((okl4_tracepoint_level_t)0x3U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_TRACEPOINT_LEVEL_INVALID ((okl4_tracepoint_level_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_level_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_level_is_element_of(okl4_tracepoint_level_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_LEVEL_DEBUG) ||
+            (var == OKL4_TRACEPOINT_LEVEL_INFO) ||
+            (var == OKL4_TRACEPOINT_LEVEL_WARN) ||
+            (var == OKL4_TRACEPOINT_LEVEL_CRITICAL));
+}
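+
+/*
+ * Illustrative example (not part of the generated interface): the
+ * *_is_element_of() helpers are set-membership checks that can be used to
+ * validate untrusted enumeration values, for instance ones read back from
+ * shared memory.  The helper below is a hypothetical sketch of that
+ * pattern and relies only on names defined above.
+ */
+OKL4_FORCE_INLINE okl4_tracepoint_level_t
+_okl4_example_sanitize_tracepoint_level(uint32_t raw)
+{
+    okl4_tracepoint_level_t level = (okl4_tracepoint_level_t)raw;
+
+    /* Replace anything outside the defined set with the invalid marker. */
+    if (!okl4_tracepoint_level_is_element_of(level)) {
+        level = OKL4_TRACEPOINT_LEVEL_INVALID;
+    }
+    return level;
+}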
+
+
+
+typedef uint32_t okl4_tracepoint_mask_t;
+
+
+
+
+
+typedef uint32_t okl4_tracepoint_subsystem_t;
+
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER ((okl4_tracepoint_subsystem_t)0x0U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_TRACE ((okl4_tracepoint_subsystem_t)0x1U)
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_CORE ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_MAX ((okl4_tracepoint_subsystem_t)0x2U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_TRACEPOINT_SUBSYSTEM_INVALID ((okl4_tracepoint_subsystem_t)0xffffffffU)
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var);
+
+
+/*lint -esym(714, okl4_tracepoint_subsystem_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_tracepoint_subsystem_is_element_of(okl4_tracepoint_subsystem_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_TRACEPOINT_SUBSYSTEM_SCHEDULER) ||
+            (var == OKL4_TRACEPOINT_SUBSYSTEM_TRACE) ||
+            (var == OKL4_TRACEPOINT_SUBSYSTEM_CORE));
+}
+
+
+
+struct okl4_tracepoint_unpacked_entry {
+    struct okl4_tracepoint_entry_base entry;
+    uint32_t data[]; /*lint --e{9038} flex array */
+};
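+
+/*
+ * Illustrative note (not part of the generated interface): data[] is a C99
+ * flexible array member, so an unpacked entry carrying n payload words
+ * occupies sizeof(struct okl4_tracepoint_entry_base) + n * sizeof(uint32_t)
+ * bytes; sizeof(struct okl4_tracepoint_unpacked_entry) covers only the
+ * fixed header, as the size assertions at the end of this header confirm.
+ */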
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+    The okl4_vclient_info object describes a virtual client: its Axon
+    endpoint data and an opaque pointer.
+*/
+
+struct okl4_vclient_info {
+    struct okl4_axon_ep_data axon_ep;
+    __ptr64(void *, opaque);
+};
+
+
+
+
+/**
+    The okl4_vcpu_entry object describes a single vCPU: its vCPU and IPI
+    capabilities, its interrupt number and its initial stack pointer.
+*/
+
+struct okl4_vcpu_entry {
+    okl4_kcap_t vcpu;
+    okl4_kcap_t ipi;
+    okl4_interrupt_number_t irq;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    okl4_register_t stack_pointer;
+};
+
+
+
+
+
+typedef okl4_arm_mpidr_t okl4_vcpu_id_t;
+
+
+
+
+/**
+    The okl4_vcpu_table object is a table of vCPUs, consisting of a count
+    and a pointer to an array of okl4_vcpu_entry structures.
+*/
+
+struct okl4_vcpu_table {
+    okl4_count_t num_vcpus;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_vcpu_entry *, vcpu);
+};
+
+
+
+
+/**
+    The okl4_vfp_ctrl_registers object represents the set of control
+    registers in the ARM VFP unit.
+*/
+
+struct okl4_vfp_ctrl_registers {
+    uint32_t fpsr;
+    uint32_t fpcr;
+};
+
+
+
+
+
+
+/**
+    The okl4_vfp_ctrl_registers_t type represents the set of VFP control
+    registers (FPSR and FPCR) on the native machine.
+*/
+
+typedef struct okl4_vfp_ctrl_registers okl4_vfp_ctrl_registers_t;
+
+
+
+
+/**
+    The okl4_vfp_ops_t object represents the set of operations that may be
+    performed on the ARM VFP unit.
+
+    - @ref OKL4_VFP_OPS_MAX
+    - @ref OKL4_VFP_OPS_INVALID
+*/
+
+typedef uint32_t okl4_vfp_ops_t;
+
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_MAX) */
+#define OKL4_VFP_OPS_MAX ((okl4_vfp_ops_t)0x0U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VFP_OPS_INVALID) */
+#define OKL4_VFP_OPS_INVALID ((okl4_vfp_ops_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var);
+
+
+/*lint -esym(714, okl4_vfp_ops_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((okl4_bool_t)0);
+}
+
+
+
+struct okl4_vfp_register {
+    __attribute__((aligned(16))) uint8_t __bytes[16];
+};
+
+
+
+
+
+
+
+typedef struct okl4_vfp_register okl4_vfp_register_t;
+
+
+
+
+/**
+    The okl4_vfp_registers object represents the set of registers in the
+    ARM VFP unit, including the control registers.
+*/
+
+struct okl4_vfp_registers {
+    okl4_vfp_register_t v0;
+    okl4_vfp_register_t v1;
+    okl4_vfp_register_t v2;
+    okl4_vfp_register_t v3;
+    okl4_vfp_register_t v4;
+    okl4_vfp_register_t v5;
+    okl4_vfp_register_t v6;
+    okl4_vfp_register_t v7;
+    okl4_vfp_register_t v8;
+    okl4_vfp_register_t v9;
+    okl4_vfp_register_t v10;
+    okl4_vfp_register_t v11;
+    okl4_vfp_register_t v12;
+    okl4_vfp_register_t v13;
+    okl4_vfp_register_t v14;
+    okl4_vfp_register_t v15;
+    okl4_vfp_register_t v16;
+    okl4_vfp_register_t v17;
+    okl4_vfp_register_t v18;
+    okl4_vfp_register_t v19;
+    okl4_vfp_register_t v20;
+    okl4_vfp_register_t v21;
+    okl4_vfp_register_t v22;
+    okl4_vfp_register_t v23;
+    okl4_vfp_register_t v24;
+    okl4_vfp_register_t v25;
+    okl4_vfp_register_t v26;
+    okl4_vfp_register_t v27;
+    okl4_vfp_register_t v28;
+    okl4_vfp_register_t v29;
+    okl4_vfp_register_t v30;
+    okl4_vfp_register_t v31;
+    struct okl4_vfp_ctrl_registers control;
+    _okl4_padding_t __padding0_8; /**< Padding 16 */
+    _okl4_padding_t __padding1_9; /**< Padding 16 */
+    _okl4_padding_t __padding2_10; /**< Padding 16 */
+    _okl4_padding_t __padding3_11; /**< Padding 16 */
+    _okl4_padding_t __padding4_12; /**< Padding 16 */
+    _okl4_padding_t __padding5_13; /**< Padding 16 */
+    _okl4_padding_t __padding6_14; /**< Padding 16 */
+    _okl4_padding_t __padding7_15; /**< Padding 16 */
+};
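+
+/*
+ * Illustrative note (not part of the generated interface): the layout above
+ * is 32 SIMD/FP registers of 16 bytes each (512 bytes), followed by the
+ * 8-byte control register block and 8 bytes of explicit padding to keep the
+ * structure a multiple of its 16-byte alignment, giving the 528-byte total
+ * asserted at the end of this header.
+ */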
+
+
+
+
+
+
+/**
+    The okl4_vfp_registers_t type represents a set of VFP registers on
+    the native machine.
+*/
+
+typedef struct okl4_vfp_registers okl4_vfp_registers_t;
+
+
+
+
+/**
+    The okl4_virtmem_pool object describes a pool of virtual memory,
+    represented by a single okl4_virtmem_item.
+*/
+
+struct okl4_virtmem_pool {
+    struct okl4_virtmem_item pool;
+};
+
+
+
+
+/**
+    The okl4_virtual_interrupt_lines object describes a set of virtual
+    interrupt lines: a count and a pointer to an array of capabilities.
+*/
+
+struct okl4_virtual_interrupt_lines {
+    okl4_count_t num_lines;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(okl4_kcap_t *, lines);
+};
+
+
+
+
+/**
+    The okl4_vserver_info object describes a virtual server: its Axon
+    channel endpoints, the per-channel message limits and the number of
+    clients.
+*/
+
+struct okl4_vserver_info {
+    struct {
+        __ptr64(struct okl4_axon_ep_data *, data);
+        okl4_count_t max_messages;
+        _okl4_padding_t __padding0_4; /**< Padding 8 */
+        _okl4_padding_t __padding1_5; /**< Padding 8 */
+        _okl4_padding_t __padding2_6; /**< Padding 8 */
+        _okl4_padding_t __padding3_7; /**< Padding 8 */
+        okl4_ksize_t message_size;
+    } channels;
+
+    okl4_count_t num_clients;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+};
+
+
+
+
+/**
+    The okl4_vservices_service_descriptor object describes a single
+    VServices service by its name and protocol, plus a reserved field.
+*/
+
+struct okl4_vservices_service_descriptor {
+    __ptr64(okl4_string_t, name);
+    __ptr64(okl4_string_t, protocol);
+    __ptr64(void *, RESERVED);
+};
+
+
+
+
+
+typedef uint32_t okl4_vservices_transport_type_t;
+
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_AXON ((okl4_vservices_transport_type_t)0x0U)
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER ((okl4_vservices_transport_type_t)0x1U)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_MAX ((okl4_vservices_transport_type_t)0x1U)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_VSERVICES_TRANSPORT_TYPE_INVALID ((okl4_vservices_transport_type_t)0xffffffffU)
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var);
+
+
+/*lint -esym(714, okl4_vservices_transport_type_is_element_of) */
+OKL4_FORCE_INLINE okl4_bool_t
+okl4_vservices_transport_type_is_element_of(okl4_vservices_transport_type_t var)
+{
+    /*lint --e{944} Disable dead expression detection */
+    /*lint --e{948} --e{845} Disable constant always zero */
+    return ((var == OKL4_VSERVICES_TRANSPORT_TYPE_AXON) ||
+            (var == OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER));
+}
+
+
+/**
+    The okl4_vservices_transport_microvisor object describes a single
+    Microvisor VServices transport: its type, the transport-specific
+    configuration, the virtual interrupt lines used in each direction and
+    the services carried over it.
+*/
+
+struct okl4_vservices_transport_microvisor {
+    okl4_bool_t is_server;
+    _okl4_padding_t __padding0_1;
+    _okl4_padding_t __padding1_2;
+    _okl4_padding_t __padding2_3;
+    okl4_vservices_transport_type_t type;
+    union {
+        struct {
+            struct okl4_axon_ep_data ep;
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+        } axon;
+
+        struct {
+            okl4_ksize_t message_size;
+            okl4_count_t queue_length;
+            _okl4_padding_t __padding0_4; /**< Padding 8 */
+            _okl4_padding_t __padding1_5; /**< Padding 8 */
+            _okl4_padding_t __padding2_6; /**< Padding 8 */
+            _okl4_padding_t __padding3_7; /**< Padding 8 */
+            struct okl4_virtmem_item rx;
+            okl4_count_t rx_batch_size;
+            okl4_count_t rx_notify_bits;
+            struct okl4_virtmem_item tx;
+            okl4_count_t tx_batch_size;
+            okl4_count_t tx_notify_bits;
+        } shared_buffer;
+
+    } u;
+
+    struct okl4_virtual_interrupt_lines virqs_in;
+    struct okl4_virtual_interrupt_lines virqs_out;
+    okl4_count_t num_services;
+    _okl4_padding_t __padding3_4;
+    _okl4_padding_t __padding4_5;
+    _okl4_padding_t __padding5_6;
+    _okl4_padding_t __padding6_7;
+    __ptr64(struct okl4_vservices_service_descriptor *, services);
+};
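+
+/*
+ * Illustrative note (not part of the generated interface): the u union
+ * above appears to be discriminated by the type field; u.axon is the
+ * relevant member when type is OKL4_VSERVICES_TRANSPORT_TYPE_AXON, and
+ * u.shared_buffer when type is OKL4_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER.
+ */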
+
+
+
+
+/**
+    The okl4_vservices_transports object is a table of VServices
+    transports: a count and a pointer to an array of transport
+    descriptors.
+*/
+
+struct okl4_vservices_transports {
+    okl4_count_t num_transports;
+    _okl4_padding_t __padding0_4;
+    _okl4_padding_t __padding1_5;
+    _okl4_padding_t __padding2_6;
+    _okl4_padding_t __padding3_7;
+    __ptr64(struct okl4_vservices_transport_microvisor *, transports);
+};
+
+
+
+
+
+typedef struct okl4_axon_data okl4_axon_data_t;
+typedef struct okl4_axon_ep_data okl4_axon_ep_data_t;
+typedef struct okl4_range_item okl4_range_item_t;
+typedef struct okl4_virtmem_item okl4_virtmem_item_t;
+typedef struct okl4_cell_management_item okl4_cell_management_item_t;
+typedef struct okl4_cell_management okl4_cell_management_t;
+typedef struct okl4_segment_mapping okl4_segment_mapping_t;
+typedef struct okl4_cell_management_segments okl4_cell_management_segments_t;
+typedef struct okl4_cell_management_vcpus okl4_cell_management_vcpus_t;
+typedef struct _okl4_env okl4_env_t;
+typedef struct okl4_env_access_cell okl4_env_access_cell_t;
+typedef struct okl4_env_access_entry okl4_env_access_entry_t;
+typedef struct okl4_env_access_table okl4_env_access_table_t;
+typedef struct okl4_env_args okl4_env_args_t;
+typedef struct okl4_env_interrupt_device_map okl4_env_interrupt_device_map_t;
+typedef struct okl4_interrupt okl4_interrupt_t;
+typedef struct okl4_env_interrupt_handle okl4_env_interrupt_handle_t;
+typedef struct okl4_env_interrupt_list okl4_env_interrupt_list_t;
+typedef struct okl4_env_profile_cell okl4_env_profile_cell_t;
+typedef struct okl4_env_profile_cpu okl4_env_profile_cpu_t;
+typedef struct okl4_env_profile_table okl4_env_profile_table_t;
+typedef struct okl4_env_segment okl4_env_segment_t;
+typedef struct okl4_env_segment_table okl4_env_segment_table_t;
+typedef struct okl4_firmware_segment okl4_firmware_segment_t;
+typedef struct okl4_firmware_segments_info okl4_firmware_segments_info_t;
+typedef void (*okl4_irq_callback_t)(okl4_interrupt_number_t irq, void *opaque);
+typedef struct okl4_kmmu okl4_kmmu_t;
+typedef struct okl4_ksp_user_agent okl4_ksp_user_agent_t;
+typedef struct okl4_pipe_data okl4_pipe_data_t;
+typedef struct okl4_pipe_ep_data okl4_pipe_ep_data_t;
+typedef struct okl4_link okl4_link_t;
+typedef struct okl4_links okl4_links_t;
+typedef struct okl4_machine_info okl4_machine_info_t;
+typedef struct okl4_merged_physpool okl4_merged_physpool_t;
+typedef struct okl4_microvisor_timer okl4_microvisor_timer_t;
+typedef struct okl4_schedule_profile_data okl4_schedule_profile_data_t;
+typedef struct okl4_shared_buffer okl4_shared_buffer_t;
+typedef struct okl4_shared_buffers_array okl4_shared_buffers_array_t;
+typedef struct okl4_tracebuffer_env okl4_tracebuffer_env_t;
+typedef struct okl4_vclient_info okl4_vclient_info_t;
+typedef struct okl4_vcpu_entry okl4_vcpu_entry_t;
+typedef struct okl4_vcpu_table okl4_vcpu_table_t;
+typedef struct okl4_virtmem_pool okl4_virtmem_pool_t;
+typedef struct okl4_virtual_interrupt_lines okl4_virtual_interrupt_lines_t;
+typedef struct okl4_vserver_info okl4_vserver_info_t;
+typedef struct okl4_vservices_service_descriptor okl4_vservices_service_descriptor_t;
+typedef struct okl4_vservices_transport_microvisor okl4_vservices_transport_microvisor_t;
+typedef struct okl4_vservices_transports okl4_vservices_transports_t;
+
+/*
+ * Return structures from system calls.
+ */
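+/*
+ * Illustrative sketch (not part of the generated interface): the system
+ * call wrappers declared elsewhere in this header return these structures
+ * by value, so callers are expected to test the error member before using
+ * any other field.  Assuming a wrapper of the form
+ * _okl4_sys_timer_get_time(timer_cap) and an OKL4_ERROR_OK success value
+ * (both assumptions taken from the wider SDK, not from this hunk), usage
+ * would look roughly like:
+ *
+ *     struct _okl4_sys_timer_get_time_return r =
+ *             _okl4_sys_timer_get_time(timer_cap);
+ *     if (r.error == OKL4_ERROR_OK) {
+ *         current_ticks = r.time;
+ *     }
+ */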
+/*lint -save -e958 -e959 implicit padding */
+struct _okl4_sys_axon_process_recv_return {
+    okl4_error_t error;
+    okl4_bool_t send_empty;
+};
+
+struct _okl4_sys_axon_set_halted_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_area_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_queue_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_recv_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_area_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_queue_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_set_send_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_axon_trigger_send_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_ack_return {
+    okl4_interrupt_number_t irq;
+    uint8_t source;
+};
+
+struct _okl4_sys_interrupt_attach_private_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_attach_shared_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_detach_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_dist_enable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_eoi_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_get_highest_priority_pending_return {
+    okl4_interrupt_number_t irq;
+    uint8_t source;
+};
+
+struct _okl4_sys_interrupt_get_payload_return {
+    okl4_error_t error;
+    okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_interrupt_limits_return {
+    okl4_count_t cpunumber;
+    okl4_count_t itnumber;
+};
+
+struct _okl4_sys_interrupt_mask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_raise_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_binary_point_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_config_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_control_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_priority_mask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_set_targets_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_interrupt_unmask_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_kdb_set_object_name_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_ksp_procedure_call_return {
+    okl4_error_t error;
+    okl4_ksp_arg_t ret0;
+    okl4_ksp_arg_t ret1;
+    okl4_ksp_arg_t ret2;
+};
+
+struct _okl4_sys_mmu_attach_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_detach_segment_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_flush_range_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_lookup_page_return {
+    okl4_error_t error;
+    okl4_psize_tr_t offset;
+    okl4_mmu_lookup_size_t size;
+    _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_lookup_pn_return {
+    okl4_mmu_lookup_index_t segment_index;
+    okl4_psize_pn_t offset_pn;
+    okl4_lsize_pn_t count_pn;
+    _okl4_page_attribute_t page_attr;
+};
+
+struct _okl4_sys_mmu_map_page_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_map_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_page_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_unmap_pn_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_attrs_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_page_perms_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_attrs_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_mmu_update_pn_perms_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_performance_null_syscall_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_control_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_pipe_recv_return {
+    okl4_error_t error;
+    okl4_ksize_t size;
+};
+
+struct _okl4_sys_pipe_send_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_priority_waive_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_register_return {
+    uint32_t reg_w0;
+    uint32_t reg_w1;
+    uint32_t reg_w2;
+    uint32_t reg_w3;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_get_registers_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_read_memory32_return {
+    uint32_t data;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_register_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_set_registers_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_remote_write_memory32_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_metrics_status_suspended_return {
+    okl4_error_t error;
+    uint32_t power_suspend_version;
+    uint32_t power_suspend_running_count;
+};
+
+struct _okl4_sys_schedule_metrics_watch_suspended_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_disable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_cpu_enable_return {
+    okl4_error_t error;
+    uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_cpu_get_data_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_disable_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_enable_return {
+    okl4_error_t error;
+    uint64_t timestamp;
+};
+
+struct _okl4_sys_schedule_profile_vcpu_get_data_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_scheduler_suspend_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_cancel_return {
+    uint64_t remaining;
+    okl4_timer_flags_t old_flags;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_resolution_return {
+    uint64_t tick_freq;
+    uint32_t a;
+    uint32_t b;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_get_time_return {
+    uint64_t time;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_query_return {
+    uint64_t remaining;
+    okl4_timer_flags_t active_flags;
+    okl4_error_t error;
+};
+
+struct _okl4_sys_timer_start_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_reset_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_start_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_stop_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vcpu_switch_mode_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_clear_and_raise_return {
+    okl4_error_t error;
+    okl4_virq_flags_t payload;
+};
+
+struct _okl4_sys_vinterrupt_modify_return {
+    okl4_error_t error;
+};
+
+struct _okl4_sys_vinterrupt_raise_return {
+    okl4_error_t error;
+};
+
+/*lint -restore */
+
+/*
+ * Ensure type sizes have been correctly calculated by the
+ * code generator.  We test to see if the C compiler agrees
+ * with us about the size of the type.
+ */
+
+#if !defined(GLOBAL_STATIC_ASSERT)
+#if defined(__cplusplus)
+/* FIX: we should be able to use static_assert, but it doesn't compile */
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#else
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#define GLOBAL_STATIC_ASSERT(expr, msg) \
+        _Static_assert(expr, #msg);
+#else
+#define GLOBAL_STATIC_ASSERT(expr, msg)
+#endif
+#endif
+#endif
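+
+/*
+ * Illustrative note (not part of the generated interface): on GCC >= 4.6,
+ * or any compiler providing _Static_assert, GLOBAL_STATIC_ASSERT(expr, msg)
+ * expands to _Static_assert(expr, "msg") and breaks the build on a size or
+ * alignment mismatch; in C++ and on older compilers it expands to nothing,
+ * so the checks below are only advisory there.  A hand-written check would
+ * follow the same pattern, e.g. (hypothetical):
+ *
+ *     GLOBAL_STATIC_ASSERT(sizeof(okl4_error_t) == 4U,
+ *             example_confused_about_sizeof_error_t)
+ */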
+
+
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_mpidr_t) == 8U,
+        __autogen_confused_about_sizeof_arm_mpidr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_mpidr_t) == 8U,
+        __autogen_confused_about_alignof_arm_mpidr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_function_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_function_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_result_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_result_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_suspend_state_t) == 4U,
+        __autogen_confused_about_sizeof_arm_psci_suspend_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_suspend_state_t) == 4U,
+        __autogen_confused_about_alignof_arm_psci_suspend_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_sctlr_t) == 4U,
+        __autogen_confused_about_sizeof_arm_sctlr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_sctlr_t) == 4U,
+        __autogen_confused_about_alignof_arm_sctlr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_arch_function_t) == 4U,
+        __autogen_confused_about_sizeof_arm_smccc_arch_function)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_arch_function_t) == 4U,
+        __autogen_confused_about_alignof_arm_smccc_arch_function)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_smccc_result_t) == 4U,
+        __autogen_confused_about_sizeof_arm_smccc_result)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_smccc_result_t) == 4U,
+        __autogen_confused_about_alignof_arm_smccc_result)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_register) == 8U,
+        __autogen_confused_about_sizeof_atomic_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_register) == 8U,
+        __autogen_confused_about_alignof_atomic_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_register_t) == 8U,
+        __autogen_confused_about_sizeof_atomic_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_register_t) == 8U,
+        __autogen_confused_about_alignof_atomic_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint16) == 2U,
+        __autogen_confused_about_sizeof_atomic_uint16)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint16) == 2U,
+        __autogen_confused_about_alignof_atomic_uint16)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint16_t) == 2U,
+        __autogen_confused_about_sizeof_atomic_uint16_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint16_t) == 2U,
+        __autogen_confused_about_alignof_atomic_uint16_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint32) == 4U,
+        __autogen_confused_about_sizeof_atomic_uint32)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint32) == 4U,
+        __autogen_confused_about_alignof_atomic_uint32)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint32_t) == 4U,
+        __autogen_confused_about_sizeof_atomic_uint32_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint32_t) == 4U,
+        __autogen_confused_about_alignof_atomic_uint32_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint64) == 8U,
+        __autogen_confused_about_sizeof_atomic_uint64)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint64) == 8U,
+        __autogen_confused_about_alignof_atomic_uint64)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint64_t) == 8U,
+        __autogen_confused_about_sizeof_atomic_uint64_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint64_t) == 8U,
+        __autogen_confused_about_alignof_atomic_uint64_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_atomic_uint8) == 1U,
+        __autogen_confused_about_sizeof_atomic_uint8)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_atomic_uint8) == 1U,
+        __autogen_confused_about_alignof_atomic_uint8)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_atomic_uint8_t) == 1U,
+        __autogen_confused_about_sizeof_atomic_uint8_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_atomic_uint8_t) == 1U,
+        __autogen_confused_about_alignof_atomic_uint8_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_data) == 12U,
+        __autogen_confused_about_sizeof_axon_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_data) == 4U,
+        __autogen_confused_about_alignof_axon_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_data_info_t) == 8U,
+        __autogen_confused_about_sizeof_axon_data_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_data_info_t) == 8U,
+        __autogen_confused_about_alignof_axon_data_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_ep_data) == 24U,
+        __autogen_confused_about_sizeof_axon_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_ep_data) == 4U,
+        __autogen_confused_about_alignof_axon_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue) == 12U,
+        __autogen_confused_about_sizeof_axon_queue)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue) == 4U,
+        __autogen_confused_about_alignof_axon_queue)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_queue_entry) == 24U,
+        __autogen_confused_about_sizeof_axon_queue_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_queue_entry) == 8U,
+        __autogen_confused_about_alignof_axon_queue_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_queue_size_t) == 2U,
+        __autogen_confused_about_sizeof_axon_queue_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_queue_size_t) == 2U,
+        __autogen_confused_about_alignof_axon_queue_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_rx) == 56U,
+        __autogen_confused_about_sizeof_axon_rx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_rx) == 4U,
+        __autogen_confused_about_alignof_axon_rx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_axon_tx) == 48U,
+        __autogen_confused_about_sizeof_axon_tx)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_axon_tx) == 4U,
+        __autogen_confused_about_alignof_axon_tx)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_axon_virq_flags_t) == 8U,
+        __autogen_confused_about_sizeof_axon_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_axon_virq_flags_t) == 8U,
+        __autogen_confused_about_alignof_axon_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_cache_t) == 4U,
+        __autogen_confused_about_sizeof_cache_attr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_cache_t) == 4U,
+        __autogen_confused_about_alignof_cache_attr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_range_item) == 16U,
+        __autogen_confused_about_sizeof_range_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_range_item) == 8U,
+        __autogen_confused_about_alignof_range_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_item) == 16U,
+        __autogen_confused_about_sizeof_virtmem_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_item) == 8U,
+        __autogen_confused_about_alignof_virtmem_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_item) == 104U,
+        __autogen_confused_about_sizeof_cell_management_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_item) == 8U,
+        __autogen_confused_about_alignof_cell_management_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management) == 8U,
+        __autogen_confused_about_sizeof_cell_management)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management) == 8U,
+        __autogen_confused_about_alignof_cell_management)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_segment_mapping) == 32U,
+        __autogen_confused_about_sizeof_segment_mapping)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_segment_mapping) == 8U,
+        __autogen_confused_about_alignof_segment_mapping)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_segments) == 8U,
+        __autogen_confused_about_sizeof_cell_management_segments)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_segments) == 8U,
+        __autogen_confused_about_alignof_cell_management_segments)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cell_management_vcpus) == 4U,
+        __autogen_confused_about_sizeof_cell_management_vcpus)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cell_management_vcpus) == 4U,
+        __autogen_confused_about_alignof_cell_management_vcpus)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_mode_t) == 4U,
+        __autogen_confused_about_sizeof_cpu_mode)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_mode_t) == 4U,
+        __autogen_confused_about_alignof_cpu_mode)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_hdr) == 4U,
+        __autogen_confused_about_sizeof_env_hdr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_hdr) == 2U,
+        __autogen_confused_about_alignof_env_hdr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env_item) == 16U,
+        __autogen_confused_about_sizeof_env_item)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env_item) == 8U,
+        __autogen_confused_about_alignof_env_item)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_env) == 8U,
+        __autogen_confused_about_sizeof_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_env) == 8U,
+        __autogen_confused_about_alignof_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_cell) == 16U,
+        __autogen_confused_about_sizeof_env_access_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_cell) == 8U,
+        __autogen_confused_about_alignof_env_access_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_page_perms_t) == 4U,
+        __autogen_confused_about_sizeof_page_perms)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_page_perms_t) == 4U,
+        __autogen_confused_about_alignof_page_perms)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_entry) == 48U,
+        __autogen_confused_about_sizeof_env_access_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_entry) == 8U,
+        __autogen_confused_about_alignof_env_access_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_access_table) == 24U,
+        __autogen_confused_about_sizeof_env_access_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_access_table) == 8U,
+        __autogen_confused_about_alignof_env_access_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_args) == 8U,
+        __autogen_confused_about_sizeof_env_args)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_args) == 8U,
+        __autogen_confused_about_alignof_env_args)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_device_map) == 4U,
+        __autogen_confused_about_sizeof_env_interrupt_device_map)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_device_map) == 4U,
+        __autogen_confused_about_alignof_env_interrupt_device_map)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_interrupt) == 4U,
+        __autogen_confused_about_sizeof_okl4_interrupt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_interrupt) == 4U,
+        __autogen_confused_about_alignof_okl4_interrupt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_handle) == 8U,
+        __autogen_confused_about_sizeof_env_interrupt_handle)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_handle) == 4U,
+        __autogen_confused_about_alignof_env_interrupt_handle)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_interrupt_list) == 24U,
+        __autogen_confused_about_sizeof_env_interrupt_list)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_interrupt_list) == 8U,
+        __autogen_confused_about_alignof_env_interrupt_list)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cell) == 48U,
+        __autogen_confused_about_sizeof_env_profile_cell)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cell) == 8U,
+        __autogen_confused_about_alignof_env_profile_cell)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_cpu) == 4U,
+        __autogen_confused_about_sizeof_env_profile_cpu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_cpu) == 4U,
+        __autogen_confused_about_alignof_env_profile_cpu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_profile_table) == 16U,
+        __autogen_confused_about_sizeof_env_profile_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_profile_table) == 8U,
+        __autogen_confused_about_alignof_env_profile_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment) == 24U,
+        __autogen_confused_about_sizeof_env_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment) == 8U,
+        __autogen_confused_about_alignof_env_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_env_segment_table) == 8U,
+        __autogen_confused_about_sizeof_env_segment_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_env_segment_table) == 8U,
+        __autogen_confused_about_alignof_env_segment_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_error_t) == 4U,
+        __autogen_confused_about_sizeof_error_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_error_t) == 4U,
+        __autogen_confused_about_alignof_error_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segment) == 32U,
+        __autogen_confused_about_sizeof_firmware_segment)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segment) == 8U,
+        __autogen_confused_about_alignof_firmware_segment)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_firmware_segments_info) == 8U,
+        __autogen_confused_about_sizeof_firmware_segments_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_firmware_segments_info) == 8U,
+        __autogen_confused_about_alignof_firmware_segments_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_icfgr_t) == 4U,
+        __autogen_confused_about_sizeof_gicd_icfgr)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_icfgr_t) == 4U,
+        __autogen_confused_about_alignof_gicd_icfgr)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sgi_target_t) == 4U,
+        __autogen_confused_about_sizeof_sgi_target)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sgi_target_t) == 4U,
+        __autogen_confused_about_alignof_sgi_target)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_gicd_sgir_t) == 4U,
+        __autogen_confused_about_sizeof_gicd_sgir)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_gicd_sgir_t) == 4U,
+        __autogen_confused_about_alignof_gicd_sgir)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_kmmu) == 4U,
+        __autogen_confused_about_sizeof_kmmu)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_kmmu) == 4U,
+        __autogen_confused_about_alignof_kmmu)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_ksp_user_agent) == 8U,
+        __autogen_confused_about_sizeof_ksp_user_agent)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_ksp_user_agent) == 4U,
+        __autogen_confused_about_alignof_ksp_user_agent)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_data) == 8U,
+        __autogen_confused_about_sizeof_pipe_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_data) == 4U,
+        __autogen_confused_about_alignof_pipe_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_pipe_ep_data) == 16U,
+        __autogen_confused_about_sizeof_pipe_ep_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_pipe_ep_data) == 4U,
+        __autogen_confused_about_alignof_pipe_ep_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_role_t) == 4U,
+        __autogen_confused_about_sizeof_link_role)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_role_t) == 4U,
+        __autogen_confused_about_alignof_link_role)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_link_transport_type_t) == 4U,
+        __autogen_confused_about_sizeof_link_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_link_transport_type_t) == 4U,
+        __autogen_confused_about_alignof_link_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_link) == 80U,
+        __autogen_confused_about_sizeof_link)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_link) == 8U,
+        __autogen_confused_about_alignof_link)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_links) == 8U,
+        __autogen_confused_about_sizeof_links)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_links) == 8U,
+        __autogen_confused_about_alignof_links)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_machine_info) == 24U,
+        __autogen_confused_about_sizeof_machine_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_machine_info) == 8U,
+        __autogen_confused_about_alignof_machine_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_merged_physpool) == 16U,
+        __autogen_confused_about_sizeof_merged_physpool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_merged_physpool) == 8U,
+        __autogen_confused_about_alignof_merged_physpool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_microvisor_timer) == 8U,
+        __autogen_confused_about_sizeof_microvisor_timer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_microvisor_timer) == 4U,
+        __autogen_confused_about_alignof_microvisor_timer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_index_t) == 4U,
+        __autogen_confused_about_sizeof_mmu_lookup_index)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_index_t) == 4U,
+        __autogen_confused_about_alignof_mmu_lookup_index)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_mmu_lookup_size_t) == 8U,
+        __autogen_confused_about_sizeof_mmu_lookup_size)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_mmu_lookup_size_t) == 8U,
+        __autogen_confused_about_alignof_mmu_lookup_size)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_page_attribute_t) == 4U,
+        __autogen_confused_about_sizeof_page_attribute)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_page_attribute_t) == 4U,
+        __autogen_confused_about_alignof_page_attribute)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_control_t) == 1U,
+        __autogen_confused_about_sizeof_pipe_control)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_control_t) == 1U,
+        __autogen_confused_about_alignof_pipe_control)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_pipe_state_t) == 1U,
+        __autogen_confused_about_sizeof_pipe_state)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_pipe_state_t) == 1U,
+        __autogen_confused_about_alignof_pipe_state)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_set_t) == 4U,
+        __autogen_confused_about_sizeof_register_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_set_t) == 4U,
+        __autogen_confused_about_alignof_register_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_register_and_set_t) == 4U,
+        __autogen_confused_about_sizeof_register_and_set)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_register_and_set_t) == 4U,
+        __autogen_confused_about_alignof_register_and_set)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_cpu_registers) == 448U,
+        __autogen_confused_about_sizeof_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_cpu_registers) == 8U,
+        __autogen_confused_about_alignof_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_cpu_registers_t) == 448U,
+        __autogen_confused_about_sizeof_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_cpu_registers_t) == 8U,
+        __autogen_confused_about_alignof_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_schedule_profile_data) == 32U,
+        __autogen_confused_about_sizeof_schedule_profile_data)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_schedule_profile_data) == 8U,
+        __autogen_confused_about_alignof_schedule_profile_data)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_scheduler_virq_flags_t) == 8U,
+        __autogen_confused_about_sizeof_scheduler_virq_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_scheduler_virq_flags_t) == 8U,
+        __autogen_confused_about_alignof_scheduler_virq_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_sdk_version_t) == 4U,
+        __autogen_confused_about_sizeof_sdk_version)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_sdk_version_t) == 4U,
+        __autogen_confused_about_alignof_sdk_version)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffer) == 32U,
+        __autogen_confused_about_sizeof_shared_buffer)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffer) == 8U,
+        __autogen_confused_about_alignof_shared_buffer)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_shared_buffers_array) == 16U,
+        __autogen_confused_about_sizeof_shared_buffers_array)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_shared_buffers_array) == 8U,
+        __autogen_confused_about_alignof_shared_buffers_array)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_timer_flags_t) == 4U,
+        __autogen_confused_about_sizeof_timer_flags)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_timer_flags_t) == 4U,
+        __autogen_confused_about_alignof_timer_flags)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_buffer_header) == 40U,
+        __autogen_confused_about_sizeof_tracebuffer_buffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_buffer_header) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_buffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracebuffer_env) == 24U,
+        __autogen_confused_about_sizeof_tracebuffer_env)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracebuffer_env) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_env)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct _okl4_tracebuffer_header) == 40U,
+        __autogen_confused_about_sizeof_tracebuffer_header)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct _okl4_tracebuffer_header) == 8U,
+        __autogen_confused_about_alignof_tracebuffer_header)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_class_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_class)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_class_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_class)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_desc_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_desc)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_desc_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_desc)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(_okl4_tracepoint_masks_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_masks)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(_okl4_tracepoint_masks_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_masks)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_entry_base) == 12U,
+        __autogen_confused_about_sizeof_tracepoint_entry_base)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_entry_base) == 4U,
+        __autogen_confused_about_alignof_tracepoint_entry_base)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_evt_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_evt)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_evt_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_evt)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_level_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_level)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_level_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_level)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_tracepoint_subsystem_t) == 4U,
+        __autogen_confused_about_sizeof_tracepoint_subsystem)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_tracepoint_subsystem_t) == 4U,
+        __autogen_confused_about_alignof_tracepoint_subsystem)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_tracepoint_unpacked_entry) == 12U,
+        __autogen_confused_about_sizeof_tracepoint_unpacked_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_tracepoint_unpacked_entry) == 4U,
+        __autogen_confused_about_alignof_tracepoint_unpacked_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vclient_info) == 32U,
+        __autogen_confused_about_sizeof_vclient_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vclient_info) == 8U,
+        __autogen_confused_about_alignof_vclient_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_entry) == 24U,
+        __autogen_confused_about_sizeof_vcpu_entry)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_entry) == 8U,
+        __autogen_confused_about_alignof_vcpu_entry)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vcpu_table) == 16U,
+        __autogen_confused_about_sizeof_vcpu_table)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vcpu_table) == 8U,
+        __autogen_confused_about_alignof_vcpu_table)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_ctrl_registers) == 8U,
+        __autogen_confused_about_sizeof_vfp_ctrl_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_ctrl_registers) == 4U,
+        __autogen_confused_about_alignof_vfp_ctrl_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ctrl_registers_t) == 8U,
+        __autogen_confused_about_sizeof_vfp_ctrl_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ctrl_registers_t) == 4U,
+        __autogen_confused_about_alignof_vfp_ctrl_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_ops_t) == 4U,
+        __autogen_confused_about_sizeof_vfp_ops)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_ops_t) == 4U,
+        __autogen_confused_about_alignof_vfp_ops)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_register) == 16U,
+        __autogen_confused_about_sizeof_vfp_register)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_register) == 16U,
+        __autogen_confused_about_alignof_vfp_register)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_register_t) == 16U,
+        __autogen_confused_about_sizeof_vfp_register_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_register_t) == 16U,
+        __autogen_confused_about_alignof_vfp_register_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vfp_registers) == 528U,
+        __autogen_confused_about_sizeof_vfp_registers)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vfp_registers) == 16U,
+        __autogen_confused_about_alignof_vfp_registers)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vfp_registers_t) == 528U,
+        __autogen_confused_about_sizeof_vfp_registers_t)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vfp_registers_t) == 16U,
+        __autogen_confused_about_alignof_vfp_registers_t)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtmem_pool) == 16U,
+        __autogen_confused_about_sizeof_virtmem_pool)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtmem_pool) == 8U,
+        __autogen_confused_about_alignof_virtmem_pool)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_virtual_interrupt_lines) == 16U,
+        __autogen_confused_about_sizeof_virtual_interrupt_lines)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_virtual_interrupt_lines) == 8U,
+        __autogen_confused_about_alignof_virtual_interrupt_lines)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vserver_info) == 32U,
+        __autogen_confused_about_sizeof_vserver_info)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vserver_info) == 8U,
+        __autogen_confused_about_alignof_vserver_info)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_service_descriptor) == 24U,
+        __autogen_confused_about_sizeof_vservices_service_descriptor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_service_descriptor) == 8U,
+        __autogen_confused_about_alignof_vservices_service_descriptor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(okl4_vservices_transport_type_t) == 4U,
+        __autogen_confused_about_sizeof_vservices_transport_type)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(okl4_vservices_transport_type_t) == 4U,
+        __autogen_confused_about_alignof_vservices_transport_type)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transport_microvisor) == 120U,
+        __autogen_confused_about_sizeof_vservices_transport_microvisor)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transport_microvisor) == 8U,
+        __autogen_confused_about_alignof_vservices_transport_microvisor)
+#endif
+GLOBAL_STATIC_ASSERT(sizeof(struct okl4_vservices_transports) == 16U,
+        __autogen_confused_about_sizeof_vservices_transports)
+#if !defined(LINTER)
+GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
+        __autogen_confused_about_alignof_vservices_transports)
+#endif
+
+#else
+
+/**
+ *  okl4_arm_mpidr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF0_ARM_MPIDR (255)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF0_ARM_MPIDR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF0_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF0_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF1_ARM_MPIDR (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF1_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF1_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF2_ARM_MPIDR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF2_ARM_MPIDR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF2_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF2_ARM_MPIDR (8)
+/*lint -esym(621, OKL4_ASM_MASK_MT_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MT_ARM_MPIDR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MT_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MT_ARM_MPIDR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MT_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MT_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_U_ARM_MPIDR) */
+#define OKL4_ASM_MASK_U_ARM_MPIDR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_U_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_U_ARM_MPIDR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_U_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_U_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_MP_ARM_MPIDR) */
+#define OKL4_ASM_MASK_MP_ARM_MPIDR (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_MP_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_MP_ARM_MPIDR (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_MP_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_MP_ARM_MPIDR (1)
+/*lint -esym(621, OKL4_ASM_MASK_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_MASK_AFF3_ARM_MPIDR (255 << 32)
+/*lint -esym(621, OKL4_ASM_SHIFT_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_SHIFT_AFF3_ARM_MPIDR (32)
+/*lint -esym(621, OKL4_ASM_WIDTH_AFF3_ARM_MPIDR) */
+#define OKL4_ASM_WIDTH_AFF3_ARM_MPIDR (8)
+
+
+/**
+ *  uint32_t
+ **/
+/*lint -esym(621, OKL4_AXON_NUM_RECEIVE_QUEUES) */
+#define OKL4_AXON_NUM_RECEIVE_QUEUES (4)
+
+/*lint -esym(621, OKL4_AXON_NUM_SEND_QUEUES) */
+#define OKL4_AXON_NUM_SEND_QUEUES (4)
+
+/*lint -esym(621, _OKL4_POISON) */
+#define _OKL4_POISON (3735928559)
+
+/*lint -esym(621, OKL4_TRACEBUFFER_INVALID_REF) */
+#define OKL4_TRACEBUFFER_INVALID_REF (-1)
+
+/**
+ *  okl4_arm_psci_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_SUSPEND (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_OFF (0x2)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_ON (0x3)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_AFFINITY_INFO (0x4)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE (0x5)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_TYPE (0x6)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MIGRATE_INFO_UP_CPU (0x7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_OFF (0x8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET (0x9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_FEATURES (0xa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_FREEZE (0xb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_CPU_DEFAULT_SUSPEND (0xc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_NODE_HW_STATE (0xd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND (0xe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE (0xf)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY (0x10)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT (0x11)
+
+/**
+ *  okl4_arm_psci_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_ADDRESS (0xfffffff7)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DISABLED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DISABLED (0xfffffff8)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_PRESENT (0xfffffff9)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INTERNAL_FAILURE (0xfffffffa)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ON_PENDING (0xfffffffb)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON) */
+#define OKL4_ASM_ARM_PSCI_RESULT_ALREADY_ON (0xfffffffc)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_DENIED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_DENIED (0xfffffffd)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS) */
+#define OKL4_ASM_ARM_PSCI_RESULT_INVALID_PARAMETERS (0xfffffffe)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ *  okl4_arm_psci_suspend_state_t
+ **/
+
+/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
+#define OKL4_ARM_PSCI_POWER_LEVEL_CPU (0)
+
+/*lint -esym(621, OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
+#define OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
+
+
+/**
+ *  okl4_arm_sctlr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_MMU_ENABLE_ARM_SCTLR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_MMU_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DATA_CACHE_ENABLE_ARM_SCTLR (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DATA_CACHE_ENABLE_ARM_SCTLR (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DATA_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_ARM_SCTLR (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_ARM_SCTLR (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_MASK_STACK_ALIGN_EL0_ARM_SCTLR (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_STACK_ALIGN_EL0_ARM_SCTLR (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_STACK_ALIGN_EL0_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_CP15_BARRIER_ENABLE_ARM_SCTLR (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_CP15_BARRIER_ENABLE_ARM_SCTLR (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_CP15_BARRIER_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_MASK_OKL_HCR_EL2_DC_ARM_SCTLR (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_OKL_HCR_EL2_DC_ARM_SCTLR (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_OKL_HCR_EL2_DC_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_IT_DISABLE_ARM_SCTLR (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_IT_DISABLE_ARM_SCTLR (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_IT_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_SETEND_DISABLE_ARM_SCTLR (1 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_SETEND_DISABLE_ARM_SCTLR (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_SETEND_DISABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_MASK_ACCESS_ARM_SCTLR (1 << 9)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_MASK_ACCESS_ARM_SCTLR (9)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_MASK_ACCESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED11_ARM_SCTLR (1 << 11)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED11_ARM_SCTLR (11)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED11_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1 << 12)
+/*lint -esym(621, OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (12)
+/*lint -esym(621, OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_INSTRUCTION_CACHE_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_MASK_VECTORS_BIT_ARM_SCTLR (1 << 13)
+/*lint -esym(621, OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_VECTORS_BIT_ARM_SCTLR (13)
+/*lint -esym(621, OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_VECTORS_BIT_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_MASK_DCACHE_ZERO_ARM_SCTLR (1 << 14)
+/*lint -esym(621, OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_DCACHE_ZERO_ARM_SCTLR (14)
+/*lint -esym(621, OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_DCACHE_ZERO_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_CACHE_TYPE_ARM_SCTLR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_CACHE_TYPE_ARM_SCTLR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_CACHE_TYPE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFI_ARM_SCTLR (1 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFI_ARM_SCTLR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFI_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_NO_TRAP_WFE_ARM_SCTLR (1 << 18)
+/*lint -esym(621, OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_NO_TRAP_WFE_ARM_SCTLR (18)
+/*lint -esym(621, OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_NO_TRAP_WFE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 19)
+/*lint -esym(621, OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_WRITE_EXEC_NEVER_ARM_SCTLR (19)
+/*lint -esym(621, OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1 << 20)
+/*lint -esym(621, OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_USER_WRITE_EXEC_NEVER_ARM_SCTLR (20)
+/*lint -esym(621, OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_USER_WRITE_EXEC_NEVER_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED22_ARM_SCTLR (1 << 22)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR (22)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_MASK_RESERVED23_ARM_SCTLR (1 << 23)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR (1 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EL0_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR (1 << 25)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_EXCEPTION_ENDIANNESS_ARM_SCTLR (25)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_EXCEPTION_ENDIANNESS_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_TEX_REMAP_ENABLE_ARM_SCTLR (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_TEX_REMAP_ENABLE_ARM_SCTLR (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_TEX_REMAP_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_ACCESS_FLAG_ENABLE_ARM_SCTLR (1 << 29)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_ACCESS_FLAG_ENABLE_ARM_SCTLR (29)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_ACCESS_FLAG_ENABLE_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_MASK_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_THUMB_EXCEPTION_ENABLE_ARM_SCTLR (1)
+
+
+/**
+ *  okl4_arm_smccc_arch_function_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_SMCCC_VERSION (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_FEATURES (0x1)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1) */
+#define OKL4_ASM_ARM_SMCCC_ARCH_FUNCTION_ARCH_WORKAROUND_1 (0x8000)
+
+/**
+ *  okl4_arm_smccc_result_t
+ **/
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_SUCCESS (0x0)
+/*lint -esym(621, OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED) */
+#define OKL4_ASM_ARM_SMCCC_RESULT_NOT_SUPPORTED (0xffffffff)
+
+/**
+ *  okl4_count_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGEBITS) */
+#define OKL4_DEFAULT_PAGEBITS (12)
+
+/** The maximum limit for segment index returned in mmu_lookup_segment. */
+/*lint -esym(621, OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK) */
+#define OKL4_KMMU_LOOKUP_PAGE_SEGMENT_MASK (1023)
+
+/** The maximum limit for segment attachments to a KMMU. */
+/*lint -esym(621, OKL4_KMMU_MAX_SEGMENTS) */
+#define OKL4_KMMU_MAX_SEGMENTS (256)
+
+/*lint -esym(621, OKL4_PROFILE_NO_PCPUS) */
+#define OKL4_PROFILE_NO_PCPUS (-1)
+
+/**
+ *  okl4_kcap_t
+ **/
+/*lint -esym(621, OKL4_KCAP_INVALID) */
+#define OKL4_KCAP_INVALID (-1)
+
+/**
+ *  okl4_interrupt_number_t
+ **/
+/*lint -esym(621, OKL4_INTERRUPT_INVALID_IRQ) */
+#define OKL4_INTERRUPT_INVALID_IRQ (1023)
+
+/*lint -esym(621, OKL4_INVALID_VIRQ) */
+#define OKL4_INVALID_VIRQ (1023)
+
+/**
+ *  okl4_lsize_t
+ **/
+/*lint -esym(621, OKL4_DEFAULT_PAGESIZE) */
+#define OKL4_DEFAULT_PAGESIZE (4096)
+
+/**
+ *  okl4_laddr_t
+ **/
+/*lint -esym(621, OKL4_USER_AREA_END) */
+#define OKL4_USER_AREA_END (17592186044416)
+
+/**
+ *  okl4_axon_data_info_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_PENDING_AXON_DATA_INFO (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_PENDING_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_FAILURE_AXON_DATA_INFO (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_FAILURE_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_USR_AXON_DATA_INFO (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_USR_AXON_DATA_INFO (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_USR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_USR_AXON_DATA_INFO (1)
+/*lint -esym(621, OKL4_ASM_MASK_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_MASK_LADDR_AXON_DATA_INFO (2305843009213693951 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_SHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_PRESHIFT_LADDR_AXON_DATA_INFO (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO) */
+#define OKL4_ASM_WIDTH_LADDR_AXON_DATA_INFO (61)
+
+
+/**
+ *  okl4_axon_queue_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_ALLOC_ORDER_AXON_QUEUE_SIZE (31)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_ALLOC_ORDER_AXON_QUEUE_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_ALLOC_ORDER_AXON_QUEUE_SIZE (5)
+/*lint -esym(621, OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_MASK_MIN_ORDER_AXON_QUEUE_SIZE (31 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_SHIFT_MIN_ORDER_AXON_QUEUE_SIZE (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE) */
+#define OKL4_ASM_WIDTH_MIN_ORDER_AXON_QUEUE_SIZE (5)
+
+
+/**
+ *  okl4_axon_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_READY_AXON_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_READY_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_FAULT_AXON_VIRQ_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_FAULT_AXON_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_FAULT_AXON_VIRQ_FLAGS (1)
+
+
+/**
+ *  okl4_page_cache_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITECOMBINE) */
+#define OKL4_ASM_PAGE_CACHE_WRITECOMBINE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEFAULT) */
+#define OKL4_ASM_PAGE_CACHE_DEFAULT (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_RX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_RX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IPC_TX) */
+#define OKL4_ASM_PAGE_CACHE_IPC_TX (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_TRACEBUFFER) */
+#define OKL4_ASM_PAGE_CACHE_TRACEBUFFER (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITEBACK) */
+#define OKL4_ASM_PAGE_CACHE_WRITEBACK (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC) */
+#define OKL4_ASM_PAGE_CACHE_IWB_RWA_ONC (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_WRITETHROUGH) */
+#define OKL4_ASM_PAGE_CACHE_WRITETHROUGH (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_GRE (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE_NGRE (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_DEVICE) */
+#define OKL4_ASM_PAGE_CACHE_DEVICE (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_STRONG) */
+#define OKL4_ASM_PAGE_CACHE_STRONG (0x7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRNE (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_MASK) */
+#define OKL4_ASM_PAGE_CACHE_HW_MASK (0x8000000)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGNRE (0x8000004)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_NGRE (0x8000008)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE) */
+#define OKL4_ASM_PAGE_CACHE_HW_DEVICE_GRE (0x800000c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_NSH (0x8000011)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_NSH (0x8000012)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_NSH (0x8000013)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_NSH (0x8000014)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_NSH (0x8000015)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_NSH (0x8000016)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_NSH (0x8000017)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_NSH (0x8000018)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_NSH (0x8000019)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_NSH (0x800001a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_NSH (0x800001b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_NSH (0x800001c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_NSH (0x800001d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_NSH (0x800001e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_NSH (0x800001f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_NSH (0x8000021)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_NSH (0x8000022)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_NSH (0x8000023)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_NSH (0x8000024)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_NSH (0x8000025)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_NSH (0x8000026)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_NSH (0x8000027)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_NSH (0x8000028)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_NSH (0x8000029)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_NSH (0x800002a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_NSH (0x800002b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_NSH (0x800002c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_NSH (0x800002d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_NSH (0x800002e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_NSH (0x800002f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_NSH (0x8000031)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_NSH (0x8000032)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_NSH (0x8000033)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_NSH (0x8000034)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_NSH (0x8000035)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_NSH (0x8000036)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_NSH (0x8000037)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_NSH (0x8000038)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_NSH (0x8000039)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_NSH (0x800003a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_NSH (0x800003b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_NSH (0x800003c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_NSH (0x800003d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_NSH (0x800003e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_NSH (0x800003f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_NSH (0x8000041)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_NSH (0x8000042)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_NSH (0x8000043)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_NSH (0x8000044)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_NSH (0x8000045)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_NSH (0x8000046)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_NSH (0x8000047)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_NSH (0x8000048)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_NSH (0x8000049)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_NSH (0x800004a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_NSH (0x800004b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_NSH (0x800004c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_NSH (0x800004d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_NSH (0x800004e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_NSH (0x800004f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_NSH (0x8000051)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_NSH (0x8000052)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_NSH (0x8000053)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_NSH (0x8000054)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_NSH (0x8000055)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_NSH (0x8000056)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_NSH (0x8000057)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_NSH (0x8000058)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_NSH (0x8000059)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_NSH (0x800005a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_NSH (0x800005b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_NSH (0x800005c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_NSH (0x800005d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_NSH (0x800005e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_NSH (0x800005f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_NSH (0x8000061)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_NSH (0x8000062)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_NSH (0x8000063)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_NSH (0x8000064)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_NSH (0x8000065)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_NSH (0x8000066)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_NSH (0x8000067)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_NSH (0x8000068)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_NSH (0x8000069)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_NSH (0x800006a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_NSH (0x800006b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_NSH (0x800006c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_NSH (0x800006d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_NSH (0x800006e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_NSH (0x800006f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_NSH (0x8000071)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_NSH (0x8000072)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_NSH (0x8000073)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_NSH (0x8000074)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_NSH (0x8000075)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_NSH (0x8000076)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_NSH (0x8000077)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_NSH (0x8000078)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_NSH (0x8000079)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_NSH (0x800007a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_NSH (0x800007b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_NSH (0x800007c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_NSH (0x800007d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_NSH (0x800007e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_NSH (0x800007f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_NSH (0x8000081)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_NSH (0x8000082)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_NSH (0x8000083)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_NSH (0x8000084)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_NSH (0x8000085)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_NSH (0x8000086)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_NSH (0x8000087)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_NSH (0x8000088)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_NSH (0x8000089)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_NSH (0x800008a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_NSH (0x800008b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_NSH (0x800008c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_NSH (0x800008d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_NSH (0x800008e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_NSH (0x800008f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_NSH (0x8000091)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_NSH (0x8000092)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_NSH (0x8000093)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_NSH (0x8000094)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_NSH (0x8000095)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_NSH (0x8000096)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_NSH (0x8000097)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_NSH (0x8000098)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_NSH (0x8000099)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_NSH (0x800009a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_NSH (0x800009b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_NSH (0x800009c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_NSH (0x800009d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_NSH (0x800009e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_NSH (0x800009f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_NSH (0x80000a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_NSH (0x80000a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_NSH (0x80000a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_NSH (0x80000a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_NSH (0x80000a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_NSH (0x80000a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_NSH (0x80000a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_NSH (0x80000a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_NSH (0x80000a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_NSH (0x80000aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_NSH (0x80000ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_NSH (0x80000ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_NSH (0x80000ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_NSH (0x80000ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_NSH (0x80000af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_NSH (0x80000b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_NSH (0x80000b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_NSH (0x80000b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_NSH (0x80000b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_NSH (0x80000b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_NSH (0x80000b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_NSH (0x80000b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_NSH (0x80000b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_NSH (0x80000b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_NSH (0x80000ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_NSH (0x80000bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_NSH (0x80000bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_NSH (0x80000bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_NSH (0x80000be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_NSH (0x80000bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_NSH (0x80000c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_NSH (0x80000c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_NSH (0x80000c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_NSH (0x80000c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_NSH (0x80000c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_NSH (0x80000c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_NSH (0x80000c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_NSH (0x80000c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_NSH (0x80000c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_NSH (0x80000ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_NSH (0x80000cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_NSH (0x80000cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_NSH (0x80000cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_NSH (0x80000ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_NSH (0x80000cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_NSH (0x80000d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_NSH (0x80000d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_NSH (0x80000d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_NSH (0x80000d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_NSH (0x80000d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_NSH (0x80000d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_NSH (0x80000d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_NSH (0x80000d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_NSH (0x80000d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_NSH (0x80000da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_NSH (0x80000db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_NSH (0x80000dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_NSH (0x80000dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_NSH (0x80000de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_NSH (0x80000df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_NSH (0x80000e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_NSH (0x80000e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_NSH (0x80000e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_NSH (0x80000e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_NSH (0x80000e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_NSH (0x80000e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_NSH (0x80000e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_NSH (0x80000e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_NSH (0x80000e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_NSH (0x80000ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_NSH (0x80000eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_NSH (0x80000ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_NSH (0x80000ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_NSH (0x80000ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_NSH (0x80000ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_NSH (0x80000f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_NSH (0x80000f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_NSH (0x80000f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_NSH (0x80000f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_NSH (0x80000f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_NSH (0x80000f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_NSH (0x80000f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_NSH (0x80000f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_NSH (0x80000f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_NSH (0x80000fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_NSH (0x80000fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_NSH (0x80000fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_NSH (0x80000fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_NSH (0x80000fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_NSH (0x80000ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_OSH (0x8000211)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_OSH (0x8000212)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_OSH (0x8000213)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_OSH (0x8000214)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_OSH (0x8000215)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_OSH (0x8000216)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_OSH (0x8000217)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_OSH (0x8000218)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_OSH (0x8000219)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_OSH (0x800021a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_OSH (0x800021b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_OSH (0x800021c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_OSH (0x800021d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_OSH (0x800021e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_OSH (0x800021f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_OSH (0x8000221)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_OSH (0x8000222)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_OSH (0x8000223)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_OSH (0x8000224)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_OSH (0x8000225)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_OSH (0x8000226)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_OSH (0x8000227)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_OSH (0x8000228)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_OSH (0x8000229)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_OSH (0x800022a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_OSH (0x800022b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_OSH (0x800022c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_OSH (0x800022d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_OSH (0x800022e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_OSH (0x800022f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_OSH (0x8000231)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_OSH (0x8000232)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_OSH (0x8000233)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_OSH (0x8000234)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_OSH (0x8000235)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_OSH (0x8000236)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_OSH (0x8000237)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_OSH (0x8000238)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_OSH (0x8000239)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_OSH (0x800023a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_OSH (0x800023b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_OSH (0x800023c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_OSH (0x800023d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_OSH (0x800023e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_OSH (0x800023f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_OSH (0x8000241)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_OSH (0x8000242)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_OSH (0x8000243)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_OSH (0x8000244)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_OSH (0x8000245)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_OSH (0x8000246)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_OSH (0x8000247)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_OSH (0x8000248)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_OSH (0x8000249)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_OSH (0x800024a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_OSH (0x800024b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_OSH (0x800024c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_OSH (0x800024d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_OSH (0x800024e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_OSH (0x800024f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_OSH (0x8000251)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_OSH (0x8000252)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_OSH (0x8000253)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_OSH (0x8000254)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_OSH (0x8000255)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_OSH (0x8000256)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_OSH (0x8000257)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_OSH (0x8000258)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_OSH (0x8000259)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_OSH (0x800025a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_OSH (0x800025b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_OSH (0x800025c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_OSH (0x800025d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_OSH (0x800025e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_OSH (0x800025f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_OSH (0x8000261)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_OSH (0x8000262)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_OSH (0x8000263)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_OSH (0x8000264)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_OSH (0x8000265)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_OSH (0x8000266)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_OSH (0x8000267)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_OSH (0x8000268)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_OSH (0x8000269)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_OSH (0x800026a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_OSH (0x800026b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_OSH (0x800026c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_OSH (0x800026d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_OSH (0x800026e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_OSH (0x800026f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_OSH (0x8000271)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_OSH (0x8000272)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_OSH (0x8000273)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_OSH (0x8000274)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_OSH (0x8000275)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_OSH (0x8000276)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_OSH (0x8000277)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_OSH (0x8000278)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_OSH (0x8000279)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_OSH (0x800027a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_OSH (0x800027b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_OSH (0x800027c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_OSH (0x800027d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_OSH (0x800027e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_OSH (0x800027f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_OSH (0x8000281)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_OSH (0x8000282)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_OSH (0x8000283)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_OSH (0x8000284)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_OSH (0x8000285)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_OSH (0x8000286)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_OSH (0x8000287)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_OSH (0x8000288)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_OSH (0x8000289)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_OSH (0x800028a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_OSH (0x800028b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_OSH (0x800028c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_OSH (0x800028d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_OSH (0x800028e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_OSH (0x800028f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_OSH (0x8000291)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_OSH (0x8000292)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_OSH (0x8000293)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_OSH (0x8000294)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_OSH (0x8000295)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_OSH (0x8000296)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_OSH (0x8000297)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_OSH (0x8000298)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_OSH (0x8000299)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_OSH (0x800029a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_OSH (0x800029b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_OSH (0x800029c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_OSH (0x800029d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_OSH (0x800029e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_OSH (0x800029f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_OSH (0x80002a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_OSH (0x80002a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_OSH (0x80002a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_OSH (0x80002a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_OSH (0x80002a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_OSH (0x80002a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_OSH (0x80002a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_OSH (0x80002a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_OSH (0x80002a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_OSH (0x80002aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_OSH (0x80002ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_OSH (0x80002ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_OSH (0x80002ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_OSH (0x80002ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_OSH (0x80002af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_OSH (0x80002b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_OSH (0x80002b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_OSH (0x80002b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_OSH (0x80002b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_OSH (0x80002b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_OSH (0x80002b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_OSH (0x80002b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_OSH (0x80002b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_OSH (0x80002b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_OSH (0x80002ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_OSH (0x80002bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_OSH (0x80002bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_OSH (0x80002bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_OSH (0x80002be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_OSH (0x80002bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_OSH (0x80002c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_OSH (0x80002c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_OSH (0x80002c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_OSH (0x80002c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_OSH (0x80002c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_OSH (0x80002c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_OSH (0x80002c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_OSH (0x80002c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_OSH (0x80002c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_OSH (0x80002ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_OSH (0x80002cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_OSH (0x80002cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_OSH (0x80002cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_OSH (0x80002ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_OSH (0x80002cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_OSH (0x80002d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_OSH (0x80002d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_OSH (0x80002d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_OSH (0x80002d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_OSH (0x80002d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_OSH (0x80002d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_OSH (0x80002d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_OSH (0x80002d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_OSH (0x80002d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_OSH (0x80002da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_OSH (0x80002db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_OSH (0x80002dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_OSH (0x80002dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_OSH (0x80002de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_OSH (0x80002df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_OSH (0x80002e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_OSH (0x80002e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_OSH (0x80002e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_OSH (0x80002e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_OSH (0x80002e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_OSH (0x80002e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_OSH (0x80002e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_OSH (0x80002e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_OSH (0x80002e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_OSH (0x80002ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_OSH (0x80002eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_OSH (0x80002ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_OSH (0x80002ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_OSH (0x80002ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_OSH (0x80002ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_OSH (0x80002f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_OSH (0x80002f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_OSH (0x80002f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_OSH (0x80002f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_OSH (0x80002f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_OSH (0x80002f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_OSH (0x80002f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_OSH (0x80002f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_OSH (0x80002f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_OSH (0x80002fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_OSH (0x80002fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_OSH (0x80002fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_OSH (0x80002fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_OSH (0x80002fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_OSH (0x80002ff)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_WA_ISH (0x8000311)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_WA_ISH (0x8000312)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_WA_ISH (0x8000313)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_WA_ISH (0x8000314)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_WA_ISH (0x8000315)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_WA_ISH (0x8000316)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_WA_ISH (0x8000317)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_WA_ISH (0x8000318)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_WA_ISH (0x8000319)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_WA_ISH (0x800031a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_WA_ISH (0x800031b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_WA_ISH (0x800031c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_WA_ISH (0x800031d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_WA_ISH (0x800031e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_WA_ISH (0x800031f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RA_ISH (0x8000321)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RA_ISH (0x8000322)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWT_RA_ISH (0x8000323)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RA_ISH (0x8000324)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RA_ISH (0x8000325)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RA_ISH (0x8000326)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RA_ISH (0x8000327)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RA_ISH (0x8000328)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RA_ISH (0x8000329)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RA_ISH (0x800032a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RA_ISH (0x800032b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RA_ISH (0x800032c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RA_ISH (0x800032d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RA_ISH (0x800032e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RA_ISH (0x800032f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWT_RWA_ISH (0x8000331)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWT_RWA_ISH (0x8000332)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWT_RWA_ISH (0x8000333)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWT_RWA_ISH (0x8000334)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWT_RWA_ISH (0x8000335)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWT_RWA_ISH (0x8000336)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWT_RWA_ISH (0x8000337)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWT_RWA_ISH (0x8000338)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWT_RWA_ISH (0x8000339)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWT_RWA_ISH (0x800033a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWT_RWA_ISH (0x800033b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWT_RWA_ISH (0x800033c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWT_RWA_ISH (0x800033d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWT_RWA_ISH (0x800033e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWT_RWA_ISH (0x800033f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_ONC_ISH (0x8000341)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_ONC_ISH (0x8000342)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_ONC_ISH (0x8000343)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_NC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_NC_ISH (0x8000344)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_ONC_ISH (0x8000345)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_ONC_ISH (0x8000346)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_ONC_ISH (0x8000347)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_ONC_ISH (0x8000348)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_ONC_ISH (0x8000349)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_ONC_ISH (0x800034a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_ONC_ISH (0x800034b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_ONC_ISH (0x800034c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_ONC_ISH (0x800034d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_ONC_ISH (0x800034e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_ONC_ISH (0x800034f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_WA_ISH (0x8000351)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_WA_ISH (0x8000352)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_WA_ISH (0x8000353)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_WA_ISH (0x8000354)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_WA_ISH (0x8000355)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_WA_ISH (0x8000356)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_WA_ISH (0x8000357)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_WA_ISH (0x8000358)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_WA_ISH (0x8000359)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_WA_ISH (0x800035a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_WA_ISH (0x800035b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_WA_ISH (0x800035c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_WA_ISH (0x800035d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_WA_ISH (0x800035e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_WA_ISH (0x800035f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RA_ISH (0x8000361)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RA_ISH (0x8000362)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RA_ISH (0x8000363)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RA_ISH (0x8000364)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RA_ISH (0x8000365)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RA_ISH (0x8000366)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OTWB_RA_ISH (0x8000367)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RA_ISH (0x8000368)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RA_ISH (0x8000369)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RA_ISH (0x800036a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RA_ISH (0x800036b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RA_ISH (0x800036c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RA_ISH (0x800036d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RA_ISH (0x800036e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RA_ISH (0x800036f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OTWB_RWA_ISH (0x8000371)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OTWB_RWA_ISH (0x8000372)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OTWB_RWA_ISH (0x8000373)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OTWB_RWA_ISH (0x8000374)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OTWB_RWA_ISH (0x8000375)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OTWB_RWA_ISH (0x8000376)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_TWB_RWA_ISH (0x8000377)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OTWB_RWA_ISH (0x8000378)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OTWB_RWA_ISH (0x8000379)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OTWB_RWA_ISH (0x800037a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OTWB_RWA_ISH (0x800037b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OTWB_RWA_ISH (0x800037c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OTWB_RWA_ISH (0x800037d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OTWB_RWA_ISH (0x800037e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OTWB_RWA_ISH (0x800037f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_NA_ISH (0x8000381)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_NA_ISH (0x8000382)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_NA_ISH (0x8000383)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_NA_ISH (0x8000384)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_NA_ISH (0x8000385)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_NA_ISH (0x8000386)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_NA_ISH (0x8000387)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_NA_ISH (0x8000388)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_NA_ISH (0x8000389)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_NA_ISH (0x800038a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_NA_ISH (0x800038b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_NA_ISH (0x800038c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_NA_ISH (0x800038d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_NA_ISH (0x800038e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_NA_ISH (0x800038f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_WA_ISH (0x8000391)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_WA_ISH (0x8000392)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_WA_ISH (0x8000393)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_WA_ISH (0x8000394)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_WA_ISH (0x8000395)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_WA_ISH (0x8000396)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_WA_ISH (0x8000397)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_WA_ISH (0x8000398)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_WA_ISH (0x8000399)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_WA_ISH (0x800039a)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_WA_ISH (0x800039b)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_WA_ISH (0x800039c)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_WA_ISH (0x800039d)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_WA_ISH (0x800039e)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_WA_ISH (0x800039f)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RA_ISH (0x80003a1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RA_ISH (0x80003a2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RA_ISH (0x80003a3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RA_ISH (0x80003a4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RA_ISH (0x80003a5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RA_ISH (0x80003a6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RA_ISH (0x80003a7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RA_ISH (0x80003a8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RA_ISH (0x80003a9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RA_ISH (0x80003aa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWT_RA_ISH (0x80003ab)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RA_ISH (0x80003ac)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RA_ISH (0x80003ad)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RA_ISH (0x80003ae)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RA_ISH (0x80003af)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWT_RWA_ISH (0x80003b1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWT_RWA_ISH (0x80003b2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWT_RWA_ISH (0x80003b3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWT_RWA_ISH (0x80003b4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWT_RWA_ISH (0x80003b5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWT_RWA_ISH (0x80003b6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWT_RWA_ISH (0x80003b7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWT_RWA_ISH (0x80003b8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWT_RWA_ISH (0x80003b9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWT_RWA_ISH (0x80003ba)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WT_RWA_ISH (0x80003bb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWT_RWA_ISH (0x80003bc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWT_RWA_ISH (0x80003bd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWT_RWA_ISH (0x80003be)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWT_RWA_ISH (0x80003bf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_NA_ISH (0x80003c1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_NA_ISH (0x80003c2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_NA_ISH (0x80003c3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_NA_ISH (0x80003c4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_NA_ISH (0x80003c5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_NA_ISH (0x80003c6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_NA_ISH (0x80003c7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_NA_ISH (0x80003c8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_NA_ISH (0x80003c9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_NA_ISH (0x80003ca)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_NA_ISH (0x80003cb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_NA_ISH (0x80003cc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_NA_ISH (0x80003cd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_NA_ISH (0x80003ce)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_NA_ISH (0x80003cf)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_WA_ISH (0x80003d1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_WA_ISH (0x80003d2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_WA_ISH (0x80003d3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_WA_ISH (0x80003d4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_WA_ISH (0x80003d5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_WA_ISH (0x80003d6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_WA_ISH (0x80003d7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_WA_ISH (0x80003d8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_WA_ISH (0x80003d9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_WA_ISH (0x80003da)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_WA_ISH (0x80003db)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_WA_ISH (0x80003dc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_WA_ISH (0x80003dd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_WA_ISH (0x80003de)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_WA_ISH (0x80003df)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RA_ISH (0x80003e1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RA_ISH (0x80003e2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RA_ISH (0x80003e3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RA_ISH (0x80003e4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RA_ISH (0x80003e5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RA_ISH (0x80003e6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RA_ISH (0x80003e7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RA_ISH (0x80003e8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RA_ISH (0x80003e9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RA_ISH (0x80003ea)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RA_ISH (0x80003eb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RA_ISH (0x80003ec)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RA_ISH (0x80003ed)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RA_ISH (0x80003ee)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RWA_OWB_RA_ISH (0x80003ef)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_WA_OWB_RWA_ISH (0x80003f1)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RA_OWB_RWA_ISH (0x80003f2)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWT_RWA_OWB_RWA_ISH (0x80003f3)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_INC_OWB_RWA_ISH (0x80003f4)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_WA_OWB_RWA_ISH (0x80003f5)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RA_OWB_RWA_ISH (0x80003f6)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_ITWB_RWA_OWB_RWA_ISH (0x80003f7)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_NA_OWB_RWA_ISH (0x80003f8)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_WA_OWB_RWA_ISH (0x80003f9)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RA_OWB_RWA_ISH (0x80003fa)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWT_RWA_OWB_RWA_ISH (0x80003fb)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_NA_OWB_RWA_ISH (0x80003fc)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_WA_OWB_RWA_ISH (0x80003fd)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_IWB_RA_OWB_RWA_ISH (0x80003fe)
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH) */
+#define OKL4_ASM_PAGE_CACHE_HW_WB_RWA_ISH (0x80003ff)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_MAX) */
+#define OKL4_ASM_PAGE_CACHE_MAX (0x80003ff)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_CACHE_INVALID) */
+#define OKL4_ASM_PAGE_CACHE_INVALID (0xffffffff)
+
+/**
+ *  okl4_cpu_exec_mode
+ **/
+/*lint -esym(621, OKL4_ARM_MODE) */
+#define OKL4_ARM_MODE (0)
+
+/*lint -esym(621, OKL4_DEFAULT_MODE) */
+#define OKL4_DEFAULT_MODE (4)
+
+/*lint -esym(621, OKL4_JAZELLE_MODE) */
+#define OKL4_JAZELLE_MODE (2)
+
+/*lint -esym(621, OKL4_THUMBEE_MODE) */
+#define OKL4_THUMBEE_MODE (3)
+
+/*lint -esym(621, OKL4_THUMB_MODE) */
+#define OKL4_THUMB_MODE (1)
+
+/**
+ *  okl4_cpu_mode_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_MASK_EXEC_MODE_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE) */
+#define OKL4_ASM_WIDTH_EXEC_MODE_CPU_MODE (3)
+/*lint -esym(621, OKL4_ASM_MASK_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_MASK_ENDIAN_CPU_MODE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_SHIFT_ENDIAN_CPU_MODE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_ENDIAN_CPU_MODE) */
+#define OKL4_ASM_WIDTH_ENDIAN_CPU_MODE (1)
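
The MASK/SHIFT/WIDTH triplets above describe the okl4_cpu_mode_t bit-field layout: EXEC_MODE occupies bits [2:0] and ENDIAN occupies bit 7, with the masks apparently given in-place (already shifted). As an editorial sketch only, not part of this patch, accessors built from just these macros might look like the following; the helper names are hypothetical and the meaning of the ENDIAN bit is assumed.

/* Editorial sketch: uses only the OKL4_ASM_* macros defined above. */
static inline unsigned int
okl4_cpu_mode_get_exec_mode(unsigned int mode)  /* hypothetical helper */
{
        /* Apply the in-place mask, then shift the field down to bit 0. */
        return (mode & OKL4_ASM_MASK_EXEC_MODE_CPU_MODE)
                        >> OKL4_ASM_SHIFT_EXEC_MODE_CPU_MODE;
}

static inline unsigned int
okl4_cpu_mode_set_endian_bit(unsigned int mode) /* hypothetical helper */
{
        /* Sets bit 7; which endianness that selects is an assumption here. */
        return mode | OKL4_ASM_MASK_ENDIAN_CPU_MODE;
}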
+
+
+/**
+ *  okl4_page_perms_t
+ **/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_NONE) */
+#define OKL4_ASM_PAGE_PERMS_NONE (0x0)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_X) */
+#define OKL4_ASM_PAGE_PERMS_X (0x1)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_W) */
+#define OKL4_ASM_PAGE_PERMS_W (0x2)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_WX) */
+#define OKL4_ASM_PAGE_PERMS_WX (0x3)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_R) */
+#define OKL4_ASM_PAGE_PERMS_R (0x4)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RX) */
+#define OKL4_ASM_PAGE_PERMS_RX (0x5)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RW) */
+#define OKL4_ASM_PAGE_PERMS_RW (0x6)
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_RWX) */
+#define OKL4_ASM_PAGE_PERMS_RWX (0x7)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_MAX) */
+#define OKL4_ASM_PAGE_PERMS_MAX (0x7)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_PAGE_PERMS_INVALID) */
+#define OKL4_ASM_PAGE_PERMS_INVALID (0xffffffff)
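
The okl4_page_perms_t values are plain permission flags, so the combined constants are bitwise ORs of X (0x1), W (0x2) and R (0x4); for example OKL4_ASM_PAGE_PERMS_RW (0x6) equals OKL4_ASM_PAGE_PERMS_R | OKL4_ASM_PAGE_PERMS_W. The check below is an editorial sketch, not part of the patch, and the helper name is hypothetical.

/* Editorial sketch: uses only the OKL4_ASM_PAGE_PERMS_* macros defined above. */
static inline int
okl4_page_perms_is_writable(unsigned int perms)  /* hypothetical helper */
{
        /* True for W, WX, RW and RWX. */
        return (perms & OKL4_ASM_PAGE_PERMS_W) != 0;
}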
+
+/**
+ *  okl4_error_t
+ **/
+/**
+    KSP returned OK
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_OK) */
+#define OKL4_ASM_ERROR_KSP_OK (0x0)
+/**
+    The operation succeeded
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_OK) */
+#define OKL4_ASM_ERROR_OK (0x0)
+/**
+    The target vCPU was already running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STARTED) */
+#define OKL4_ASM_ERROR_ALREADY_STARTED (0x1)
+/**
+    The target vCPU was not running.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALREADY_STOPPED) */
+#define OKL4_ASM_ERROR_ALREADY_STOPPED (0x2)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_AREA_TOO_BIG) */
+#define OKL4_ASM_ERROR_AXON_AREA_TOO_BIG (0x3)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE) */
+#define OKL4_ASM_ERROR_AXON_BAD_MESSAGE_SIZE (0x4)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_INVALID_OFFSET) */
+#define OKL4_ASM_ERROR_AXON_INVALID_OFFSET (0x5)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_MAPPED (0x6)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY) */
+#define OKL4_ASM_ERROR_AXON_QUEUE_NOT_READY (0x7)
+/*lint -esym(621, OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED) */
+#define OKL4_ASM_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED (0x8)
+/**
+    A blocking operation was cancelled due to an abort of the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_CANCELLED) */
+#define OKL4_ASM_ERROR_CANCELLED (0x9)
+/**
+    The operation failed due to an existing mapping.  Mapping
+    operations must not overlap an existing mapping.  Unmapping
+    must be performed at the same size as the original mapping.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_EXISTING_MAPPING) */
+#define OKL4_ASM_ERROR_EXISTING_MAPPING (0xa)
+/**
+    The operation requested with a segment failed due to
+    insufficient rights in the segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS) */
+#define OKL4_ASM_ERROR_INSUFFICIENT_SEGMENT_RIGHTS (0xb)
+/**
+    The operation did not complete because it was interrupted by a
+    preemption.  This error value is only used internally.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPTED) */
+#define OKL4_ASM_ERROR_INTERRUPTED (0xc)
+/**
+    Attempt to attach an interrupt to an IRQ number, when the
+    interrupt is already attached to an IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_ALREADY_ATTACHED (0xd)
+/**
+    Attempt to use an IRQ number that is out of range, of
+    the wrong type, or not in the correct state
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ) */
+#define OKL4_ASM_ERROR_INTERRUPT_INVALID_IRQ (0xe)
+/**
+    Attempt to operate on an unknown IRQ number
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED) */
+#define OKL4_ASM_ERROR_INTERRUPT_NOT_ATTACHED (0xf)
+/**
+    An invalid argument was provided.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_ARGUMENT) */
+#define OKL4_ASM_ERROR_INVALID_ARGUMENT (0x10)
+/**
+    The operation failed because one of the arguments does not refer to a
+    valid object.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_DESIGNATOR) */
+#define OKL4_ASM_ERROR_INVALID_DESIGNATOR (0x11)
+/**
+    The operation failed because the power_state
+    argument is invalid.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_POWER_STATE) */
+#define OKL4_ASM_ERROR_INVALID_POWER_STATE (0x12)
+/**
+    The operation failed because the given segment index does
+    not correspond to an attached physical segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX) */
+#define OKL4_ASM_ERROR_INVALID_SEGMENT_INDEX (0x13)
+/**
+    A user provided address produced a read or write fault in the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MEMORY_FAULT) */
+#define OKL4_ASM_ERROR_MEMORY_FAULT (0x14)
+/**
+    The operation failed because there is no mapping at the
+    specified location.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MISSING_MAPPING) */
+#define OKL4_ASM_ERROR_MISSING_MAPPING (0x15)
+/**
+    The delete operation failed because the KMMU context is not
+    empty.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT) */
+#define OKL4_ASM_ERROR_NON_EMPTY_MMU_CONTEXT (0x16)
+/**
+    The lookup operation failed because the given virtual address
+    of the given KMMU context is not mapped at the given physical
+    segment.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IN_SEGMENT) */
+#define OKL4_ASM_ERROR_NOT_IN_SEGMENT (0x17)
+/**
+    The operation failed because the caller is not on the last
+    online cpu.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_LAST_CPU) */
+#define OKL4_ASM_ERROR_NOT_LAST_CPU (0x18)
+/**
+    Insufficient resources are available to perform the operation.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NO_RESOURCES) */
+#define OKL4_ASM_ERROR_NO_RESOURCES (0x19)
+/**
+    Operation failed because pipe was not in the required state.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_BAD_STATE) */
+#define OKL4_ASM_ERROR_PIPE_BAD_STATE (0x1a)
+/**
+    Operation failed because no messages are in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_EMPTY) */
+#define OKL4_ASM_ERROR_PIPE_EMPTY (0x1b)
+/**
+    Operation failed because no memory is available in the queue.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_FULL) */
+#define OKL4_ASM_ERROR_PIPE_FULL (0x1c)
+/**
+    Operation failed because the pipe is in reset or not ready.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_NOT_READY) */
+#define OKL4_ASM_ERROR_PIPE_NOT_READY (0x1d)
+/**
+    Message was truncated because receive buffer size is too small.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW) */
+#define OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW (0x1e)
+/**
+    The operation failed because at least one VCPU has a monitored
+    power state and is not currently suspended.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_POWER_VCPU_RESUMED) */
+#define OKL4_ASM_ERROR_POWER_VCPU_RESUMED (0x1f)
+/**
+    The operation requires a segment to be unused, or not attached
+    to an MMU context.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_SEGMENT_USED) */
+#define OKL4_ASM_ERROR_SEGMENT_USED (0x20)
+/*lint -esym(621, OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) */
+#define OKL4_ASM_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED (0x21)
+/**
+    The timer is already active, and was not reprogrammed.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_ACTIVE) */
+#define OKL4_ASM_ERROR_TIMER_ACTIVE (0x22)
+/**
+    The timer has already been cancelled or expired.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TIMER_CANCELLED) */
+#define OKL4_ASM_ERROR_TIMER_CANCELLED (0x23)
+/**
+    Operation failed due to a temporary condition, and may be retried.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_TRY_AGAIN) */
+#define OKL4_ASM_ERROR_TRY_AGAIN (0x24)
+/**
+    The non-blocking operation failed because it would
+    block on a resource.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_WOULD_BLOCK) */
+#define OKL4_ASM_ERROR_WOULD_BLOCK (0x25)
+/**
+    Insufficient resources
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_ALLOC_EXHAUSTED) */
+#define OKL4_ASM_ERROR_ALLOC_EXHAUSTED (0x26)
+/**
+    KSP specific error 0
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_0) */
+#define OKL4_ASM_ERROR_KSP_ERROR_0 (0x10000010)
+/**
+    KSP specific error 1
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_1) */
+#define OKL4_ASM_ERROR_KSP_ERROR_1 (0x10000011)
+/**
+    KSP specific error 2
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_2) */
+#define OKL4_ASM_ERROR_KSP_ERROR_2 (0x10000012)
+/**
+    KSP specific error 3
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_3) */
+#define OKL4_ASM_ERROR_KSP_ERROR_3 (0x10000013)
+/**
+    KSP specific error 4
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_4) */
+#define OKL4_ASM_ERROR_KSP_ERROR_4 (0x10000014)
+/**
+    KSP specific error 5
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_5) */
+#define OKL4_ASM_ERROR_KSP_ERROR_5 (0x10000015)
+/**
+    KSP specific error 6
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_6) */
+#define OKL4_ASM_ERROR_KSP_ERROR_6 (0x10000016)
+/**
+    KSP specific error 7
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_7) */
+#define OKL4_ASM_ERROR_KSP_ERROR_7 (0x10000017)
+/**
+    Invalid argument to KSP
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INVALID_ARG) */
+#define OKL4_ASM_ERROR_KSP_INVALID_ARG (0x80000001)
+/**
+    KSP doesn't implement requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_KSP_NOT_IMPLEMENTED (0x80000002)
+/**
+    User didn't supply rights for requested feature
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS) */
+#define OKL4_ASM_ERROR_KSP_INSUFFICIENT_RIGHTS (0x80000003)
+/**
+    Interrupt already registered
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED) */
+#define OKL4_ASM_ERROR_KSP_INTERRUPT_REGISTERED (0x80000004)
+/**
+    Requested operation is not implemented.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_NOT_IMPLEMENTED) */
+#define OKL4_ASM_ERROR_NOT_IMPLEMENTED (0xffffffff)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_MAX) */
+#define OKL4_ASM_ERROR_MAX (0xffffffff)
+
+/**
+ *  okl4_gicd_icfgr_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_MASK_EDGE_GICD_ICFGR (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_SHIFT_EDGE_GICD_ICFGR (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_EDGE_GICD_ICFGR) */
+#define OKL4_ASM_WIDTH_EDGE_GICD_ICFGR (1)
+
+
+/**
+ *  okl4_sgi_target_t
+ **/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_LISTED) */
+#define OKL4_ASM_SGI_TARGET_LISTED (0x0)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_ALL_OTHERS) */
+#define OKL4_ASM_SGI_TARGET_ALL_OTHERS (0x1)
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_SELF) */
+#define OKL4_ASM_SGI_TARGET_SELF (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_MAX) */
+#define OKL4_ASM_SGI_TARGET_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_SGI_TARGET_INVALID) */
+#define OKL4_ASM_SGI_TARGET_INVALID (0xffffffff)
+
+/**
+ *  okl4_gicd_sgir_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_MASK_SGIINTID_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_SGIINTID_GICD_SGIR (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_SGIINTID_GICD_SGIR (4)
+/*lint -esym(621, OKL4_ASM_MASK_NSATT_GICD_SGIR) */
+#define OKL4_ASM_MASK_NSATT_GICD_SGIR (1 << 15)
+/*lint -esym(621, OKL4_ASM_SHIFT_NSATT_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_NSATT_GICD_SGIR (15)
+/*lint -esym(621, OKL4_ASM_WIDTH_NSATT_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_NSATT_GICD_SGIR (1)
+/*lint -esym(621, OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_MASK_CPUTARGETLIST_GICD_SGIR (255 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_CPUTARGETLIST_GICD_SGIR (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_CPUTARGETLIST_GICD_SGIR (8)
+/*lint -esym(621, OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_MASK_TARGETLISTFILTER_GICD_SGIR (3 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_SHIFT_TARGETLISTFILTER_GICD_SGIR (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR) */
+#define OKL4_ASM_WIDTH_TARGETLISTFILTER_GICD_SGIR (2)
+
+
+/**
+ *  okl4_link_role_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SYMMETRIC) */
+#define OKL4_ASM_LINK_ROLE_SYMMETRIC (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_SERVER) */
+#define OKL4_ASM_LINK_ROLE_SERVER (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_CLIENT) */
+#define OKL4_ASM_LINK_ROLE_CLIENT (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_MAX) */
+#define OKL4_ASM_LINK_ROLE_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_ROLE_INVALID) */
+#define OKL4_ASM_LINK_ROLE_INVALID (0xffffffff)
+
+/**
+ *  okl4_link_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_SHARED_BUFFER (0x0)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_AXONS (0x1)
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_PIPES (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_LINK_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+/**
+ *  okl4_mmu_lookup_index_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_ERROR_MMU_LOOKUP_INDEX (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_ERROR_MMU_LOOKUP_INDEX (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_ERROR_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_MASK_INDEX_MMU_LOOKUP_INDEX (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_SHIFT_INDEX_MMU_LOOKUP_INDEX (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX) */
+#define OKL4_ASM_WIDTH_INDEX_MMU_LOOKUP_INDEX (16)
+
+
+/**
+ *  okl4_mmu_lookup_size_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SEG_INDEX_MMU_LOOKUP_SIZE (1023)
+/*lint -esym(621, OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SEG_INDEX_MMU_LOOKUP_SIZE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SEG_INDEX_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_MASK_SIZE_10_MMU_LOOKUP_SIZE (18014398509481983 << 10)
+/*lint -esym(621, OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_SHIFT_SIZE_10_MMU_LOOKUP_SIZE (10)
+/*lint -esym(621, OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE) */
+#define OKL4_ASM_WIDTH_SIZE_10_MMU_LOOKUP_SIZE (54)
+
+
+/**
+ *  okl4_nanoseconds_t
+ **/
+/** Timer period upper bound is (1 << 55) ns */
+/*lint -esym(621, OKL4_TIMER_MAX_PERIOD_NS) */
+#define OKL4_TIMER_MAX_PERIOD_NS (36028797018963968)
+
+/** Timer period lower bound is 1000000 ns */
+/*lint -esym(621, OKL4_TIMER_MIN_PERIOD_NS) */
+#define OKL4_TIMER_MIN_PERIOD_NS (1000000)
+
+/**
+ *  _okl4_page_attribute_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_RWX_PAGE_ATTRIBUTE (7)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_RWX_PAGE_ATTRIBUTE (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_RWX_PAGE_ATTRIBUTE (3)
+/*lint -esym(621, _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_MASK_ATTRIB_PAGE_ATTRIBUTE (268435455 << 4)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_SHIFT_ATTRIB_PAGE_ATTRIBUTE (4)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE) */
+#define _OKL4_ASM_WIDTH_ATTRIB_PAGE_ATTRIBUTE (28)
+
+
+/**
+ *  okl4_pipe_control_t
+ **/
+
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_CLR_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_CLR_HALTED (4)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_RESET) */
+#define OKL4_PIPE_CONTROL_OP_RESET (0)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_HALTED) */
+#define OKL4_PIPE_CONTROL_OP_SET_HALTED (3)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_RX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_RX_READY (2)
+/*lint -esym(621, OKL4_PIPE_CONTROL_OP_SET_TX_READY) */
+#define OKL4_PIPE_CONTROL_OP_SET_TX_READY (1)
+
+/*lint -esym(621, OKL4_ASM_MASK_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_DO_OP_PIPE_CONTROL (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_DO_OP_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_MASK_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_MASK_OPERATION_PIPE_CONTROL (7 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_SHIFT_OPERATION_PIPE_CONTROL (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL) */
+#define OKL4_ASM_WIDTH_OPERATION_PIPE_CONTROL (3)
+
+
+/**
+ *  okl4_pipe_state_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_RESET_PIPE_STATE) */
+#define OKL4_ASM_MASK_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_RESET_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RESET_PIPE_STATE (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_RESET_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RESET_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_HALTED_PIPE_STATE) */
+#define OKL4_ASM_MASK_HALTED_PIPE_STATE (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_HALTED_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_HALTED_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_HALTED_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_READY_PIPE_STATE (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_READY_PIPE_STATE (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_READY_PIPE_STATE (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_READY_PIPE_STATE (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_READY_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_READY_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_RX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_TX_AVAILABLE_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_WAITING_PIPE_STATE) */
+#define OKL4_ASM_MASK_WAITING_PIPE_STATE (1 << 6)
+/*lint -esym(621, OKL4_ASM_SHIFT_WAITING_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_WAITING_PIPE_STATE (6)
+/*lint -esym(621, OKL4_ASM_WIDTH_WAITING_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_WAITING_PIPE_STATE (1)
+/*lint -esym(621, OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_MASK_OVERQUOTA_PIPE_STATE (1 << 7)
+/*lint -esym(621, OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_SHIFT_OVERQUOTA_PIPE_STATE (7)
+/*lint -esym(621, OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE) */
+#define OKL4_ASM_WIDTH_OVERQUOTA_PIPE_STATE (1)
+
+
+/**
+ *  okl4_power_state_t
+ **/
+/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
+#define OKL4_POWER_STATE_IDLE (0)
+
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
+#define OKL4_POWER_STATE_PLATFORM_BASE (256)
+
+/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
+#define OKL4_POWER_STATE_POWEROFF (1)
+
+/**
+ *  okl4_register_set_t
+ **/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_CPU_REGS) */
+#define OKL4_ASM_REGISTER_SET_CPU_REGS (0x0)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_REGS (0x1)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP_CTRL_REGS (0x2)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP64_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP64_REGS (0x3)
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_VFP128_REGS) */
+#define OKL4_ASM_REGISTER_SET_VFP128_REGS (0x4)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_MAX) */
+#define OKL4_ASM_REGISTER_SET_MAX (0x4)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_REGISTER_SET_INVALID) */
+#define OKL4_ASM_REGISTER_SET_INVALID (0xffffffff)
+
+/**
+ *  okl4_register_and_set_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_OFFSET_REGISTER_AND_SET (65535)
+/*lint -esym(621, OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_OFFSET_REGISTER_AND_SET (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_OFFSET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_MASK_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_MASK_SET_REGISTER_AND_SET (65535 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_SHIFT_SET_REGISTER_AND_SET (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_SET_REGISTER_AND_SET) */
+#define OKL4_ASM_WIDTH_SET_REGISTER_AND_SET (16)
+
+
+/**
+ *  okl4_scheduler_virq_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_MASK_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_SHIFT_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS) */
+#define OKL4_ASM_WIDTH_POWER_SUSPENDED_SCHEDULER_VIRQ_FLAGS (1)
+
+
+/**
+ *  okl4_sdk_version_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAINTENANCE_SDK_VERSION (63)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAINTENANCE_SDK_VERSION (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAINTENANCE_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_MASK_RELEASE_SDK_VERSION (255 << 8)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELEASE_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RELEASE_SDK_VERSION (8)
+/*lint -esym(621, OKL4_ASM_MASK_MINOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MINOR_SDK_VERSION (63 << 16)
+/*lint -esym(621, OKL4_ASM_SHIFT_MINOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MINOR_SDK_VERSION (16)
+/*lint -esym(621, OKL4_ASM_WIDTH_MINOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MINOR_SDK_VERSION (6)
+/*lint -esym(621, OKL4_ASM_MASK_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_MASK_MAJOR_SDK_VERSION (15 << 24)
+/*lint -esym(621, OKL4_ASM_SHIFT_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_MAJOR_SDK_VERSION (24)
+/*lint -esym(621, OKL4_ASM_WIDTH_MAJOR_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_MAJOR_SDK_VERSION (4)
+/*lint -esym(621, OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_RES0_FLAG_SDK_VERSION (1 << 28)
+/*lint -esym(621, OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_RES0_FLAG_SDK_VERSION (28)
+/*lint -esym(621, OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_RES0_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_DEV_FLAG_SDK_VERSION (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_DEV_FLAG_SDK_VERSION (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_DEV_FLAG_SDK_VERSION (1)
+/*lint -esym(621, OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_MASK_FORMAT_FLAG_SDK_VERSION (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_SHIFT_FORMAT_FLAG_SDK_VERSION (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION) */
+#define OKL4_ASM_WIDTH_FORMAT_FLAG_SDK_VERSION (1)
+
+
+/**
+ *  okl4_timer_flags_t
+ **/
+
+
+/*lint -esym(621, OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ACTIVE_TIMER_FLAGS (0)
+/*lint -esym(621, OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ACTIVE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_PERIODIC_TIMER_FLAGS (1 << 1)
+/*lint -esym(621, OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_PERIODIC_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ABSOLUTE_TIMER_FLAGS (1 << 2)
+/*lint -esym(621, OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ABSOLUTE_TIMER_FLAGS (2)
+/*lint -esym(621, OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ABSOLUTE_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_UNITS_TIMER_FLAGS (1 << 3)
+/*lint -esym(621, OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_UNITS_TIMER_FLAGS (3)
+/*lint -esym(621, OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_UNITS_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_ALIGN_TIMER_FLAGS (1 << 4)
+/*lint -esym(621, OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_ALIGN_TIMER_FLAGS (4)
+/*lint -esym(621, OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_ALIGN_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_WATCHDOG_TIMER_FLAGS (1 << 5)
+/*lint -esym(621, OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_WATCHDOG_TIMER_FLAGS (5)
+/*lint -esym(621, OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_WATCHDOG_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_RELOAD_TIMER_FLAGS (1 << 30)
+/*lint -esym(621, OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_RELOAD_TIMER_FLAGS (30)
+/*lint -esym(621, OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_RELOAD_TIMER_FLAGS (1)
+/*lint -esym(621, OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_MASK_TIMESLICE_TIMER_FLAGS (1 << 31)
+/*lint -esym(621, OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_SHIFT_TIMESLICE_TIMER_FLAGS (31)
+/*lint -esym(621, OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS) */
+#define OKL4_ASM_WIDTH_TIMESLICE_TIMER_FLAGS (1)
+
+
+/**
+ *  okl4_tracepoint_class_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE) */
+#define OKL4_ASM_TRACEPOINT_CLASS_THREAD_STATE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SYSCALLS (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_PRIMARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_PRIMARY (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_SECONDARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_SECONDARY (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_TERTIARY) */
+#define OKL4_ASM_TRACEPOINT_CLASS_TERTIARY (0x4)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_MAX) */
+#define OKL4_ASM_TRACEPOINT_CLASS_MAX (0x4)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_CLASS_INVALID) */
+#define OKL4_ASM_TRACEPOINT_CLASS_INVALID (0xffffffff)
+
+/**
+ *  _okl4_tracepoint_desc_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_ID_TRACEPOINT_DESC (255)
+/*lint -esym(621, _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_ID_TRACEPOINT_DESC (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_ID_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_MASK_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_USER_TRACEPOINT_DESC (1 << 8)
+/*lint -esym(621, _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_USER_TRACEPOINT_DESC (8)
+/*lint -esym(621, _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_USER_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_BIN_TRACEPOINT_DESC (1 << 9)
+/*lint -esym(621, _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_BIN_TRACEPOINT_DESC (9)
+/*lint -esym(621, _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_BIN_TRACEPOINT_DESC (1)
+/*lint -esym(621, _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_RECLEN_TRACEPOINT_DESC (63 << 10)
+/*lint -esym(621, _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_RECLEN_TRACEPOINT_DESC (10)
+/*lint -esym(621, _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_RECLEN_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_CPUID_TRACEPOINT_DESC (63 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_CPUID_TRACEPOINT_DESC (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_CPUID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK_THREADID_TRACEPOINT_DESC (63 << 22)
+/*lint -esym(621, _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT_THREADID_TRACEPOINT_DESC (22)
+/*lint -esym(621, _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH_THREADID_TRACEPOINT_DESC (6)
+/*lint -esym(621, _OKL4_ASM_MASK__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_MASK__R1_TRACEPOINT_DESC (15 << 28)
+/*lint -esym(621, _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_SHIFT__R1_TRACEPOINT_DESC (28)
+/*lint -esym(621, _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC) */
+#define _OKL4_ASM_WIDTH__R1_TRACEPOINT_DESC (4)
+
+
+/**
+ *  _okl4_tracepoint_masks_t
+ **/
+
+
+/*lint -esym(621, _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_CLASS_TRACEPOINT_MASKS (65535)
+/*lint -esym(621, _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_CLASS_TRACEPOINT_MASKS (0)
+/*lint -esym(621, _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_CLASS_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_MASK_SUBSYSTEM_TRACEPOINT_MASKS (65535 << 16)
+/*lint -esym(621, _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_SHIFT_SUBSYSTEM_TRACEPOINT_MASKS (16)
+/*lint -esym(621, _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS) */
+#define _OKL4_ASM_WIDTH_SUBSYSTEM_TRACEPOINT_MASKS (16)
+
+
+/**
+ *  okl4_tracepoint_evt_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_SET_RUNNABLE (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_SCHEDULER_FLAG_CLEAR_RUNNABLE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SCH_CONTEXT_SWITCH (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_KDB_SET_OBJECT_NAME (0x3)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_PROCESS_RECV (0x4)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_HALTED (0x5)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_AREA (0x6)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_QUEUE (0x7)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_RECV_SEGMENT (0x8)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_AREA (0x9)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE (0xa)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT (0xb)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND (0xc)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK (0xd)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE (0xe)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED (0xf)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH (0x10)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE (0x11)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI (0x12)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING (0x13)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD (0x14)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS (0x15)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK (0x16)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE (0x17)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT (0x18)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG (0x19)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL (0x1a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY (0x1b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK (0x1c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS (0x1d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK (0x1e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT (0x1f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME (0x20)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL (0x21)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT (0x22)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT (0x23)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE (0x24)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN (0x25)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE (0x26)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN (0x27)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE (0x28)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN (0x29)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE (0x2a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN (0x2b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS (0x2c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS (0x2d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS (0x2e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS (0x2f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL (0x30)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL (0x31)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV (0x32)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND (0x33)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE (0x34)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER (0x35)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS (0x36)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 (0x37)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER (0x38)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS (0x39)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 (0x3a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED (0x3b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED (0x3c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE (0x3d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE (0x3e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA (0x3f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE (0x40)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE (0x41)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA (0x42)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND (0x43)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL (0x44)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION (0x45)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME (0x46)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY (0x47)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START (0x48)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC (0x49)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET (0x4a)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START (0x4b)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP (0x4c)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE (0x4d)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV (0x4e)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE (0x4f)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE (0x50)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY (0x51)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE (0x52)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_MAX) */
+#define OKL4_ASM_TRACEPOINT_EVT_MAX (0x52)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_INVALID) */
+#define OKL4_ASM_TRACEPOINT_EVT_INVALID (0xffffffff)
+
+/**
+ *  okl4_tracepoint_level_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_DEBUG) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_DEBUG (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INFO) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INFO (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_WARN) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_WARN (0x2)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_CRITICAL (0x3)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_MAX) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_MAX (0x3)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_LEVEL_INVALID) */
+#define OKL4_ASM_TRACEPOINT_LEVEL_INVALID (0xffffffff)
+
+/**
+ *  okl4_tracepoint_subsystem_t
+ **/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_SCHEDULER (0x0)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_TRACE (0x1)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_CORE (0x2)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_MAX (0x2)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID) */
+#define OKL4_ASM_TRACEPOINT_SUBSYSTEM_INVALID (0xffffffff)
+
+/**
+ *  okl4_vfp_ops_t
+ **/
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_MAX) */
+#define OKL4_ASM_VFP_OPS_MAX (0x0)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VFP_OPS_INVALID) */
+#define OKL4_ASM_VFP_OPS_INVALID (0xffffffff)
+
+/**
+ *  okl4_vservices_transport_type_t
+ **/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_AXON (0x0)
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_SHARED_BUFFER (0x1)
+/**
+    Maximum enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_MAX (0x1)
+/**
+    Invalid enumeration value
+*/
+/*lint -esym(621, OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID) */
+#define OKL4_ASM_VSERVICES_TRANSPORT_TYPE_INVALID (0xffffffff)
+
+
+#endif /* !ASSEMBLY */
+
+#endif /* __AUTO__MICROVISOR_TYPES_H__ */
+/** @} */
+/** @} */
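The generated constants above follow one pattern per bitfield: OKL4_ASM_MASK_* is the field mask already positioned within the word, OKL4_ASM_SHIFT_* is its bit offset, and OKL4_ASM_WIDTH_* is its size in bits, so a field is extracted as (word & MASK) >> SHIFT. A minimal C sketch of that pattern using the pipe-state constants defined above; the helper names and the raw state word are illustrative only and not part of the generated header.

#include <stdint.h>
#include <microvisor/microvisor.h>

/* Illustrative helpers (not part of the generated header): extract the
 * RX_AVAILABLE and TX_AVAILABLE bits from a raw pipe-state word using
 * the MASK/SHIFT constants above.
 */
static inline uint32_t example_pipe_rx_available(uint32_t state)
{
	return (state & OKL4_ASM_MASK_RX_AVAILABLE_PIPE_STATE) >>
			OKL4_ASM_SHIFT_RX_AVAILABLE_PIPE_STATE;
}

static inline uint32_t example_pipe_tx_available(uint32_t state)
{
	return (state & OKL4_ASM_MASK_TX_AVAILABLE_PIPE_STATE) >>
			OKL4_ASM_SHIFT_TX_AVAILABLE_PIPE_STATE;
}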
diff --git a/include/microvisor/microvisor.h b/include/microvisor/microvisor.h
new file mode 100644
index 0000000..3bb8d64
--- /dev/null
+++ b/include/microvisor/microvisor.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MICROVISOR_H_
+#define _MICROVISOR_H_
+
+/**
+ * @defgroup lib_microvisor The Microvisor Library
+ *
+ * @{
+ *
+ * The Microvisor Library is the primary low-level API between the OKL4
+ * Microvisor and a Cell application or guest-OS. It also provides certain
+ * common data types such as structure definitions used in these interactions.
+ *
+ */
+
+/**
+ * Temporarily define _Bool to allow C++ compilation of
+ * OKL code that makes use of it.
+ */
+#if defined(__cplusplus) && !defined(_Bool)
+#define _OKL4_CPP_BOOL
+#define _Bool bool
+#endif
+
+#define OKL4_INLINE static inline
+
+#if defined(_lint) || defined(_splint)
+#define OKL4_FORCE_INLINE static
+#else
+#define OKL4_FORCE_INLINE static inline __attribute__((always_inline))
+#endif
+
+#include <microvisor/kernel/types.h>
+#include <microvisor/kernel/microvisor.h>
+#include <microvisor/kernel/syscalls.h>
+#include <microvisor/kernel/offsets.h>
+
+/** @} */
+
+/**
+ * Remove temporary definition of _Bool if it was defined
+ */
+#if defined(_OKL4_CPP_BOOL)
+#undef _Bool
+#undef _OKL4_CPP_BOOL
+#endif
+
+#endif /* _MICROVISOR_H_ */
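Besides the includes, this header carries two conveniences: a temporary _Bool-to-bool shim so the C headers can also be compiled as C++, and OKL4_FORCE_INLINE, which degrades to a plain static under the _lint and _splint analysers (which do not understand the always_inline attribute) but forces inlining under a normal compiler. A minimal sketch of using the macro; the helper itself is illustrative and not part of the library.

#include <stdint.h>
#include <microvisor/microvisor.h>

/* Illustrative only: a forced-inline helper that still passes lint,
 * because OKL4_FORCE_INLINE falls back to plain "static" there.
 */
OKL4_FORCE_INLINE int example_is_power_of_two(uint64_t x)
{
	return (x != 0u) && ((x & (x - 1u)) == 0u);
}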
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f32ed9a..f38fe1c 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -62,7 +62,8 @@
 	struct list_head pending_links;
 	struct list_head accept_queue;
 	bool rejected;
-	struct delayed_work dwork;
+	struct delayed_work connect_work;
+	struct delayed_work pending_work;
 	struct delayed_work close_work;
 	bool close_work_scheduled;
 	u32 peer_shutdown;
@@ -75,7 +76,6 @@
 
 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index e95ef8b..4e86755 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -115,6 +115,11 @@
 	u32 addr;
 };
 
+struct cnss_rri_over_ddr_cfg {
+	u32 base_addr_low;
+	u32 base_addr_high;
+};
+
 struct cnss_wlan_enable_cfg {
 	u32 num_ce_tgt_cfg;
 	struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg;
@@ -124,6 +129,8 @@
 	struct cnss_shadow_reg_cfg *shadow_reg_cfg;
 	u32 num_shadow_reg_v2_cfg;
 	struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg;
+	bool rri_over_ddr_cfg_valid;
+	struct cnss_rri_over_ddr_cfg rri_over_ddr_cfg;
 };
 
 enum cnss_driver_mode {
@@ -174,6 +181,9 @@
 extern int cnss_wlan_pm_control(struct device *dev, bool vote);
 extern int cnss_auto_suspend(struct device *dev);
 extern int cnss_auto_resume(struct device *dev);
+extern int cnss_pci_force_wake_request(struct device *dev);
+extern int cnss_pci_is_device_awake(struct device *dev);
+extern int cnss_pci_force_wake_release(struct device *dev);
 extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
 					int *num_vectors,
 					uint32_t *user_base_data,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e64210c..64b0e9d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -312,14 +312,7 @@
 struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
 					  struct ipv6_txoptions *opt,
 					  int newtype,
-					  struct ipv6_opt_hdr __user *newopt,
-					  int newoptlen);
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk,
-			struct ipv6_txoptions *opt,
-			int newtype,
-			struct ipv6_opt_hdr *newopt,
-			int newoptlen);
+					  struct ipv6_opt_hdr *newopt);
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
 					  struct ipv6_txoptions *opt);
 
@@ -794,7 +787,7 @@
 	 * to minimize possbility that any useful information to an
 	 * attacker is leaked. Only lower 20 bits are relevant.
 	 */
-	rol32(hash, 16);
+	hash = rol32(hash, 16);
 
 	flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
 
diff --git a/include/net/llc.h b/include/net/llc.h
index e8e61d4..82d9899 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -116,6 +116,11 @@
 	atomic_inc(&sap->refcnt);
 }
 
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+	return atomic_inc_not_zero(&sap->refcnt);
+}
+
 void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 23102da..c05db6f 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -116,6 +116,7 @@
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 	struct netns_nf_frag	nf_frag;
+	struct ctl_table_header *nf_frag_frags_hdr;
 #endif
 	struct sock		*nfnl;
 	struct sock		*nfnl_stash;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 10d0848..5cae575 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -89,7 +89,6 @@
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct netns_nf_frag {
-	struct netns_sysctl_ipv6 sysctl;
 	struct netns_frags	frags;
 };
 #endif
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694d..008f466 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NFC_HCI_MAX_PIPES		127
+#define NFC_HCI_MAX_PIPES		128
 struct nfc_hci_init_data {
 	u8 gate_count;
 	struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index 253f8da..2dcd80d 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -16,7 +16,6 @@
 struct tcf_tunnel_key_params {
 	struct rcu_head		rcu;
 	int			tcft_action;
-	int			action;
 	struct metadata_dst     *tcft_enc_metadata;
 };
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 76d7c97..1f5ddaf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -379,6 +379,7 @@
 extern int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
 				void __user *buffer, size_t *length,
 				loff_t *ppos);
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
@@ -569,6 +570,7 @@
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
@@ -864,8 +866,6 @@
 	CA_EVENT_LOSS,		/* loss timeout */
 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
-	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
-	CA_EVENT_NON_DELAYED_ACK,
 };
 
 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 5ad43a4..a42535f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3308,6 +3308,20 @@
 	return 0;
 }
 
+static inline bool ib_access_writable(int access_flags)
+{
+	/*
+	 * We have writable memory backing the MR if any of the following
+	 * access flags are set.  "Local write" and "remote write" obviously
+	 * require write access.  "Remote atomic" can do things like fetch and
+	 * add, which will modify memory, and "MW bind" can change permissions
+	 * by binding a window.
+	 */
+	return access_flags &
+		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
+		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
+}
+
 /**
  * ib_check_mr_status: lightweight check of MR status.
  *     This routine may provide status checks on a selected
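The new ib_access_writable() helper above only reports whether any of the four access flags that imply writable backing memory is set; what to do with that answer is left to the caller. A small hypothetical sketch of a typical use, choosing get_user_pages() flags when pinning an MR's pages; the wrapper function is not from this patch, only ib_access_writable() and FOLL_WRITE are real kernel symbols.

#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/* Hypothetical caller: pick get_user_pages() flags for pinning the
 * pages behind an MR, based on the requested IB access flags.
 */
static unsigned int example_mr_gup_flags(int access_flags)
{
	return ib_access_writable(access_flags) ? FOLL_WRITE : 0;
}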
diff --git a/include/soc/qcom/minidump.h b/include/soc/qcom/minidump.h
index 5c751e8..0d43e14 100644
--- a/include/soc/qcom/minidump.h
+++ b/include/soc/qcom/minidump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-18 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,13 +30,22 @@
 	u64	size;
 };
 
-/* Register an entry in Minidump table
- * Returns:
- *	Zero: on successful addition
- *	Negetive error number on failures
- */
 #ifdef CONFIG_QCOM_MINIDUMP
+/*
+ * Register an entry in Minidump table
+ * Returns:
+ *	region number: entry position in minidump table.
+ *	Negative error number on failures.
+ */
 extern int msm_minidump_add_region(const struct md_region *entry);
+/*
+ * Update registered region address in Minidump table.
+ * It does not hold any locks, so strictly serialize the region updates.
+ * Returns:
+ *	Zero: on successful update
+ *	Negative error number on failures.
+ */
+extern int msm_minidump_update_region(int regno, const struct md_region *entry);
 extern bool msm_minidump_enabled(void);
 extern void dump_stack_minidump(u64 sp);
 #else
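The reworked comments spell out the contract: msm_minidump_add_region() returns the entry's position in the minidump table, and that index is what msm_minidump_update_region() takes when the region's address later changes, with the caller responsible for serializing updates since the API takes no locks. A small hypothetical sketch of that pairing; the wrapper function is illustrative only.

#include <soc/qcom/minidump.h>

/* Hypothetical helper: register a region, then later push an updated
 * address for it using the index returned at registration time.
 * Callers must serialize updates themselves; the API holds no locks.
 */
static int example_register_then_update(const struct md_region *entry)
{
	int regno = msm_minidump_add_region(entry);

	if (regno < 0)
		return regno;

	/* ... later, after the region's address has been updated ... */
	return msm_minidump_update_region(regno, entry);
}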
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 2eaaaa5..c7dcf36 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -114,14 +114,20 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sxr1120")
 #define early_machine_is_msm8953()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
+#define early_machine_is_sdmnobelium()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
 #define early_machine_is_msm8937()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8937")
 #define early_machine_is_msm8917()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8917")
+#define early_machine_is_msm8940()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8940")
 #define early_machine_is_mdm9607()      \
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9607")
 #define early_machine_is_sdm450()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm450")
+#define early_machine_is_sda450()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda450")
 #define early_machine_is_sdm632()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm632")
 #define early_machine_is_sdm439()	\
@@ -185,7 +191,9 @@
 #define early_machine_is_msm8953()	0
 #define early_machine_is_msm8937()	0
 #define early_machine_is_msm8917()	0
+#define early_machine_is_msm8940()	0
 #define early_machine_is_sdm450()	0
+#define early_machine_is_sda450()	0
 #define early_machine_is_sdm632()	0
 #define early_machine_is_sdm439()	0
 #define early_machine_is_sdm429()	0
@@ -193,6 +201,7 @@
 #define early_machine_is_sda429()	0
 #define early_machine_is_mdm9650()     0
 #define early_machine_is_qm215()	0
+#define early_machine_is_sdmnobelium()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -262,12 +271,15 @@
 	MSM_CPU_SXR1120,
 	MSM_CPU_8953,
 	MSM_CPU_SDM450,
+	MSM_CPU_SDA450,
 	MSM_CPU_SDM632,
 	MSM_CPU_SDA632,
 	MSM_CPU_8937,
 	MSM_CPU_8917,
+	MSM_CPU_8940,
 	MSM_CPU_9607,
 	MSM_CPU_SDM439,
+	MSM_CPU_SDMNOBELIUM,
 	MSM_CPU_SDM429,
 	MSM_CPU_SDA439,
 	MSM_CPU_SDA429,
diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h
index 2ad3a5e..cca1dcc 100644
--- a/include/soc/qcom/sysmon.h
+++ b/include/soc/qcom/sysmon.h
@@ -40,6 +40,7 @@
  */
 enum ssctl_ssr_event_enum_type {
 	SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_INVALID = -1,
 	SSCTL_SSR_EVENT_BEFORE_POWERUP = 0,
 	SSCTL_SSR_EVENT_AFTER_POWERUP = 1,
 	SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2,
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 44202ff..f759e09 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -99,6 +99,8 @@
 	u8 client_id_mask;
 
 	const struct tegra_smmu_soc *smmu;
+
+	u32 intmask;
 };
 
 struct tegra_mc {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 435cee5..c2882c2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -515,3 +515,8 @@
 header-y += msm_rotator.h
 header-y += bgcom_interface.h
 header-y += nfc/
+
+ifneq ($(VSERVICES_SUPPORT), "")
+include include/linux/Kbuild.vservices
+endif
+header-y += okl4-link-shbuf.h
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 5539933..bd0da0e 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -246,6 +246,15 @@
 	__u32            has_weak_ref;
 };
 
+struct binder_node_info_for_ref {
+	__u32            handle;
+	__u32            strong_count;
+	__u32            weak_count;
+	__u32            reserved1;
+	__u32            reserved2;
+	__u32            reserved3;
+};
+
 #define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
 #define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
 #define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
@@ -254,6 +263,7 @@
 #define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
 #define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
 #define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
+#define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
 
 /*
  * NOTE: Two special error codes you should check for when calling
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 5c22e8c..8c5335b 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -882,13 +882,13 @@
 static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
 {
 	return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}
 
 static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
 {
 	return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
 				ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}
 
 /**
  * struct ethtool_rxnfc - command to get or set RX flow classification rules
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 05b9bb6..a0a365c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -717,6 +717,7 @@
 #define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
 #define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
 #define KVM_GET_EMULATED_CPUID	  _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
+#define KVM_GET_MSR_FEATURE_INDEX_LIST    _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
 
 /*
  * Extension capability list.
@@ -871,6 +872,7 @@
 #define KVM_CAP_MSI_DEVID 131
 #define KVM_CAP_PPC_HTM 132
 #define KVM_CAP_S390_BPB 152
+#define KVM_CAP_GET_MSR_FEATURES 153
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
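The KVM_GET_MSR_FEATURE_INDEX_LIST ioctl added above uses the same struct kvm_msr_list as KVM_GET_MSR_INDEX_LIST. A hedged user-space sketch of the usual two-call pattern (error handling trimmed; the E2BIG probe behaviour is assumed from the companion index-list ioctl):

/*
 * Sketch only: enumerate feature MSRs on the KVM system fd. Assumes the
 * usual kvm_msr_list probe convention (nmsrs is filled in when the first
 * call's buffer is too small).
 */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int list_feature_msrs(void)
{
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return -1;
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0)
		return -1;	/* feature MSRs not supported by this kernel */

	/* First call reports how many indices exist. */
	ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return -1;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, list) < 0)
		return -1;

	return (int)list->nmsrs;
}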
diff --git a/include/uapi/linux/mhi.h b/include/uapi/linux/mhi.h
index 834c1dc..6442c85 100644
--- a/include/uapi/linux/mhi.h
+++ b/include/uapi/linux/mhi.h
@@ -32,6 +32,7 @@
 #define MHI_UCI_IOCTL_MAGIC	'm'
 
 #define MHI_UCI_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 2, struct ep_info)
+#define MHI_UCI_DPL_EP_LOOKUP _IOR(MHI_UCI_IOCTL_MAGIC, 3, struct ep_info)
 
 #endif /* _UAPI_MHI_H */
 
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 44c4cc2..ebb8dbe 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -176,11 +176,13 @@
 #define IPA_FLT_TCP_SYN_L2TP		(1ul << 24)
 #define IPA_FLT_L2TP_INNER_IP_TYPE  (1ul << 25)
 #define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
+#define IPA_FLT_IS_PURE_ACK		(1ul << 27)
 
 /**
  * maximal number of NAT PDNs in the PDN config table
  */
 #define IPA_MAX_PDN_NUM 5
+#define IPA_ADPL_MHI_OVER_PCIE
 
 /**
  * enum ipa_client_type - names for the various IPA "clients"
@@ -300,10 +302,16 @@
 	IPA_CLIENT_TEST4_PROD			= 70,
 	IPA_CLIENT_TEST4_CONS			= 71,
 
-	/* RESERVERD PROD				= 72, */
-	IPA_CLIENT_DUMMY_CONS			= 73
+	/* RESERVED PROD		            = 72, */
+	IPA_CLIENT_DUMMY_CONS                      = 73,
+
+	/* RESERVED PROD                            = 74, */
+	IPA_CLIENT_MHI_DPL_CONS                 = 75,
+	/* RESERVED PROD                            = 76, */
+	IPA_CLIENT_DUMMY_CONS1			= 77
 };
 
+#define IPA_CLIENT_DUMMY_CONS IPA_CLIENT_DUMMY_CONS1
 #define IPA_CLIENT_MAX (IPA_CLIENT_DUMMY_CONS + 1)
 
 #define IPA_CLIENT_IS_APPS_CONS(client) \
@@ -370,11 +378,14 @@
 	(client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD)
 
 #define IPA_CLIENT_IS_MHI_CONS(client) \
-	((client) == IPA_CLIENT_MHI_CONS)
+	((client) == IPA_CLIENT_MHI_CONS || \
+	(client) == IPA_CLIENT_MHI_DPL_CONS)
+
 
 #define IPA_CLIENT_IS_MHI(client) \
 	((client) == IPA_CLIENT_MHI_CONS || \
-	(client) == IPA_CLIENT_MHI_PROD)
+	(client) == IPA_CLIENT_MHI_PROD || \
+	(client) == IPA_CLIENT_MHI_DPL_CONS)
 
 #define IPA_CLIENT_IS_TEST_PROD(client) \
 	((client) == IPA_CLIENT_TEST_PROD || \
@@ -1845,16 +1856,20 @@
 	IPACM_CLIENT_MAX
 };
 
+#define IPACM_SUPPORT_OF_LAN_STATS_FOR_ODU_CLIENTS
+
 enum ipacm_per_client_device_type {
 	IPACM_CLIENT_DEVICE_TYPE_USB = 0,
 	IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
-	IPACM_CLIENT_DEVICE_TYPE_ETH = 2
+	IPACM_CLIENT_DEVICE_TYPE_ETH = 2,
+	IPACM_CLIENT_DEVICE_TYPE_ODU = 3,
+	IPACM_CLIENT_DEVICE_MAX
 };
 
 /**
  * max number of device types supported.
  */
-#define IPACM_MAX_CLIENT_DEVICE_TYPES 3
+#define IPACM_MAX_CLIENT_DEVICE_TYPES IPACM_CLIENT_DEVICE_MAX
 
 /**
  * @lanIface - Name of the lan interface
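Because IPACM_MAX_CLIENT_DEVICE_TYPES is now derived from the enum, arrays indexed by device type automatically cover the new ODU entry. A small illustrative sketch (the counter array is hypothetical, not part of the patch):

/* Hypothetical per-device-type counters sized by the enum-derived bound. */
#include <linux/msm_ipa.h>

static unsigned int client_counts[IPACM_MAX_CLIENT_DEVICE_TYPES];

static void count_client(enum ipacm_per_client_device_type type)
{
	if (type < IPACM_CLIENT_DEVICE_MAX)
		client_counts[type]++;	/* USB, WLAN, ETH or ODU */
}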
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 8ffef59..96053c6 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -65,6 +65,7 @@
 #define KGSL_CONTEXT_TYPE_CL		2
 #define KGSL_CONTEXT_TYPE_C2D		3
 #define KGSL_CONTEXT_TYPE_RS		4
+#define KGSL_CONTEXT_TYPE_VK		5
 #define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
 
 #define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
diff --git a/include/uapi/linux/okl4-link-shbuf.h b/include/uapi/linux/okl4-link-shbuf.h
new file mode 100644
index 0000000..69561bc
--- /dev/null
+++ b/include/uapi/linux/okl4-link-shbuf.h
@@ -0,0 +1,40 @@
+/*
+ *  User-visible interface to driver for inter-cell links using the
+ *  shared-buffer transport.
+ *
+ *  Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_OKL4_LINK_SHBUF_H
+#define _LINUX_OKL4_LINK_SHBUF_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * Ioctl that indicates a request to raise the outgoing vIRQ. This value is
+ * chosen to avoid conflict with the numbers documented in Linux 4.1's
+ * ioctl-numbers.txt. The argument is a payload to transmit to the receiver.
+ * Note that consecutive transmissions without an interleaved clear of the
+ * interrupt result in the payloads being ORed together.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_TX _IOW(0x8d, 1, __u64)
+
+/*
+ * Ioctl that indicates a request to clear any pending incoming vIRQ. The value
+ * returned through the argument to the ioctl is the payload, which is also
+ * cleared.
+ *
+ * The caller cannot distinguish between the cases of no pending interrupt and
+ * a pending interrupt with payload 0. It is expected that the caller is
+ * communicating with a cooperative sender and has polled their file descriptor
+ * to determine there is a pending interrupt before using this ioctl.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_CLR _IOR(0x8d, 2, __u64)
+
+#endif /* _LINUX_OKL4_LINK_SHBUF_H */
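A minimal user-space sketch of the two ioctls defined above. The device node path is an assumption of this example, and the TX argument is passed by pointer per the usual _IOW convention, which the header itself does not spell out:

/* Hypothetical usage of the shared-buffer link vIRQ ioctls (sketch only). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/okl4-link-shbuf.h>

int main(void)
{
	__u64 payload = 0x1;
	int fd = open("/dev/okl4-link-shbuf0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* Raise the outgoing vIRQ; repeated sends OR their payloads together. */
	if (ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &payload) < 0)
		perror("IRQ_TX");

	/* Read and clear any pending incoming payload (poll() first in practice). */
	if (ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &payload) == 0)
		printf("received payload 0x%llx\n", (unsigned long long)payload);

	return 0;
}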
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 841c40a..ae65649 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -262,6 +262,9 @@
 	((idx & CAM_MEM_MGR_HDL_IDX_MASK) | \
 	(fd << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE))) \
 
+#define GET_FD_FROM_HANDLE(hdl) \
+	(hdl >> (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE)) \
+
 #define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK)
 
 #define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \
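The new GET_FD_FROM_HANDLE macro undoes the fd packing performed by the handle-construction macro shown above. A small sketch of decoding both fields from a handle (the handle value itself would come from the camera memory manager):

/* Sketch: split a mem-mgr handle back into its table index and buffer fd. */
#include <stdint.h>
#include <media/cam_req_mgr.h>

static void decode_handle(int32_t hdl, int32_t *idx, int32_t *fd)
{
	*idx = CAM_MEM_MGR_GET_HDL_IDX(hdl);	/* low bits: buffer table index */
	*fd = GET_FD_FROM_HANDLE(hdl);		/* high bits: fd packed at allocation */
}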
diff --git a/include/uapi/media/cam_sync.h b/include/uapi/media/cam_sync.h
index 003c9ad..4a8781f 100644
--- a/include/uapi/media/cam_sync.h
+++ b/include/uapi/media/cam_sync.h
@@ -117,7 +117,7 @@
 	__u32 size;
 	__u32 result;
 	__u32 reserved;
-	__user __u64 ioctl_ptr;
+	__u64 ioctl_ptr;
 };
 
 #define CAM_PRIVATE_IOCTL_CMD \
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 74a8d93..271a731 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -26,9 +26,16 @@
 #define ISP_STATS_STREAM_BIT  0x80000000
 
 #define VFE_HW_LIMIT 1
+#define ISP_KERNEL_STATE 1
 
 struct msm_vfe_cfg_cmd_list;
 
+struct isp_kstate {
+	uint32_t kernel_sofid;
+	uint32_t drop_reconfig;
+	uint32_t vfeid;
+};
+
 enum ISP_START_PIXEL_PATTERN {
 	ISP_BAYER_RGRGRG,
 	ISP_BAYER_GRGRGR,
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index f9466fa..2ad9a6d 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -87,7 +87,7 @@
 #define MIN_RAW_PIX_BYTES	2
 #define MIN_RAW_CMD_BYTES	(RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
 
-#define DL_DEFIO_WRITE_DELAY    5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY    msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
 #define DL_DEFIO_WRITE_DISABLE  (HZ*60) /* "disable" with long delay */
 
 /* remove these once align.h patch is taken into kernel */
diff --git a/include/vservices/Kbuild b/include/vservices/Kbuild
new file mode 100644
index 0000000..8b955fc
--- /dev/null
+++ b/include/vservices/Kbuild
@@ -0,0 +1,2 @@
+header-y += protocol/
+header-y += ioctl.h
diff --git a/include/vservices/buffer.h b/include/vservices/buffer.h
new file mode 100644
index 0000000..910aa07
--- /dev/null
+++ b/include/vservices/buffer.h
@@ -0,0 +1,239 @@
+/*
+ * include/vservices/buffer.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines simple wrapper types for strings and variable-size buffers
+ * that are stored inside Virtual Services message buffers.
+ */
+
+#ifndef _VSERVICES_BUFFER_H_
+#define _VSERVICES_BUFFER_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_string - Virtual Services fixed sized string type
+ * @ptr: String pointer
+ * @max_size: Maximum length of the string in bytes
+ *
+ * A handle to a possibly NUL-terminated string stored in a message buffer. If
+ * the size of the string equals max_size, the string is not NUL-terminated.
+ * If the protocol does not specify an encoding, the encoding is assumed to be
+ * UTF-8. Wide character encodings are not supported by this type; use struct
+ * vs_pbuf for wide character strings.
+ */
+struct vs_string {
+	char *ptr;
+	size_t max_size;
+};
+
+/**
+ * vs_string_copyout - Copy a Virtual Services string to a C string buffer.
+ * @dest: C string to copy to
+ * @src: Virtual Services string to copy from
+ * @max_size: Size of the destination buffer, including the NUL terminator.
+ *
+ * The behaviour is similar to strlcpy(): that is, the copied string
+ * is guaranteed not to exceed the specified size (including the NUL
+ * terminator byte), and is guaranteed to be NUL-terminated as long as
+ * the size is nonzero (unlike strncpy()).
+ *
+ * The return value is the size of the input string (even if the output was
+ * truncated); this is to make truncation easy to detect.
+ */
+static inline size_t
+vs_string_copyout(char *dest, const struct vs_string *src, size_t max_size)
+{
+	size_t src_len = strnlen(src->ptr, src->max_size);
+
+	if (max_size) {
+		size_t dest_len = min(src_len, max_size - 1);
+
+		memcpy(dest, src->ptr, dest_len);
+		dest[dest_len] = '\0';
+	}
+	return src_len;
+}
+
+/**
+ * vs_string_copyin_len - Copy a C string, up to a given length, into a Virtual
+ *                        Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ * @max_size: Maximum number of bytes to copy
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin_len(struct vs_string *dest, const char *src, size_t max_size)
+{
+	strncpy(dest->ptr, src, min(max_size, dest->max_size));
+
+	return strnlen(dest->ptr, dest->max_size);
+}
+
+/**
+ * vs_string_copyin - Copy a C string into a Virtual Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin(struct vs_string *dest, const char *src)
+{
+	return vs_string_copyin_len(dest, src, dest->max_size);
+}
+
+/**
+ * vs_string_length - Return the size of the string stored in a Virtual Services
+ *                    string.
+ * @str: Virtual Service string to get the length of
+ */
+static inline size_t
+vs_string_length(struct vs_string *str)
+{
+	return strnlen(str->ptr, str->max_size);
+}
+
+/**
+ * vs_string_dup - Allocate a C string buffer and copy a Virtual Services string
+ *                 into it.
+ * @str: Virtual Services string to duplicate
+ */
+static inline char *
+vs_string_dup(struct vs_string *str, gfp_t gfp)
+{
+	size_t len;
+	char *ret;
+
+	len = strnlen(str->ptr, str->max_size) + 1;
+	ret = kmalloc(len, gfp);
+	if (ret)
+		vs_string_copyout(ret, str, len);
+	return ret;
+}
+
+/**
+ * vs_string_max_size - Return the maximum size of a Virtual Services string,
+ *                      not including the NUL terminator if the length of the
+ *                      string is equal to max_size.
+ *
+ * @str: Virtual Services string to return the maximum size of.
+ *
+ * @return The maximum size of the string.
+ */
+static inline size_t
+vs_string_max_size(struct vs_string *str)
+{
+	return str->max_size;
+}
+
+/**
+ * struct vs_pbuf - Handle to a variable-size buffered payload.
+ * @data: Data buffer
+ * @size: Current size of the buffer
+ * @max_size: Maximum size of the buffer
+ *
+ * This is similar to struct vs_string, except that it has an explicitly
+ * stored size rather than being NUL-terminated. The functions that
+ * return ssize_t all return the new size of the modified buffer, and
+ * will return a negative size if the buffer overflows.
+ */
+struct vs_pbuf {
+	void *data;
+	size_t size, max_size;
+};
+
+/**
+ * vs_pbuf_size - Get the size of a pbuf
+ * @pbuf: pbuf to get the size of
+ */
+static inline size_t vs_pbuf_size(const struct vs_pbuf *pbuf)
+{
+	return pbuf->size;
+}
+
+/**
+ * vs_pbuf_data - Get the data pointer for a pbuf
+ * @pbuf: pbuf to get the data pointer for
+ */
+static inline const void *vs_pbuf_data(const struct vs_pbuf *pbuf)
+{
+	return pbuf->data;
+}
+
+/**
+ * vs_pbuf_resize - Resize a pbuf
+ * @pbuf: pbuf to resize
+ * @size: New size
+ */
+static inline ssize_t vs_pbuf_resize(struct vs_pbuf *pbuf, size_t size)
+{
+	if (size > pbuf->max_size)
+		return -EOVERFLOW;
+
+	pbuf->size = size;
+	return size;
+}
+
+/**
+ * vs_pbuf_copyin - Copy data into a pbuf
+ * @pbuf: pbuf to copy data into
+ * @offset: Offset to copy data to
+ * @data: Pointer to data to copy into the pbuf
+ * @nbytes: Number of bytes to copy into the pbuf
+ */
+static inline ssize_t vs_pbuf_copyin(struct vs_pbuf *pbuf, off_t offset,
+		const void *data, size_t nbytes)
+{
+	if (offset + nbytes > pbuf->size)
+		return -EOVERFLOW;
+
+	memcpy(pbuf->data + offset, data, nbytes);
+
+	return nbytes;
+}
+
+/**
+ * vs_pbuf_append - Append data to a pbuf
+ * @pbuf: pbuf to append to
+ * @data: Pointer to data to append to the pbuf
+ * @nbytes: Number of bytes to append
+ */
+static inline ssize_t vs_pbuf_append(struct vs_pbuf *pbuf,
+		const void *data, size_t nbytes)
+{
+	if (pbuf->size + nbytes > pbuf->max_size)
+		return -EOVERFLOW;
+
+	memcpy(pbuf->data + pbuf->size, data, nbytes);
+	pbuf->size += nbytes;
+
+	return pbuf->size;
+}
+
+/**
+ * vs_pbuf_dup_string - Duplicate the contents of a pbuf as a C string. The
+ * string is allocated and must be freed using kfree.
+ * @pbuf: pbuf to convert
+ * @gfp_flags: GFP flags for the string allocation
+ */
+static inline char *vs_pbuf_dup_string(struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+	return kstrndup(pbuf->data, pbuf->size, gfp_flags);
+}
+
+#endif /* _VSERVICES_BUFFER_H_ */
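To illustrate the helpers above, a short kernel-side sketch that copies a service string out with truncation detection and appends raw bytes to a pbuf; the surrounding driver context and the message-buffer origin of the two objects are assumed:

/* Sketch: using the vs_string/vs_pbuf wrappers from a driver context. */
#include <linux/kernel.h>
#include <vservices/buffer.h>

static int example_use(struct vs_string *name, struct vs_pbuf *payload)
{
	char buf[32];
	ssize_t ret;

	/* NUL-terminated copy, truncated to the destination size. */
	if (vs_string_copyout(buf, name, sizeof(buf)) >= sizeof(buf))
		pr_warn("service name truncated\n");

	/* Append raw bytes; a negative return means the pbuf would overflow. */
	ret = vs_pbuf_append(payload, "hello", 5);
	if (ret < 0)
		return ret;

	return 0;
}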
diff --git a/include/vservices/ioctl.h b/include/vservices/ioctl.h
new file mode 100644
index 0000000..d96fcab
--- /dev/null
+++ b/include/vservices/ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * vservices/ioctl.h - Interface to service character devices
+ *
+ * Copyright (c) 2016, Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+#define __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/* ioctls that work on any opened service device */
+#define IOCTL_VS_RESET_SERVICE		_IO('4', 0)
+#define IOCTL_VS_GET_NAME		_IOR('4', 1, char[16])
+#define IOCTL_VS_GET_PROTOCOL		_IOR('4', 2, char[32])
+
+/*
+ * Claim a device for user I/O (if no kernel driver is attached). The claim
+ * persists until the char device is closed.
+ */
+struct vs_ioctl_bind {
+	__u32 send_quota;
+	__u32 recv_quota;
+	__u32 send_notify_bits;
+	__u32 recv_notify_bits;
+	size_t msg_size;
+};
+#define IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_ioctl_bind)
+#define IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_ioctl_bind)
+
+/* send and receive messages and notifications */
+#define IOCTL_VS_NOTIFY _IOW('4', 5, __u32)
+struct vs_ioctl_iovec {
+	union {
+		__u32 iovcnt; /* input */
+		__u32 notify_bits; /* output (recv only) */
+	};
+	struct iovec *iov;
+};
+#define IOCTL_VS_SEND _IOW('4', 6, struct vs_ioctl_iovec)
+#define IOCTL_VS_RECV _IOWR('4', 7, struct vs_ioctl_iovec)
+
+#endif /* __LINUX_PUBLIC_VSERVICES_IOCTL_H__ */
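A hedged user-space sketch of the claim-and-send flow using the ioctls above; the device node path under /dev and the include path are assumptions of this example, not defined by the header:

/* Sketch: claim a service for user I/O, then send one message. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <vservices/ioctl.h>

int main(void)
{
	struct vs_ioctl_bind bind;
	struct vs_ioctl_iovec vio;
	struct iovec iov;
	char msg[] = "ping";
	int fd = open("/dev/vservices/example-service", O_RDWR); /* assumed path */

	if (fd < 0)
		return 1;

	/* Claim the service for user I/O and learn its quotas/message size. */
	if (ioctl(fd, IOCTL_VS_BIND_CLIENT, &bind) < 0) {
		perror("IOCTL_VS_BIND_CLIENT");
		return 1;
	}

	iov.iov_base = msg;
	iov.iov_len = sizeof(msg) - 1;
	memset(&vio, 0, sizeof(vio));
	vio.iovcnt = 1;
	vio.iov = &iov;

	/* Send one message gathered from the iovec array. */
	if (ioctl(fd, IOCTL_VS_SEND, &vio) < 0)
		perror("IOCTL_VS_SEND");

	return 0;
}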
diff --git a/include/vservices/protocol/Kbuild b/include/vservices/protocol/Kbuild
new file mode 100644
index 0000000..374d9b6
--- /dev/null
+++ b/include/vservices/protocol/Kbuild
@@ -0,0 +1,12 @@
+#
+# Find all of the protocol directory names, and get the basename followed
+# by a trailing slash.
+#
+protocols=$(shell find include/vservices/protocol/ -mindepth 1 -type d -exec basename {} \;)
+protocol_dirs=$(foreach p, $(protocols), $(p)/)
+
+#
+# Export the headers for all protocols. The kbuild file in each protocol
+# directory specifies exactly which headers to export.
+#
+header-y += $(protocol_dirs)
diff --git a/include/vservices/protocol/block/Kbuild b/include/vservices/protocol/block/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/block/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/block/client.h b/include/vservices/protocol/block/client.h
new file mode 100644
index 0000000..4cd2847
--- /dev/null
+++ b/include/vservices/protocol/block/client.h
@@ -0,0 +1,175 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_BLOCK__)
+#define __VSERVICES_CLIENT_BLOCK__
+
+struct vs_service_device;
+struct vs_client_block_state;
+
+struct vs_client_block {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_block_state *(*alloc) (struct vs_service_device *
+						service);
+	void (*release) (struct vs_client_block_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+	void (*opened) (struct vs_client_block_state * _state);
+
+	void (*reopened) (struct vs_client_block_state * _state);
+
+	void (*closed) (struct vs_client_block_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_block_state * _state);
+
+	struct {
+		int (*ack_read) (struct vs_client_block_state * _state,
+				 void *_opaque, struct vs_pbuf data,
+				 struct vs_mbuf * _mbuf);
+		int (*nack_read) (struct vs_client_block_state * _state,
+				  void *_opaque,
+				  vservice_block_block_io_error_t err);
+
+		int (*ack_write) (struct vs_client_block_state * _state,
+				  void *_opaque);
+		int (*nack_write) (struct vs_client_block_state * _state,
+				   void *_opaque,
+				   vservice_block_block_io_error_t err);
+
+	} io;
+};
+
+struct vs_client_block_state {
+	vservice_block_state_t state;
+	bool readonly;
+	uint32_t sector_size;
+	uint32_t segment_size;
+	uint64_t device_sectors;
+	bool flushable;
+	bool committable;
+	struct {
+		uint32_t sector_size;
+		uint32_t segment_size;
+	} io;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_block_reopen(struct vs_client_block_state *_state);
+
+extern int vs_client_block_close(struct vs_client_block_state *_state);
+
+    /** interface block_io **/
+/* command parallel read */
+extern int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state
+					       *_state, struct vs_pbuf *data,
+					       struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_free_ack_read(struct vs_client_block_state
+					    *_state, struct vs_pbuf *data,
+					    struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_read(struct vs_client_block_state *_state,
+				       void *_opaque, uint64_t sector_index,
+				       uint32_t num_sects, bool nodelay,
+				       bool flush, gfp_t flags);
+
+	/* command parallel write */
+extern struct vs_mbuf *vs_client_block_io_alloc_req_write(struct
+							  vs_client_block_state
+							  *_state,
+							  struct vs_pbuf *data,
+							  gfp_t flags);
+extern int vs_client_block_io_free_req_write(struct vs_client_block_state
+					     *_state, struct vs_pbuf *data,
+					     struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_write(struct vs_client_block_state *_state,
+					void *_opaque, uint64_t sector_index,
+					uint32_t num_sects, bool nodelay,
+					bool flush, bool commit,
+					struct vs_pbuf data,
+					struct vs_mbuf *_mbuf);
+
+/* Status APIs for async parallel commands */
+static inline bool vs_client_block_io_req_read_can_send(struct
+							vs_client_block_state
+							*_state)
+{
+	return !bitmap_full(_state->state.io.read_bitmask,
+			    VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_read_is_pending(struct
+							  vs_client_block_state
+							  *_state)
+{
+	return !bitmap_empty(_state->state.io.read_bitmask,
+			     VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_can_send(struct
+							 vs_client_block_state
+							 *_state)
+{
+	return !bitmap_full(_state->state.io.write_bitmask,
+			    VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_is_pending(struct
+							   vs_client_block_state
+							   *_state)
+{
+	return !bitmap_empty(_state->state.io.write_bitmask,
+			     VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_client_register(struct vs_client_block *client,
+					    const char *name,
+					    struct module *owner);
+
+static inline int vservice_block_client_register(struct vs_client_block *client,
+						 const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_block_client_register(client, name, this_module);
+}
+
+extern int vservice_block_client_unregister(struct vs_client_block *client);
+
+#endif				/* ! __VSERVICES_CLIENT_BLOCK__ */
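For orientation, a minimal registration sketch for a block client driver built on this interface; the callback bodies, include list and driver name are placeholders for illustration, not a working driver:

/* Sketch: registering a vs_client_block with placeholder callbacks. */
#include <linux/module.h>
#include <linux/slab.h>
#include <vservices/buffer.h>
#include <vservices/protocol/block/types.h>
#include <vservices/protocol/block/client.h>

static struct vs_client_block_state *my_alloc(struct vs_service_device *service)
{
	return kzalloc(sizeof(struct vs_client_block_state), GFP_KERNEL);
}

static void my_release(struct vs_client_block_state *state)
{
	kfree(state);
}

static void my_opened(struct vs_client_block_state *state)
{
	/* Session is open; state->readonly, sector_size, etc. are now valid. */
}

static struct vs_client_block my_client = {
	.rx_atomic = false,	/* handlers run from workqueue context, may sleep */
	.alloc = my_alloc,
	.release = my_release,
	.opened = my_opened,
};

static int __init my_init(void)
{
	return vservice_block_client_register(&my_client, "example_block_client");
}

static void __exit my_exit(void)
{
	vservice_block_client_unregister(&my_client);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL v2");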
diff --git a/include/vservices/protocol/block/common.h b/include/vservices/protocol/block/common.h
new file mode 100644
index 0000000..2779b18
--- /dev/null
+++ b/include/vservices/protocol/block/common.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_BLOCK_PROTOCOL_H__)
+#define __VSERVICES_BLOCK_PROTOCOL_H__
+
+#define VSERVICE_BLOCK_PROTOCOL_NAME "com.ok-labs.block"
+typedef enum {
+	VSERVICE_BLOCK_BASE_REQ_OPEN,
+	VSERVICE_BLOCK_BASE_ACK_OPEN,
+	VSERVICE_BLOCK_BASE_NACK_OPEN,
+	VSERVICE_BLOCK_BASE_REQ_CLOSE,
+	VSERVICE_BLOCK_BASE_ACK_CLOSE,
+	VSERVICE_BLOCK_BASE_NACK_CLOSE,
+	VSERVICE_BLOCK_BASE_REQ_REOPEN,
+	VSERVICE_BLOCK_BASE_ACK_REOPEN,
+	VSERVICE_BLOCK_BASE_NACK_REOPEN,
+	VSERVICE_BLOCK_BASE_MSG_RESET,
+	VSERVICE_BLOCK_IO_REQ_READ,
+	VSERVICE_BLOCK_IO_ACK_READ,
+	VSERVICE_BLOCK_IO_NACK_READ,
+	VSERVICE_BLOCK_IO_REQ_WRITE,
+	VSERVICE_BLOCK_IO_ACK_WRITE,
+	VSERVICE_BLOCK_IO_NACK_WRITE,
+} vservice_block_message_id_t;
+typedef enum {
+	VSERVICE_BLOCK_NBIT_IN__COUNT
+} vservice_block_nbit_in_t;
+
+typedef enum {
+	VSERVICE_BLOCK_NBIT_OUT__COUNT
+} vservice_block_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_BLOCK_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/block/server.h b/include/vservices/protocol/block/server.h
new file mode 100644
index 0000000..65b0bfd
--- /dev/null
+++ b/include/vservices/protocol/block/server.h
@@ -0,0 +1,177 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_BLOCK)
+#define VSERVICES_SERVER_BLOCK
+
+struct vs_service_device;
+struct vs_server_block_state;
+
+struct vs_server_block {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_block_state *(*alloc) (struct vs_service_device *
+						service);
+	void (*release) (struct vs_server_block_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+	 vs_server_response_type_t(*open) (struct vs_server_block_state *
+					   _state);
+
+	 vs_server_response_type_t(*reopen) (struct vs_server_block_state *
+					     _state);
+
+	 vs_server_response_type_t(*close) (struct vs_server_block_state *
+					    _state);
+
+	void (*closed) (struct vs_server_block_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_block_state * _state);
+
+	struct {
+		int (*req_read) (struct vs_server_block_state * _state,
+				 uint32_t _opaque, uint64_t sector_index,
+				 uint32_t num_sects, bool nodelay, bool flush);
+
+		int (*req_write) (struct vs_server_block_state * _state,
+				  uint32_t _opaque, uint64_t sector_index,
+				  uint32_t num_sects, bool nodelay, bool flush,
+				  bool commit, struct vs_pbuf data,
+				  struct vs_mbuf * _mbuf);
+
+	} io;
+};
+
+struct vs_server_block_state {
+	vservice_block_state_t state;
+	bool readonly;
+	uint32_t sector_size;
+	uint32_t segment_size;
+	uint64_t device_sectors;
+	bool flushable;
+	bool committable;
+	struct {
+		uint32_t sector_size;
+		uint32_t segment_size;
+	} io;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_block_open_complete(struct vs_server_block_state *_state,
+					 vs_server_response_type_t resp);
+
+extern int vs_server_block_close_complete(struct vs_server_block_state *_state,
+					  vs_server_response_type_t resp);
+
+extern int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+					   vs_server_response_type_t resp);
+
+    /** interface block_io **/
+/* command parallel read */
+extern struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct
+							 vs_server_block_state
+							 *_state,
+							 struct vs_pbuf *data,
+							 gfp_t flags);
+extern int vs_server_block_io_free_ack_read(struct vs_server_block_state
+					    *_state, struct vs_pbuf *data,
+					    struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_read(struct vs_server_block_state
+					    *_state, uint32_t _opaque,
+					    struct vs_pbuf data,
+					    struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_nack_read(struct vs_server_block_state
+					     *_state, uint32_t _opaque,
+					     vservice_block_block_io_error_t
+					     err, gfp_t flags);
+    /* command parallel write */
+extern int vs_server_block_io_getbufs_req_write(struct vs_server_block_state
+						*_state, struct vs_pbuf *data,
+						struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_free_req_write(struct vs_server_block_state
+					     *_state, struct vs_pbuf *data,
+					     struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_write(struct vs_server_block_state
+					     *_state, uint32_t _opaque,
+					     gfp_t flags);
+extern int vs_server_block_io_send_nack_write(struct vs_server_block_state
+					      *_state, uint32_t _opaque,
+					      vservice_block_block_io_error_t
+					      err, gfp_t flags);
+
+static inline bool vs_server_block_io_send_ack_read_is_pending(struct
+							       vs_server_block_state
+							       *_state)
+{
+	return !bitmap_empty(_state->state.io.read_bitmask,
+			     VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_server_block_io_send_ack_write_is_pending(struct
+								vs_server_block_state
+								*_state)
+{
+	return !bitmap_empty(_state->state.io.write_bitmask,
+			     VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_server_register(struct vs_server_block *server,
+					    const char *name,
+					    struct module *owner);
+
+static inline int vservice_block_server_register(struct vs_server_block *server,
+						 const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_block_server_register(server, name, this_module);
+}
+
+extern int vservice_block_server_unregister(struct vs_server_block *server);
+#endif				/* ! VSERVICES_SERVER_BLOCK */
diff --git a/include/vservices/protocol/block/types.h b/include/vservices/protocol/block/types.h
new file mode 100644
index 0000000..52845a3
--- /dev/null
+++ b/include/vservices/protocol/block/types.h
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_BLOCK_TYPES_H)
+#define VSERVICES_BLOCK_TYPES_H
+
+#define VSERVICE_BLOCK_IO_READ_MAX_PENDING 1024
+#define VSERVICE_BLOCK_IO_WRITE_MAX_PENDING 1024
+
+typedef enum vservice_block_block_io_error {
+	VSERVICE_BLOCK_INVALID_INDEX,
+	VSERVICE_BLOCK_MEDIA_FAILURE,
+	VSERVICE_BLOCK_MEDIA_TIMEOUT,
+	VSERVICE_BLOCK_UNSUPPORTED_COMMAND,
+	VSERVICE_BLOCK_SERVICE_RESET
+} vservice_block_block_io_error_t;
+
+typedef enum {
+/* state closed */
+	VSERVICE_BASE_STATE_CLOSED = 0,
+	VSERVICE_BASE_STATE_CLOSED__OPEN,
+	VSERVICE_BASE_STATE_CLOSED__CLOSE,
+	VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+	VSERVICE_BASE_STATE_RUNNING,
+	VSERVICE_BASE_STATE_RUNNING__OPEN,
+	VSERVICE_BASE_STATE_RUNNING__CLOSE,
+	VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+	VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+	vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "closed", "closed__open", "closed__close", "closed__reopen",
+		"running", "running__open", "running__close", "running__reopen"
+	};
+	if (!VSERVICE_BASE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
+
+typedef struct {
+	DECLARE_BITMAP(read_bitmask, VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+	void *read_tags[VSERVICE_BLOCK_IO_READ_MAX_PENDING];
+	 DECLARE_BITMAP(write_bitmask, VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+	void *write_tags[VSERVICE_BLOCK_IO_WRITE_MAX_PENDING];
+} vservice_block_io_state_t;
+
+#define VSERVICE_BLOCK_IO_RESET_STATE (vservice_block_io_state_t) { \
+.read_bitmask = {0}, \
+.read_tags = {NULL}, \
+.write_bitmask = {0}, \
+.write_tags = {NULL}}
+
+#define VSERVICE_BLOCK_IO_STATE_VALID(state) true
+
+typedef struct {
+
+	vservice_base_state_t base;
+
+	vservice_block_io_state_t io;
+} vservice_block_state_t;
+
+#define VSERVICE_BLOCK_RESET_STATE (vservice_block_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.io = VSERVICE_BLOCK_IO_RESET_STATE }
+
+#define VSERVICE_BLOCK_IS_STATE_RESET(state) \
+            ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif				/* ! VSERVICES_BLOCK_TYPES_H */
diff --git a/include/vservices/protocol/core.h b/include/vservices/protocol/core.h
new file mode 100644
index 0000000..3a86af5
--- /dev/null
+++ b/include/vservices/protocol/core.h
@@ -0,0 +1,145 @@
+/*
+ * include/vservices/protocol/core.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * These are the common generated definitions for the core protocol drivers;
+ * specifically the message IDs and the protocol state representation.
+ *
+ * This is currently hand-generated, but will eventually be autogenerated,
+ * from the protocol specifications in core.vs. Please keep it consistent
+ * with that file.
+ */
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__PROTOCOL_NAME 32
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__SERVICE_NAME 16
+
+/*
+ * Identifiers for in-band messages.
+ *
+ * This definition applies in both directions, because there is no practical
+ * limit on message IDs (services are unlikely to define 2^16 distinct message
+ * names).
+ */
+typedef enum {
+	/** simple_protocol core **/
+	/* message out startup */
+	VSERVICE_CORE_MSG_STARTUP,
+
+	/* message out shutdown */
+	VSERVICE_CORE_MSG_SHUTDOWN,
+
+	/* command in sync connect */
+	VSERVICE_CORE_REQ_CONNECT,
+	VSERVICE_CORE_ACK_CONNECT,
+	VSERVICE_CORE_NACK_CONNECT,
+
+	/* command in sync disconnect */
+	VSERVICE_CORE_REQ_DISCONNECT,
+	VSERVICE_CORE_ACK_DISCONNECT,
+	VSERVICE_CORE_NACK_DISCONNECT,
+
+	/* command in service_count */
+	VSERVICE_CORE_REQ_SERVICE_COUNT,
+	VSERVICE_CORE_ACK_SERVICE_COUNT,
+	VSERVICE_CORE_NACK_SERVICE_COUNT,
+
+	/* command in queued service_info */
+	VSERVICE_CORE_REQ_SERVICE_INFO,
+	VSERVICE_CORE_ACK_SERVICE_INFO,
+	VSERVICE_CORE_NACK_SERVICE_INFO,
+
+	/* message inout service_reset */
+	VSERVICE_CORE_MSG_SERVICE_RESET,
+
+	/* message inout service_ready */
+	VSERVICE_CORE_MSG_SERVICE_READY,
+
+	/* message out notification bits */
+	VSERVICE_CORE_MSG_NOTIFICATION_BITS_INFO,
+
+} vservice_core_message_id_t;
+
+/*
+ * Notification bits are defined separately for each direction because there
+ * is relatively limited space to allocate them from (specifically, the bits in
+ * a machine word). It is unlikely but possible for a protocol to reach this
+ * limit.
+ */
+
+/* Bits in the in (client -> server) notification bitmask. */
+typedef enum {
+	/** simple_protocol core **/
+	/* No in notifications */
+
+	VSERVICE_CORE_NBIT_IN__COUNT = 0,
+} vservice_core_nbit_in_t;
+
+/* Masks for the in notification bits */
+/* No in notifications */
+
+/* Bits in the out (server -> client) notification bitmask. */
+typedef enum {
+	/** simple_protocol core **/
+	/* notification out reenumerate */
+	VSERVICE_CORE_NBIT_OUT_REENUMERATE = 0,
+
+	VSERVICE_CORE_NBIT_OUT__COUNT,
+} vservice_core_nbit_out_t;
+
+/* Masks for the out notification bits */
+#define VSERVICE_CORE_NMASK_OUT_REENUMERATE \
+		(1 << VSERVICE_CORE_NBIT_OUT_REENUMERATE)
+
+/* Valid states of the interface's generated state machine. */
+typedef enum {
+	/* state offline */
+	VSERVICE_CORE_STATE_OFFLINE = 0,
+
+	/* state disconnected */
+	VSERVICE_CORE_STATE_DISCONNECTED,
+	VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+
+	/* state connected */
+	VSERVICE_CORE_STATE_CONNECTED,
+	VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+	/* reset offline */
+	VSERVICE_CORE_STATE__RESET = VSERVICE_CORE_STATE_OFFLINE,
+} vservice_core_statenum_t;
+
+typedef struct {
+	vservice_core_statenum_t statenum;
+	bool pending_service_count;
+	unsigned pending_service_info;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+	.statenum = VSERVICE_CORE_STATE__RESET, \
+	.pending_service_count = false, \
+	.pending_service_info = 0 }
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_OFFLINE))
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+	((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT))
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) ( \
+	((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+	((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) \
+	VSERVICE_CORE_STATE_IS_OFFLINE(state) ? ( \
+		((state).pending_service_count == false) && \
+		((state).pending_service_info == 0)) : \
+	VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? ( \
+		((state).pending_service_count == false) && \
+		((state).pending_service_info == 0)) : \
+	VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+	false)
diff --git a/include/vservices/protocol/core/Kbuild b/include/vservices/protocol/core/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/core/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/core/client.h b/include/vservices/protocol/core/client.h
new file mode 100644
index 0000000..3d52999
--- /dev/null
+++ b/include/vservices/protocol/core/client.h
@@ -0,0 +1,155 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_CORE__)
+#define __VSERVICES_CLIENT_CORE__
+
+struct vs_service_device;
+struct vs_client_core_state;
+
+struct vs_client_core {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_core_state *(*alloc) (struct vs_service_device *
+					       service);
+	void (*release) (struct vs_client_core_state * _state);
+
+	struct vs_service_driver *driver;
+
+	/** Core service base interface **/
+	void (*start) (struct vs_client_core_state * _state);
+	void (*reset) (struct vs_client_core_state * _state);
+    /** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_core_state * _state);
+
+	struct {
+		int (*state_change) (struct vs_client_core_state * _state,
+				     vservice_core_statenum_t old,
+				     vservice_core_statenum_t new);
+
+		int (*ack_connect) (struct vs_client_core_state * _state);
+		int (*nack_connect) (struct vs_client_core_state * _state);
+
+		int (*ack_disconnect) (struct vs_client_core_state * _state);
+		int (*nack_disconnect) (struct vs_client_core_state * _state);
+
+		int (*msg_startup) (struct vs_client_core_state * _state,
+				    uint32_t core_in_quota,
+				    uint32_t core_out_quota);
+
+		int (*msg_shutdown) (struct vs_client_core_state * _state);
+
+		int (*msg_service_created) (struct vs_client_core_state *
+					    _state, uint32_t service_id,
+					    struct vs_string service_name,
+					    struct vs_string protocol_name,
+					    struct vs_mbuf * _mbuf);
+
+		int (*msg_service_removed) (struct vs_client_core_state *
+					    _state, uint32_t service_id);
+
+		int (*msg_server_ready) (struct vs_client_core_state * _state,
+					 uint32_t service_id, uint32_t in_quota,
+					 uint32_t out_quota,
+					 uint32_t in_bit_offset,
+					 uint32_t in_num_bits,
+					 uint32_t out_bit_offset,
+					 uint32_t out_num_bits);
+
+		int (*msg_service_reset) (struct vs_client_core_state * _state,
+					  uint32_t service_id);
+
+	} core;
+};
+
+struct vs_client_core_state {
+	vservice_core_protocol_state_t state;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_core_reopen(struct vs_client_core_state *_state);
+
+extern int vs_client_core_close(struct vs_client_core_state *_state);
+
+    /** interface core **/
+/* command sync connect */
+extern int vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+					   gfp_t flags);
+
+	/* command sync disconnect */
+extern int vs_client_core_core_req_disconnect(struct vs_client_core_state
+					      *_state, gfp_t flags);
+
+	/* message startup */
+/* message shutdown */
+/* message service_created */
+extern int vs_client_core_core_getbufs_service_created(struct
+						       vs_client_core_state
+						       *_state,
+						       struct vs_string
+						       *service_name,
+						       struct vs_string
+						       *protocol_name,
+						       struct vs_mbuf *_mbuf);
+extern int vs_client_core_core_free_service_created(struct vs_client_core_state
+						    *_state,
+						    struct vs_string
+						    *service_name,
+						    struct vs_string
+						    *protocol_name,
+						    struct vs_mbuf *_mbuf);
+    /* message service_removed */
+/* message server_ready */
+/* message service_reset */
+extern int vs_client_core_core_send_service_reset(struct vs_client_core_state
+						  *_state, uint32_t service_id,
+						  gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_client_register(struct vs_client_core *client,
+					   const char *name,
+					   struct module *owner);
+
+static inline int vservice_core_client_register(struct vs_client_core *client,
+						const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_core_client_register(client, name, this_module);
+}
+
+extern int vservice_core_client_unregister(struct vs_client_core *client);
+
+#endif				/* ! __VSERVICES_CLIENT_CORE__ */
diff --git a/include/vservices/protocol/core/common.h b/include/vservices/protocol/core/common.h
new file mode 100644
index 0000000..b496416
--- /dev/null
+++ b/include/vservices/protocol/core/common.h
@@ -0,0 +1,38 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CORE_PROTOCOL_H__)
+#define __VSERVICES_CORE_PROTOCOL_H__
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+typedef enum {
+	VSERVICE_CORE_CORE_REQ_CONNECT,
+	VSERVICE_CORE_CORE_ACK_CONNECT,
+	VSERVICE_CORE_CORE_NACK_CONNECT,
+	VSERVICE_CORE_CORE_REQ_DISCONNECT,
+	VSERVICE_CORE_CORE_ACK_DISCONNECT,
+	VSERVICE_CORE_CORE_NACK_DISCONNECT,
+	VSERVICE_CORE_CORE_MSG_STARTUP,
+	VSERVICE_CORE_CORE_MSG_SHUTDOWN,
+	VSERVICE_CORE_CORE_MSG_SERVICE_CREATED,
+	VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED,
+	VSERVICE_CORE_CORE_MSG_SERVER_READY,
+	VSERVICE_CORE_CORE_MSG_SERVICE_RESET,
+} vservice_core_message_id_t;
+typedef enum {
+	VSERVICE_CORE_NBIT_IN__COUNT
+} vservice_core_nbit_in_t;
+
+typedef enum {
+	VSERVICE_CORE_NBIT_OUT__COUNT
+} vservice_core_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_CORE_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/core/server.h b/include/vservices/protocol/core/server.h
new file mode 100644
index 0000000..959b8c3
--- /dev/null
+++ b/include/vservices/protocol/core/server.h
@@ -0,0 +1,171 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_CORE)
+#define VSERVICES_SERVER_CORE
+
+struct vs_service_device;
+struct vs_server_core_state;
+
+struct vs_server_core {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_core_state *(*alloc) (struct vs_service_device *
+					       service);
+	void (*release) (struct vs_server_core_state * _state);
+
+	struct vs_service_driver *driver;
+
+	/** Core service base interface **/
+	void (*start) (struct vs_server_core_state * _state);
+	void (*reset) (struct vs_server_core_state * _state);
+    /** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_core_state * _state);
+
+	struct {
+		int (*state_change) (struct vs_server_core_state * _state,
+				     vservice_core_statenum_t old,
+				     vservice_core_statenum_t new);
+
+		int (*req_connect) (struct vs_server_core_state * _state);
+
+		int (*req_disconnect) (struct vs_server_core_state * _state);
+
+		int (*msg_service_reset) (struct vs_server_core_state * _state,
+					  uint32_t service_id);
+
+	} core;
+};
+
+struct vs_server_core_state {
+	vservice_core_protocol_state_t state;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+
+    /** interface core **/
+/* command sync connect */
+extern int vs_server_core_core_send_ack_connect(struct vs_server_core_state
+						*_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_connect(struct vs_server_core_state
+						 *_state, gfp_t flags);
+    /* command sync disconnect */
+extern int vs_server_core_core_send_ack_disconnect(struct vs_server_core_state
+						   *_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_disconnect(struct vs_server_core_state
+						    *_state, gfp_t flags);
+    /* message startup */
+extern int vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+					    uint32_t core_in_quota,
+					    uint32_t core_out_quota,
+					    gfp_t flags);
+
+	    /* message shutdown */
+extern int vs_server_core_core_send_shutdown(struct vs_server_core_state
+					     *_state, gfp_t flags);
+
+	    /* message service_created */
+extern struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+								 vs_server_core_state
+								 *_state,
+								 struct
+								 vs_string
+								 *service_name,
+								 struct
+								 vs_string
+								 *protocol_name,
+								 gfp_t flags);
+extern int vs_server_core_core_free_service_created(struct vs_server_core_state
+						    *_state,
+						    struct vs_string
+						    *service_name,
+						    struct vs_string
+						    *protocol_name,
+						    struct vs_mbuf *_mbuf);
+extern int vs_server_core_core_send_service_created(struct vs_server_core_state
+						    *_state,
+						    uint32_t service_id,
+						    struct vs_string
+						    service_name,
+						    struct vs_string
+						    protocol_name,
+						    struct vs_mbuf *_mbuf);
+
+	    /* message service_removed */
+extern int vs_server_core_core_send_service_removed(struct vs_server_core_state
+						    *_state,
+						    uint32_t service_id,
+						    gfp_t flags);
+
+	    /* message server_ready */
+extern int vs_server_core_core_send_server_ready(struct vs_server_core_state
+						 *_state, uint32_t service_id,
+						 uint32_t in_quota,
+						 uint32_t out_quota,
+						 uint32_t in_bit_offset,
+						 uint32_t in_num_bits,
+						 uint32_t out_bit_offset,
+						 uint32_t out_num_bits,
+						 gfp_t flags);
+
+	    /* message service_reset */
+extern int vs_server_core_core_send_service_reset(struct vs_server_core_state
+						  *_state, uint32_t service_id,
+						  gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_server_register(struct vs_server_core *server,
+					   const char *name,
+					   struct module *owner);
+
+static inline int vservice_core_server_register(struct vs_server_core *server,
+						const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_core_server_register(server, name, this_module);
+}
+
+extern int vservice_core_server_unregister(struct vs_server_core *server);
+#endif				/* ! VSERVICES_SERVER_CORE */
diff --git a/include/vservices/protocol/core/types.h b/include/vservices/protocol/core/types.h
new file mode 100644
index 0000000..2d6928d
--- /dev/null
+++ b/include/vservices/protocol/core/types.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_CORE_TYPES_H)
+#define VSERVICES_CORE_TYPES_H
+
+#define VSERVICE_CORE_SERVICE_NAME_SIZE (uint32_t)16
+
+#define VSERVICE_CORE_PROTOCOL_NAME_SIZE (uint32_t)32
+
+typedef enum {
+/* state offline */
+	VSERVICE_CORE_STATE_OFFLINE = 0,
+	VSERVICE_CORE_STATE_OFFLINE__CONNECT,
+	VSERVICE_CORE_STATE_OFFLINE__DISCONNECT,
+
+/* state disconnected */
+	VSERVICE_CORE_STATE_DISCONNECTED,
+	VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+	VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT,
+
+/* state connected */
+	VSERVICE_CORE_STATE_CONNECTED,
+	VSERVICE_CORE_STATE_CONNECTED__CONNECT,
+	VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+	VSERVICE_CORE__RESET = VSERVICE_CORE_STATE_OFFLINE
+} vservice_core_statenum_t;
+
+typedef struct {
+	vservice_core_statenum_t statenum;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+.statenum = VSERVICE_CORE__RESET}
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) (\
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) ( \
+VSERVICE_CORE_STATE_IS_OFFLINE(state) ? true : \
+VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? true : \
+VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+false)
+
+static inline const char *vservice_core_get_state_string(vservice_core_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "offline", "offline__connect", "offline__disconnect",
+		"disconnected", "disconnected__connect",
+		    "disconnected__disconnect",
+		"connected", "connected__connect", "connected__disconnect"
+	};
+	if (!VSERVICE_CORE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
+
+typedef struct {
+
+	vservice_core_state_t core;
+} vservice_core_protocol_state_t;
+
+#define VSERVICE_CORE_PROTOCOL_RESET_STATE (vservice_core_protocol_state_t) {\
+.core = VSERVICE_CORE_RESET_STATE }
+#endif				/* ! VSERVICES_CORE_TYPES_H */
diff --git a/include/vservices/protocol/serial/Kbuild b/include/vservices/protocol/serial/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/serial/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/serial/client.h b/include/vservices/protocol/serial/client.h
new file mode 100644
index 0000000..78efed2e
--- /dev/null
+++ b/include/vservices/protocol/serial/client.h
@@ -0,0 +1,114 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_SERIAL__)
+#define __VSERVICES_CLIENT_SERIAL__
+
+struct vs_service_device;
+struct vs_client_serial_state;
+
+struct vs_client_serial {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+    /** session setup **/
+	struct vs_client_serial_state *(*alloc) (struct vs_service_device *
+						 service);
+	void (*release) (struct vs_client_serial_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+	void (*opened) (struct vs_client_serial_state * _state);
+
+	void (*reopened) (struct vs_client_serial_state * _state);
+
+	void (*closed) (struct vs_client_serial_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_client_serial_state * _state);
+
+	struct {
+		int (*msg_msg) (struct vs_client_serial_state * _state,
+				struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+	} serial;
+};
+
+struct vs_client_serial_state {
+	vservice_serial_protocol_state_t state;
+	uint32_t packet_size;
+	struct {
+		uint32_t packet_size;
+	} serial;
+	struct vs_service_device *service;
+	bool released;
+};
+
+extern int vs_client_serial_reopen(struct vs_client_serial_state *_state);
+
+extern int vs_client_serial_close(struct vs_client_serial_state *_state);
+
+    /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct
+							 vs_client_serial_state
+							 *_state,
+							 struct vs_pbuf *b,
+							 gfp_t flags);
+extern int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state
+					       *_state, struct vs_pbuf *b,
+					       struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_free_msg(struct vs_client_serial_state
+					    *_state, struct vs_pbuf *b,
+					    struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_send_msg(struct vs_client_serial_state
+					    *_state, struct vs_pbuf b,
+					    struct vs_mbuf *_mbuf);
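+
+/*
+ * Example (illustrative sketch only, not part of the generated API): a
+ * typical transmit path allocates a message, fills the payload buffer and
+ * then sends it. The helper name my_serial_tx is hypothetical, the caller is
+ * assumed to hold the service state lock, and struct vs_pbuf is assumed to
+ * expose data and size fields.
+ *
+ *	static int my_serial_tx(struct vs_client_serial_state *state,
+ *				const void *data, size_t len)
+ *	{
+ *		struct vs_pbuf pbuf;
+ *		struct vs_mbuf *mbuf;
+ *		int err;
+ *
+ *		mbuf = vs_client_serial_serial_alloc_msg(state, &pbuf,
+ *							 GFP_KERNEL);
+ *		if (IS_ERR(mbuf))
+ *			return PTR_ERR(mbuf);
+ *
+ *		memcpy(pbuf.data, data, len);
+ *		pbuf.size = len;
+ *
+ *		err = vs_client_serial_serial_send_msg(state, pbuf, mbuf);
+ *		if (err)
+ *			vs_client_serial_serial_free_msg(state, &pbuf, mbuf);
+ *		return err;
+ *	}
+ */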
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_client_register(struct vs_client_serial *client,
+					     const char *name,
+					     struct module *owner);
+
+static inline int vservice_serial_client_register(struct vs_client_serial
+						  *client, const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_serial_client_register(client, name, this_module);
+}
+
+extern int vservice_serial_client_unregister(struct vs_client_serial *client);
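+
+/*
+ * Example (illustrative sketch only): a minimal client driver might be
+ * declared and registered as below. All my_serial_* names are hypothetical.
+ *
+ *	static struct vs_client_serial my_serial_client = {
+ *		.rx_atomic	= false,
+ *		.alloc		= my_serial_alloc,
+ *		.release	= my_serial_release,
+ *		.opened		= my_serial_opened,
+ *		.closed		= my_serial_closed,
+ *		.serial		= { .msg_msg = my_serial_msg },
+ *	};
+ *
+ *	static int __init my_serial_init(void)
+ *	{
+ *		return vservice_serial_client_register(&my_serial_client,
+ *						       "my_serial");
+ *	}
+ *	module_init(my_serial_init);
+ */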
+
+#endif				/* ! __VSERVICES_CLIENT_SERIAL__ */
diff --git a/include/vservices/protocol/serial/common.h b/include/vservices/protocol/serial/common.h
new file mode 100644
index 0000000..a530645
--- /dev/null
+++ b/include/vservices/protocol/serial/common.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_SERIAL_PROTOCOL_H__)
+#define __VSERVICES_SERIAL_PROTOCOL_H__
+
+#define VSERVICE_SERIAL_PROTOCOL_NAME "com.ok-labs.serial"
+typedef enum {
+	VSERVICE_SERIAL_BASE_REQ_OPEN,
+	VSERVICE_SERIAL_BASE_ACK_OPEN,
+	VSERVICE_SERIAL_BASE_NACK_OPEN,
+	VSERVICE_SERIAL_BASE_REQ_CLOSE,
+	VSERVICE_SERIAL_BASE_ACK_CLOSE,
+	VSERVICE_SERIAL_BASE_NACK_CLOSE,
+	VSERVICE_SERIAL_BASE_REQ_REOPEN,
+	VSERVICE_SERIAL_BASE_ACK_REOPEN,
+	VSERVICE_SERIAL_BASE_NACK_REOPEN,
+	VSERVICE_SERIAL_BASE_MSG_RESET,
+	VSERVICE_SERIAL_SERIAL_MSG_MSG,
+} vservice_serial_message_id_t;
+typedef enum {
+	VSERVICE_SERIAL_NBIT_IN__COUNT
+} vservice_serial_nbit_in_t;
+
+typedef enum {
+	VSERVICE_SERIAL_NBIT_OUT__COUNT
+} vservice_serial_nbit_out_t;
+
+/* Notification mask macros */
+#endif				/* ! __VSERVICES_SERIAL_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/serial/server.h b/include/vservices/protocol/serial/server.h
new file mode 100644
index 0000000..001fed5
--- /dev/null
+++ b/include/vservices/protocol/serial/server.h
@@ -0,0 +1,134 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_SERIAL)
+#define VSERVICES_SERVER_SERIAL
+
+struct vs_service_device;
+struct vs_server_serial_state;
+
+struct vs_server_serial {
+
+	/*
+	 * If set to false then the receive message handlers are run from
+	 * workqueue context and are allowed to sleep. If set to true the
+	 * message handlers are run from tasklet context and may not sleep.
+	 */
+	bool rx_atomic;
+
+	/*
+	 * If this is set to true along with rx_atomic, the driver is allowed
+	 * to send messages from softirq contexts other than the receive
+	 * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+	 * messages may only be sent from the receive message handlers, or
+	 * from task context after calling vs_service_state_lock. This must
+	 * not be set to true if rx_atomic is set to false.
+	 */
+	bool tx_atomic;
+
+	/*
+	 * These are the driver's recommended message quotas. They are used
+	 * by the core service to select message quotas for services with no
+	 * explicitly configured quotas.
+	 */
+	u32 in_quota_best;
+	u32 out_quota_best;
+    /** session setup **/
+	struct vs_server_serial_state *(*alloc) (struct vs_service_device *
+						 service);
+	void (*release) (struct vs_server_serial_state * _state);
+
+	struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+	 vs_server_response_type_t(*open) (struct vs_server_serial_state *
+					   _state);
+
+	 vs_server_response_type_t(*reopen) (struct vs_server_serial_state *
+					     _state);
+
+	 vs_server_response_type_t(*close) (struct vs_server_serial_state *
+					    _state);
+
+	void (*closed) (struct vs_server_serial_state * _state);
+
+/** Send/receive state callbacks **/
+	int (*tx_ready) (struct vs_server_serial_state * _state);
+
+	struct {
+		int (*msg_msg) (struct vs_server_serial_state * _state,
+				struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+	} serial;
+};
+
+struct vs_server_serial_state {
+	vservice_serial_protocol_state_t state;
+	uint32_t packet_size;
+	struct {
+		uint32_t packet_size;
+	} serial;
+	struct vs_service_device *service;
+	bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+					  vs_server_response_type_t resp);
+
+extern int vs_server_serial_close_complete(struct vs_server_serial_state
+					   *_state,
+					   vs_server_response_type_t resp);
+
+extern int vs_server_serial_reopen_complete(struct vs_server_serial_state
+					    *_state,
+					    vs_server_response_type_t resp);
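+
+/*
+ * Example (illustrative sketch only): an open handler may answer immediately,
+ * or defer the response and complete it later with
+ * vs_server_serial_open_complete(). All my_* names are hypothetical.
+ *
+ *	static vs_server_response_type_t
+ *	my_serial_open(struct vs_server_serial_state *state)
+ *	{
+ *		if (!my_hw_available())
+ *			return VS_SERVER_RESP_FAILURE;
+ *
+ *		if (my_hw_needs_async_setup()) {
+ *			my_hw_start_setup(state);
+ *			return VS_SERVER_RESP_EXPLICIT_COMPLETE;
+ *		}
+ *
+ *		return VS_SERVER_RESP_SUCCESS;
+ *	}
+ *
+ *	static void my_hw_setup_done(struct vs_server_serial_state *state)
+ *	{
+ *		vs_server_serial_open_complete(state, VS_SERVER_RESP_SUCCESS);
+ *	}
+ */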
+
+    /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct
+							 vs_server_serial_state
+							 *_state,
+							 struct vs_pbuf *b,
+							 gfp_t flags);
+extern int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state
+					       *_state, struct vs_pbuf *b,
+					       struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_free_msg(struct vs_server_serial_state
+					    *_state, struct vs_pbuf *b,
+					    struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_send_msg(struct vs_server_serial_state
+					    *_state, struct vs_pbuf b,
+					    struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_server_register(struct vs_server_serial *server,
+					     const char *name,
+					     struct module *owner);
+
+static inline int vservice_serial_server_register(struct vs_server_serial
+						  *server, const char *name)
+{
+#ifdef MODULE
+	extern struct module __this_module;
+	struct module *this_module = &__this_module;
+#else
+	struct module *this_module = NULL;
+#endif
+
+	return __vservice_serial_server_register(server, name, this_module);
+}
+
+extern int vservice_serial_server_unregister(struct vs_server_serial *server);
+#endif				/* ! VSERVICES_SERVER_SERIAL */
diff --git a/include/vservices/protocol/serial/types.h b/include/vservices/protocol/serial/types.h
new file mode 100644
index 0000000..46edf95
--- /dev/null
+++ b/include/vservices/protocol/serial/types.h
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERIAL_TYPES_H)
+#define VSERVICES_SERIAL_TYPES_H
+
+typedef enum {
+/* state closed */
+	VSERVICE_BASE_STATE_CLOSED = 0,
+	VSERVICE_BASE_STATE_CLOSED__OPEN,
+	VSERVICE_BASE_STATE_CLOSED__CLOSE,
+	VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+	VSERVICE_BASE_STATE_RUNNING,
+	VSERVICE_BASE_STATE_RUNNING__OPEN,
+	VSERVICE_BASE_STATE_RUNNING__CLOSE,
+	VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+	VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+	vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+							 state)
+{
+	static const char *names[] =
+	    { "closed", "closed__open", "closed__close", "closed__reopen",
+		"running", "running__open", "running__close", "running__reopen"
+	};
+	if (!VSERVICE_BASE_STATE_VALID(state)) {
+		return "INVALID";
+	}
+	return names[state.statenum];
+}
+
+typedef struct {
+} vservice_serial_state_t;
+
+#define VSERVICE_SERIAL_RESET_STATE (vservice_serial_state_t) { \
+}
+
+#define VSERVICE_SERIAL_STATE_VALID(state) true
+
+typedef struct {
+
+	vservice_base_state_t base;
+
+	vservice_serial_state_t serial;
+} vservice_serial_protocol_state_t;
+
+#define VSERVICE_SERIAL_PROTOCOL_RESET_STATE (vservice_serial_protocol_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.serial = VSERVICE_SERIAL_RESET_STATE }
+
+#define VSERVICE_SERIAL_IS_STATE_RESET(state) \
+            ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif				/* ! VSERVICES_SERIAL_TYPES_H */
diff --git a/include/vservices/service.h b/include/vservices/service.h
new file mode 100644
index 0000000..af232b6
--- /dev/null
+++ b/include/vservices/service.h
@@ -0,0 +1,674 @@
+/*
+ * include/vservices/service.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the driver and device types for vServices client and
+ * server drivers. These are generally defined by generated protocol-layer
+ * code. However, they can also be defined directly by applications that
+ * don't require protocol generation.
+ */
+
+#ifndef _VSERVICE_SERVICE_H_
+#define _VSERVICE_SERVICE_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)
+#include <asm/atomic.h>
+#else
+#include <linux/atomic.h>
+#endif
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/types.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_service_driver - Virtual service driver structure
+ * @protocol: Protocol name for this driver
+ * @is_server: True if this is a server driver, false if it is a client driver
+ * @rx_atomic: If set to false then the receive message handlers are run from
+ *	     workqueue context and are allowed to sleep. If set to true
+ *	     the message handlers are run from tasklet context and may not
+ *	     sleep. For this purpose, tx_ready is considered a receive
+ *	     message handler.
+ * @tx_atomic: If this is set to true along with rx_atomic, the driver is
+ *	allowed to send messages from softirq contexts other than the receive
+ *	message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ *	messages may only be sent from the receive message handlers, or from
+ *	task context after calling vs_service_state_lock.
+ * @probe: Probe function for this service
+ * @remove: Remove function for this service
+ * --- Callbacks ---
+ * @receive: Message handler function for this service
+ * @notify: Incoming notification handler function for this service
+ * @start: Callback which is run when this service is started
+ * @reset: Callback which is run when this service is reset
+ * @tx_ready: Callback which is run when the service has dropped below its
+ *	    send quota
+ * --- Resource requirements (valid for server only) ---
+ * @in_quota_min: minimum number of input messages for protocol functionality
+ * @in_quota_best: suggested number of input messages
+ * @out_quota_min: minimum number of output messages for protocol functionality
+ * @out_quota_best: suggested number of output messages
+ * @in_notify_count: number of input notification bits used
+ * @out_notify_count: number of output notification bits used
+ * --- Internal ---
+ * @driver: Linux device model driver structure
+ *
+ * The callback functions for a virtual service driver are all called from
+ * the virtual service device's work queue.
+ */
+struct vs_service_driver {
+	const char *protocol;
+	bool is_server;
+	bool rx_atomic, tx_atomic;
+
+	int (*probe)(struct vs_service_device *service);
+	int (*remove)(struct vs_service_device *service);
+
+	int (*receive)(struct vs_service_device *service,
+		struct vs_mbuf *mbuf);
+	void (*notify)(struct vs_service_device *service, u32 flags);
+
+	void (*start)(struct vs_service_device *service);
+	void (*reset)(struct vs_service_device *service);
+
+	int (*tx_ready)(struct vs_service_device *service);
+
+	unsigned in_quota_min;
+	unsigned in_quota_best;
+	unsigned out_quota_min;
+	unsigned out_quota_best;
+	unsigned in_notify_count;
+	unsigned out_notify_count;
+
+	struct device_driver driver;
+};
+
+#define to_vs_service_driver(d) \
+	container_of(d, struct vs_service_driver, driver)
+
+/* The vServices server/client bus types */
+extern struct bus_type vs_client_bus_type;
+extern struct bus_type vs_server_bus_type;
+
+/**
+ * struct vs_service_stats - Virtual service statistics
+ * @over_quota_time: Internal counter for tracking over quota time.
+ * @sent_mbufs: Total number of message buffers sent.
+ * @sent_bytes: Total bytes sent.
+ * @send_failures: Total number of send failures.
+ * @recv_mbufs: Total number of message buffers received.
+ * @recv_bytes: Total number of bytes received.
+ * @recv_failures: Total number of receive failures.
+ * @nr_over_quota: Number of times an mbuf allocation has failed because the
+ *                 service is over quota.
+ * @nr_tx_ready: Number of times the service has run its tx_ready handler
+ * @over_quota_time_total: The total amount of time in milliseconds that the
+ *                         service has spent over quota. Measured as the time
+ *                         between exceeding quota in mbuf allocation and
+ *                         running the tx_ready handler.
+ * @over_quota_time_avg: The average amount of time in milliseconds that the
+ *                       service is spending in the over quota state.
+ */
+struct vs_service_stats {
+	unsigned long	over_quota_time;
+
+	atomic_t        sent_mbufs;
+	atomic_t        sent_bytes;
+	atomic_t	send_failures;
+	atomic_t        recv_mbufs;
+	atomic_t        recv_bytes;
+	atomic_t	recv_failures;
+	atomic_t        nr_over_quota;
+	atomic_t        nr_tx_ready;
+	atomic_t        over_quota_time_total;
+	atomic_t        over_quota_time_avg;
+};
+
+/**
+ * struct vs_service_device - Virtual service device
+ * @id: Unique ID (to the session) for this service
+ * @name: Service name
+ * @sysfs_name: The sysfs name for the service
+ * @protocol: Service protocol name
+ * @is_server: True if this device is server, false if it is a client
+ * @owner: service responsible for managing this service. This must be
+ *     on the same session, and is NULL iff this is the core service.
+ *     It must not be a service whose driver has tx_atomic set.
+ * @lock_subclass: the number of generations of owners between this service
+ *     and the core service; 0 for the core service, 1 for anything directly
+ *     created by it, and so on. This is only used for verifying lock
+ *     ordering (when lockdep is enabled), hence the name.
+ * @ready_lock: mutex protecting readiness, disable_count and driver_probed.
+ *     This depends on the state_mutex of the service's owner, if any. Acquire
+ *     it using mutex_lock_nested(ready_lock, lock_subclass).
+ * @readiness: Service's readiness state, owned by session layer.
+ * @disable_count: Number of times the service has been disabled without
+ *     a matching enable.
+ * @driver_probed: True if a driver has been probed (and not removed)
+ * @work_queue: Work queue for this service's task-context work.
+ * @rx_tasklet: Tasklet for handling incoming messages. This is only used
+ *     if the service driver has rx_atomic set to true. Otherwise
+ *     incoming messages are handled on the workqueue by rx_work.
+ * @rx_work: Work structure for handling incoming messages. This is only
+ *     used if the service driver has rx_atomic set to false.
+ * @rx_lock: Spinlock which protects access to rx_queue and tx_ready
+ * @rx_queue: Queue of incoming messages
+ * @tx_ready: Flag indicating that a tx_ready event is pending
+ * @tx_batching: Flag indicating that outgoing messages are being batched
+ * @state_spinlock: spinlock used to protect the service state if the
+ *     service driver has tx_atomic (and rx_atomic) set to true. This
+ *     depends on the service's ready_lock. Acquire it only by
+ *     calling vs_service_state_lock_bh().
+ * @state_mutex: mutex used to protect the service state if the service
+ *     driver has tx_atomic set to false. This depends on the service's
+ *     ready_lock, and if rx_atomic is true, the rx_tasklet must be
+ *     disabled while it is held. Acquire it only by calling
+ *     vs_service_state_lock().
+ * @state_spinlock_used: Flag to check if the state spinlock has been acquired.
+ * @state_mutex_used: Flag to check if the state mutex has been acquired.
+ * @reset_work: Work to reset the service after a driver fails
+ * @pending_reset: Set if reset_work has been queued and not completed.
+ * @ready_work: Work to make service ready after a throttling delay
+ * @cooloff_work: Work for cooling off reset throttling after the reset
+ * throttling limit was hit
+ * @cleanup_work: Work for cleaning up and freeing the service structure
+ * @last_reset: Time in jiffies at which this service last reset
+ * @last_reset_request: Time in jiffies the last reset request for this
+ *     service occurred at
+ * @last_ready: Time in jiffies at which this service last became ready
+ * @reset_delay: Time in jiffies that the next throttled reset will be
+ *     delayed for. A value of zero means that reset throttling is not in
+ *     effect.
+ * @is_over_quota: Internal flag for whether the service is over quota. This
+ *                 flag is only used for stats accounting.
+ * @quota_wq: waitqueue that is woken whenever the available send quota
+ *            increases.
+ * @notify_send_bits: The number of bits allocated for outgoing notifications.
+ * @notify_send_offset: The first bit allocated for outgoing notifications.
+ * @notify_recv_bits: The number of bits allocated for incoming notifications.
+ * @notify_recv_offset: The first bit allocated for incoming notifications.
+ * @send_quota: The maximum number of outgoing messages.
+ * @recv_quota: The maximum number of incoming messages.
+ * @in_quota_set: For servers, the number of client->server messages
+ *     requested during system configuration (sysfs or environment).
+ * @out_quota_set: For servers, the number of server->client messages
+ *     requested during system configuration (sysfs or environment).
+ * @dev: Linux device model device structure
+ * @stats: Service statistics
+ */
+struct vs_service_device {
+	vs_service_id_t id;
+	char *name;
+	char *sysfs_name;
+	char *protocol;
+	bool is_server;
+
+	struct vs_service_device *owner;
+	unsigned lock_subclass;
+
+	struct mutex ready_lock;
+	unsigned readiness;
+	int disable_count;
+	bool driver_probed;
+
+	struct workqueue_struct *work_queue;
+
+	struct tasklet_struct rx_tasklet;
+	struct work_struct rx_work;
+
+	spinlock_t rx_lock;
+	struct list_head rx_queue;
+	bool tx_ready, tx_batching;
+
+	spinlock_t state_spinlock;
+	struct mutex state_mutex;
+
+	struct work_struct reset_work;
+	bool pending_reset;
+	struct delayed_work ready_work;
+	struct delayed_work cooloff_work;
+	struct work_struct cleanup_work;
+
+	unsigned long last_reset;
+	unsigned long last_reset_request;
+	unsigned long last_ready;
+	unsigned long reset_delay;
+
+	atomic_t is_over_quota;
+	wait_queue_head_t quota_wq;
+
+	unsigned notify_send_bits;
+	unsigned notify_send_offset;
+	unsigned notify_recv_bits;
+	unsigned notify_recv_offset;
+	unsigned send_quota;
+	unsigned recv_quota;
+
+	unsigned in_quota_set;
+	unsigned out_quota_set;
+
+	void *transport_priv;
+
+	struct device dev;
+	struct vs_service_stats stats;
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	bool state_spinlock_used;
+	bool state_mutex_used;
+#endif
+};
+
+#define to_vs_service_device(d) container_of(d, struct vs_service_device, dev)
+
+/**
+ * vs_service_get_session - Return the session for a service
+ * @service: Service to get the session for
+ */
+static inline struct vs_session_device *
+vs_service_get_session(struct vs_service_device *service)
+{
+	return to_vs_session_device(service->dev.parent);
+}
+
+/**
+ * vs_service_send - Send a message from a service
+ * @service: Service to send the message from
+ * @mbuf: Message buffer to send
+ */
+static inline int
+vs_service_send(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	const struct vs_transport_vtable *vt = session->transport->vt;
+	const unsigned long flags =
+		service->tx_batching ?  VS_TRANSPORT_SEND_FLAGS_MORE : 0;
+	size_t msg_size = vt->mbuf_size(mbuf);
+	int err;
+
+	err = vt->send(session->transport, service, mbuf, flags);
+	if (!err) {
+		atomic_inc(&service->stats.sent_mbufs);
+		atomic_add(msg_size, &service->stats.sent_bytes);
+	} else {
+		atomic_inc(&service->stats.send_failures);
+	}
+
+	return err;
+}
+
+/**
+ * vs_service_alloc_mbuf - Allocate a message buffer for a service
+ * @service: Service to allocate the buffer for
+ * @size: Size of the data buffer to allocate
+ * @flags: Flags to pass to the buffer allocation
+ */
+static inline struct vs_mbuf *
+vs_service_alloc_mbuf(struct vs_service_device *service, size_t size,
+		gfp_t flags)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+	struct vs_mbuf *mbuf;
+
+	mbuf = session->transport->vt->alloc_mbuf(session->transport,
+			service, size, flags);
+	if (IS_ERR(mbuf) && PTR_ERR(mbuf) == -ENOBUFS) {
+		/* Over quota accounting */
+		if (atomic_cmpxchg(&service->is_over_quota, 0, 1) == 0) {
+			service->stats.over_quota_time = jiffies;
+			atomic_inc(&service->stats.nr_over_quota);
+		}
+	}
+
+	/*
+	 * The transport drivers should return either a valid message buffer
+	 * pointer or an ERR_PTR value. Warn here if a transport driver is
+	 * returning NULL on message buffer allocation failure.
+	 */
+	if (WARN_ON_ONCE(!mbuf))
+		return ERR_PTR(-ENOMEM);
+
+	return mbuf;
+}
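+
+/*
+ * Example (illustrative sketch only): the raw allocate-and-send path at the
+ * service level, as used by generated protocol code. The caller is assumed
+ * to hold the service state lock; my_send_raw is a hypothetical helper and
+ * any protocol header space in the mbuf is ignored here.
+ *
+ *	static int my_send_raw(struct vs_service_device *service,
+ *			       const void *data, size_t len)
+ *	{
+ *		struct vs_mbuf *mbuf;
+ *		int err;
+ *
+ *		mbuf = vs_service_alloc_mbuf(service, len, GFP_KERNEL);
+ *		if (IS_ERR(mbuf))
+ *			return PTR_ERR(mbuf);
+ *
+ *		memcpy(mbuf->data, data, len);
+ *
+ *		err = vs_service_send(service, mbuf);
+ *		if (err)
+ *			vs_service_free_mbuf(service, mbuf);
+ *		return err;
+ *	}
+ */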
+
+/**
+ * vs_service_free_mbuf - Deallocate a message buffer for a service
+ * @service: Service the message buffer was allocated for
+ * @mbuf: Message buffer to deallocate
+ */
+static inline void
+vs_service_free_mbuf(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	session->transport->vt->free_mbuf(session->transport, service, mbuf);
+}
+
+/**
+ * vs_service_notify - Send a notification from a service
+ * @service: Service to send the notification from
+ * @flags: Notification bits to send
+ */
+static inline int
+vs_service_notify(struct vs_service_device *service, u32 flags)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->notify(session->transport,
+			service, flags);
+}
+
+/**
+ * vs_service_has_atomic_rx - Return whether or not a service's receive
+ * message handler runs in atomic context. This function should only be
+ * called for services which are bound to a driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_rx(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->dev.driver))
+		return false;
+
+	return to_vs_service_driver(service->dev.driver)->rx_atomic;
+}
+
+/**
+ * vs_service_max_mbuf_size - Return the maximum allocation size of a message
+ * buffer.
+ * @service: The service to check
+ */
+static inline size_t
+vs_service_max_mbuf_size(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->max_mbuf_size(session->transport);
+}
+
+/**
+ * vs_service_send_mbufs_available - Return the number of mbufs which can be
+ * allocated for sending before going over quota.
+ * @service: The service to check
+ */
+static inline ssize_t
+vs_service_send_mbufs_available(struct vs_service_device *service)
+{
+	struct vs_session_device *session = vs_service_get_session(service);
+
+	return session->transport->vt->service_send_avail(session->transport,
+			service);
+}
+
+/**
+ * vs_service_has_atomic_tx - Return whether or not a service is allowed to
+ * transmit from atomic context (other than its receive message handler).
+ * This function should only be called for services which are bound to a
+ * driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_tx(struct vs_service_device *service)
+{
+	if (WARN_ON(!service->dev.driver))
+		return false;
+
+	return to_vs_service_driver(service->dev.driver)->tx_atomic;
+}
+
+/**
+ * vs_service_state_lock - Acquire a lock allowing service state operations
+ * from external task contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This must be used to protect any service state accesses that occur in task
+ * contexts outside of a callback from the vservices protocol layer. It must
+ * not be called from a protocol layer callback, nor from atomic context.
+ *
+ * If this service's state is also accessed from softirq contexts other than
+ * vservices protocol layer callbacks, use vs_service_state_lock_bh instead,
+ * and set the driver's tx_atomic flag.
+ *
+ * If this is called from outside the service's workqueue, the calling driver
+ * must provide its own guarantee that it has not been detached from the
+ * service. If that is not possible, use vs_state_lock_safe().
+ */
+static inline void
+vs_service_state_lock(struct vs_service_device *service)
+__acquires(service)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	WARN_ON_ONCE(vs_service_has_atomic_tx(service));
+#endif
+
+	mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	if (WARN_ON_ONCE(service->state_spinlock_used))
+		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+	service->state_mutex_used = true;
+#endif
+
+	if (vs_service_has_atomic_rx(service))
+		tasklet_disable(&service->rx_tasklet);
+
+	__acquire(service);
+}
+
+/**
+ * vs_service_state_unlock - Release the lock acquired by vs_service_state_lock.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock(struct vs_service_device *service)
+__releases(service)
+{
+	__release(service);
+
+	mutex_unlock(&service->state_mutex);
+
+	if (vs_service_has_atomic_rx(service)) {
+		tasklet_enable(&service->rx_tasklet);
+
+		/* Kick the tasklet if there is RX work to do */
+		if (!list_empty(&service->rx_queue))
+			tasklet_schedule(&service->rx_tasklet);
+	}
+}
+
+/**
+ * vs_service_state_lock_bh - Acquire a lock allowing service state operations
+ * from external task or softirq contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This is an alternative to vs_service_state_lock for drivers that receive
+ * messages in atomic context (i.e. have their rx_atomic flag set), *and* must
+ * transmit messages from softirq contexts other than their own message
+ * receive and tx_ready callbacks. Such drivers must set their tx_atomic
+ * flag, so generated protocol drivers perform correct locking.
+ *
+ * This should replace all calls to vs_service_state_lock for services that
+ * need it. Do not use both locking functions in one service driver.
+ *
+ * The calling driver must provide its own guarantee that it has not been
+ * detached from the service. If that is not possible, use
+ * vs_state_lock_safe_bh().
+ */
+static inline void
+vs_service_state_lock_bh(struct vs_service_device *service)
+__acquires(service)
+__acquires(&service->state_spinlock)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	WARN_ON_ONCE(!vs_service_has_atomic_rx(service));
+	WARN_ON_ONCE(!vs_service_has_atomic_tx(service));
+#endif
+
+#ifdef CONFIG_SMP
+	/* Not necessary on UP because it's implied by spin_lock_bh(). */
+	tasklet_disable(&service->rx_tasklet);
+#endif
+
+	spin_lock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+	if (WARN_ON_ONCE(service->state_mutex_used))
+		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+	service->state_spinlock_used = true;
+#endif
+
+	__acquire(service);
+}
+
+/**
+ * vs_service_state_unlock_bh - Release the lock acquired by
+ * vs_service_state_lock_bh.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock_bh(struct vs_service_device *service)
+__releases(service)
+__releases(&service->state_spinlock)
+{
+	__release(service);
+
+	spin_unlock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_SMP
+	tasklet_enable(&service->rx_tasklet);
+#endif
+}
+
+/* Convenience macros for locking a state structure rather than a service. */
+#define vs_state_lock(state) vs_service_state_lock((state)->service)
+#define vs_state_unlock(state) vs_service_state_unlock((state)->service)
+#define vs_state_lock_bh(state) vs_service_state_lock_bh((state)->service)
+#define vs_state_unlock_bh(state) vs_service_state_unlock_bh((state)->service)
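+
+/*
+ * Example (illustrative sketch only): protecting protocol state that is
+ * accessed from an external task context, such as a character device write
+ * handler. The my_* names and the priv structure are hypothetical.
+ *
+ *	static ssize_t my_serial_write(struct my_serial_priv *priv,
+ *				       const char *buf, size_t len)
+ *	{
+ *		ssize_t ret;
+ *
+ *		vs_state_lock(&priv->client_state);
+ *		ret = my_serial_queue_tx(&priv->client_state, buf, len);
+ *		vs_state_unlock(&priv->client_state);
+ *
+ *		return ret;
+ *	}
+ */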
+
+/**
+ * vs_state_lock_safe[_bh] - Acquire a lock for a state structure's service,
+ * when the service may have been detached from the state.
+ *
+ * This is useful for blocking operations that can't easily be terminated
+ * before returning from the service reset handler, such as file I/O. To use
+ * this, the state structure should be reference-counted rather than freed in
+ * the release callback, and the driver should retain its own reference to the
+ * service until the state structure is freed.
+ *
+ * This macro acquires the lock and returns true if the state has not been
+ * detached from the service. Otherwise, it returns false.
+ *
+ * Note that the _bh variant cannot be used from atomic context, because it
+ * acquires a mutex.
+ */
+#define __vs_state_lock_safe(_state, _lock, _unlock) ({ \
+	bool __ok = true;						\
+	typeof(_state) __state = (_state);				\
+	struct vs_service_device *__service = __state->service;		\
+	mutex_lock_nested(&__service->ready_lock,			\
+			__service->lock_subclass);			\
+	__ok = !ACCESS_ONCE(__state->released);				\
+	if (__ok) {							\
+		_lock(__state);						\
+		__ok = !ACCESS_ONCE(__state->released);			\
+		if (!__ok)						\
+			_unlock(__state);				\
+	}								\
+	mutex_unlock(&__service->ready_lock);				\
+	__ok;								\
+})
+#define vs_state_lock_safe(_state) \
+	__vs_state_lock_safe((_state), vs_state_lock, vs_state_unlock)
+#define vs_state_lock_safe_bh(_state) \
+	__vs_state_lock_safe((_state), vs_state_lock_bh, vs_state_unlock_bh)
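+
+/*
+ * Example (illustrative sketch only): taking the state lock from a context
+ * that may outlive the service binding, such as deferred file I/O work. The
+ * my_* names are hypothetical and the state structure is assumed to be
+ * reference counted as described above.
+ *
+ *	static void my_io_work(struct work_struct *work)
+ *	{
+ *		struct my_priv *priv =
+ *			container_of(work, struct my_priv, io_work);
+ *
+ *		if (!vs_state_lock_safe(&priv->state))
+ *			return;
+ *
+ *		my_do_io(&priv->state);
+ *		vs_state_unlock(&priv->state);
+ *	}
+ */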
+
+/**
+ * vs_get_service - Get a reference to a service.
+ * @service: Service to get a reference to.
+ */
+static inline struct vs_service_device *
+vs_get_service(struct vs_service_device *service)
+{
+	if (service)
+		get_device(&service->dev);
+	return service;
+}
+
+/**
+ * vs_put_service - Put a reference to a service.
+ * @service: The service to put the reference to.
+ */
+static inline void
+vs_put_service(struct vs_service_device *service)
+{
+	put_device(&service->dev);
+}
+
+extern int vs_service_reset(struct vs_service_device *service,
+		struct vs_service_device *caller);
+extern void vs_service_reset_nosync(struct vs_service_device *service);
+
+/**
+ * vs_service_send_batch_start - Start a batch of outgoing messages
+ * @service: The service that is starting a batch
+ * @flush: Finish any previously started batch (if false, then duplicate
+ * calls to this function have no effect)
+ */
+static inline void
+vs_service_send_batch_start(struct vs_service_device *service, bool flush)
+{
+	if (flush && service->tx_batching) {
+		struct vs_session_device *session =
+			vs_service_get_session(service);
+		const struct vs_transport_vtable *vt = session->transport->vt;
+		if (vt->flush)
+			vt->flush(session->transport, service);
+	} else {
+		service->tx_batching = true;
+	}
+}
+
+/**
+ * vs_service_send_batch_end - End a batch of outgoing messages
+ * @service: The service that is ending a batch
+ * @flush: Start sending the batch immediately (if false, the batch will
+ * be flushed when the next message is sent)
+ */
+static inline void
+vs_service_send_batch_end(struct vs_service_device *service, bool flush)
+{
+	service->tx_batching = false;
+	if (flush) {
+		struct vs_session_device *session =
+			vs_service_get_session(service);
+		const struct vs_transport_vtable *vt = session->transport->vt;
+		if (vt->flush)
+			vt->flush(session->transport, service);
+	}
+}
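+
+/*
+ * Example (illustrative sketch only): batching several messages so the
+ * transport may coalesce them into a single transfer. my_send_one and the
+ * surrounding variables are hypothetical.
+ *
+ *	vs_service_send_batch_start(service, false);
+ *	for (i = 0; i < count; i++)
+ *		my_send_one(service, &pkts[i]);
+ *	vs_service_send_batch_end(service, true);
+ */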
+
+
+#endif /* _VSERVICE_SERVICE_H_ */
diff --git a/include/vservices/session.h b/include/vservices/session.h
new file mode 100644
index 0000000..b9dc775
--- /dev/null
+++ b/include/vservices/session.h
@@ -0,0 +1,161 @@
+/*
+ * include/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the device type for a vServices session attached to a
+ * transport. This should only be used by transport drivers, the vServices
+ * session code, and the inline transport-access functions defined in
+ * vservices/service.h.
+ *
+ * Drivers for these devices are defined internally by the vServices
+ * framework. Other drivers should not attach to these devices.
+ */
+
+#ifndef _VSERVICES_SESSION_H_
+#define _VSERVICES_SESSION_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+
+#include <vservices/types.h>
+
+struct vs_service_device;
+struct vs_mbuf;
+
+struct notifier_block;
+
+/**
+ * enum vs_notify_event_t - vService notifier events
+ *
+ * @VS_SESSION_NOTIFY_ADD: vService session added. Argument is a pointer to
+ * the vs_session_device. This notification is sent after the session has been
+ * added.
+ *
+ * @VS_SESSION_NOTIFY_REMOVE: vService session about to be removed. Argument is
+ * a pointer to the vs_session_device. This notification is sent before the
+ * session is removed.
+ */
+enum vs_notify_event_t {
+	VS_SESSION_NOTIFY_ADD,
+	VS_SESSION_NOTIFY_REMOVE,
+};
+
+/**
+ * struct vs_session_device - Session device
+ * @name: The unique human-readable name of this session.
+ * @is_server: True if this session is a server, false if client
+ * @transport: The transport device for this session
+ * @session_num: Unique ID for this session. Used for sysfs
+ * @session_lock: Mutex which protects any change to service presence or
+ *     readiness
+ * @core_service: The core service, if one has ever been registered. Once set,
+ *     this must remain valid and unchanged until the session driver is
+ *     removed. Writes are protected by the service_idr_lock.
+ * @service_idr: IDR of the services currently allocated on this session,
+ *     which allows service IDs to be recycled. Protected by service_idr_lock.
+ * @service_idr_lock: Mutex protecting updates to service_idr and writes to
+ *     core_service
+ * @activation_work: work structure for handling session activation & reset
+ * @activation_state: true if transport is currently active
+ * @fatal_error_work: work structure for handling fatal session failures
+ * @debug_mask: Debug level mask
+ * @list: Entry in the global session list
+ * @sysfs_entry: Kobject for this session device in sysfs, under
+ *     /sys/vservices
+ * @dev: Device structure for the Linux device model
+ */
+struct vs_session_device {
+	char *name;
+	bool is_server;
+	struct vs_transport *transport;
+	int session_num;
+
+	struct mutex session_lock;
+
+	/*
+	 * The service_idr maintains the list of currently allocated services
+	 * on a session, and allows for recycling of service ids. The lock also
+	 * protects core_service.
+	 */
+	struct idr service_idr;
+	struct mutex service_idr_lock;
+	struct vs_service_device *core_service;
+
+	struct work_struct activation_work;
+	atomic_t activation_state;
+
+	struct work_struct fatal_error_work;
+
+	unsigned long debug_mask;
+
+	struct list_head list;
+	struct kobject *sysfs_entry;
+
+	struct device dev;
+};
+
+#define to_vs_session_device(d) \
+	container_of(d, struct vs_session_device, dev)
+
+extern struct vs_session_device *
+vs_session_register(struct vs_transport *transport, struct device *parent,
+		bool server, const char *transport_name);
+extern void vs_session_start(struct vs_session_device *session);
+extern void vs_session_unregister(struct vs_session_device *session);
+
+extern int vs_session_handle_message(struct vs_session_device *session,
+		struct vs_mbuf *mbuf, vs_service_id_t service_id);
+
+extern void vs_session_quota_available(struct vs_session_device *session,
+		vs_service_id_t service_id, unsigned count,
+		bool send_tx_ready);
+
+extern void vs_session_handle_notify(struct vs_session_device *session,
+		unsigned long flags, vs_service_id_t service_id);
+
+extern void vs_session_handle_reset(struct vs_session_device *session);
+extern void vs_session_handle_activate(struct vs_session_device *session);
+
+extern struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+		struct vs_service_device *parent, const char *name,
+		const char *protocol, const void *plat_data);
+extern int vs_server_destroy_service(struct vs_service_device *service,
+		struct vs_service_device *parent);
+
+extern void vs_session_register_notify(struct notifier_block *nb);
+extern void vs_session_unregister_notify(struct notifier_block *nb);
+
+extern int vs_session_unbind_driver(struct vs_service_device *service);
+
+extern void vs_session_for_each_service(struct vs_session_device *session,
+		void (*func)(struct vs_service_device *, void *), void *data);
+
+extern struct mutex vs_session_lock;
+extern int vs_session_for_each_locked(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data);
+
+static inline int vs_session_for_each(
+		int (*fn)(struct vs_session_device *session, void *data),
+		void *data)
+{
+	int r;
+	mutex_lock(&vs_session_lock);
+	r = vs_session_for_each_locked(fn, data);
+	mutex_unlock(&vs_session_lock);
+	return r;
+}
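+
+/*
+ * Example (illustrative sketch only): counting the registered sessions using
+ * the iterator above. my_count_session is hypothetical.
+ *
+ *	static int my_count_session(struct vs_session_device *session,
+ *				    void *data)
+ *	{
+ *		(*(unsigned int *)data)++;
+ *		return 0;
+ *	}
+ *
+ *	unsigned int count = 0;
+ *	vs_session_for_each(my_count_session, &count);
+ */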
+
+#endif /* _VSERVICES_SESSION_H_ */
diff --git a/include/vservices/transport.h b/include/vservices/transport.h
new file mode 100644
index 0000000..6251ce1
--- /dev/null
+++ b/include/vservices/transport.h
@@ -0,0 +1,150 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the transport vtable structure. This is made public so
+ * that the application drivers can call the vtable functions directly (via
+ * the inlined wrappers in service.h) rather than indirectly via a function
+ * call.
+ *
+ */
+
+#ifndef _VSERVICES_TRANSPORT_H_
+#define _VSERVICES_TRANSPORT_H_
+
+#include <linux/types.h>
+
+#include <vservices/types.h>
+
+struct vs_transport;
+struct vs_mbuf;
+struct vs_service_device;
+
+/**
+ * struct vs_transport_vtable - Transport driver operations. Transport drivers
+ * must provide implementations for all operations in this table.
+ * --- Message buffer allocation ---
+ * @alloc_mbuf: Allocate an mbuf of the given size for the given service
+ * @free_mbuf: Deallocate an mbuf
+ * @mbuf_size: Return the size in bytes of a message buffer. The size returned
+ *             should be the total number of bytes including any headers.
+ * @max_mbuf_size: Return the maximum allowable message buffer allocation size.
+ * --- Message sending ---
+ * @send: Queue an mbuf for sending
+ * @flush: Start the transfer for the current message batch, if any
+ * @notify: Send a notification
+ * --- Transport-level reset handling ---
+ * @reset: Reset the transport layer
+ * @ready: Ready the transport layer
+ * --- Service management ---
+ * @service_add: A new service has been added to this transport's session
+ * @service_remove: A service has been removed from this transport's session
+ * @service_start: A service on this transport's session has had its resource
+ *     allocations set and is about to start. This is always interleaved with
+ *     service_reset, with one specific exception: the core service client,
+ *     which has its quotas initially hard-coded to 0 send / 1 recv and
+ *     adjusted when the initial startup message arrives.
+ * @service_reset: A service on this transport's session has just been reset,
+ *     and any resources allocated to it should be cleaned up to prepare
+ *     for later reallocation.
+ * @service_send_avail: The number of message buffers that this service is
+ *                      able to send before going over quota.
+ * --- Query transport capabilities ---
+ * @get_notify_bits: Fetch the number of sent and received notification bits
+ *     supported by this transport. Note that this can be any positive value
+ *     up to UINT_MAX.
+ * @get_quota_limits: Fetch the total send and receive message buffer quotas
+ *     supported by this transport. Note that this can be any positive value
+ *     up to UINT_MAX.
+ */
+struct vs_transport_vtable {
+	/* Message buffer allocation */
+	struct vs_mbuf *(*alloc_mbuf)(struct vs_transport *transport,
+			struct vs_service_device *service, size_t size,
+			gfp_t gfp_flags);
+	void (*free_mbuf)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			struct vs_mbuf *mbuf);
+	size_t (*mbuf_size)(struct vs_mbuf *mbuf);
+	size_t (*max_mbuf_size)(struct vs_transport *transport);
+
+	/* Sending messages */
+	int (*send)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			struct vs_mbuf *mbuf, unsigned long flags);
+	int (*flush)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	int (*notify)(struct vs_transport *transport,
+			struct vs_service_device *service,
+			unsigned long bits);
+
+	/* Raising and clearing transport-level reset */
+	void (*reset)(struct vs_transport *transport);
+	void (*ready)(struct vs_transport *transport);
+
+	/* Service management */
+	int (*service_add)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	void (*service_remove)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	int (*service_start)(struct vs_transport *transport,
+			struct vs_service_device *service);
+	int (*service_reset)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	ssize_t (*service_send_avail)(struct vs_transport *transport,
+			struct vs_service_device *service);
+
+	/* Query transport capabilities */
+	void (*get_notify_bits)(struct vs_transport *transport,
+			unsigned *send_notify_bits, unsigned *recv_notify_bits);
+	void (*get_quota_limits)(struct vs_transport *transport,
+			unsigned *send_quota, unsigned *recv_quota);
+};
+
+/* Flags for .send */
+#define VS_TRANSPORT_SEND_FLAGS_MORE		0x1
+
+/**
+ * struct vs_transport - A structure representing a transport
+ * @type: Type of transport, e.g. microvisor or loopback
+ * @vt: Transport operations table
+ * @notify_info: Array of incoming notification settings
+ * @notify_info_size: Size of the incoming notification array
+ */
+struct vs_transport {
+	const char *type;
+	const struct vs_transport_vtable *vt;
+	struct vs_notify_info *notify_info;
+	int notify_info_size;
+};
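+
+/*
+ * Example (illustrative sketch only): a transport driver typically embeds
+ * struct vs_transport in its own device structure and points vt at a
+ * statically defined vtable implementing every operation. All my_transport_*
+ * names are hypothetical.
+ *
+ *	static const struct vs_transport_vtable my_transport_vt = {
+ *		.alloc_mbuf		= my_transport_alloc_mbuf,
+ *		.free_mbuf		= my_transport_free_mbuf,
+ *		.mbuf_size		= my_transport_mbuf_size,
+ *		.max_mbuf_size		= my_transport_max_mbuf_size,
+ *		.send			= my_transport_send,
+ *		.flush			= my_transport_flush,
+ *		.notify			= my_transport_notify,
+ *		.reset			= my_transport_reset,
+ *		.ready			= my_transport_ready,
+ *		.service_add		= my_transport_service_add,
+ *		.service_remove		= my_transport_service_remove,
+ *		.service_start		= my_transport_service_start,
+ *		.service_reset		= my_transport_service_reset,
+ *		.service_send_avail	= my_transport_service_send_avail,
+ *		.get_notify_bits	= my_transport_get_notify_bits,
+ *		.get_quota_limits	= my_transport_get_quota_limits,
+ *	};
+ *
+ *	priv->transport.type = "my_transport";
+ *	priv->transport.vt = &my_transport_vt;
+ */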
+
+/**
+ * struct vs_mbuf - Message buffer. This is always allocated and released by the
+ * transport callbacks defined above, so it may be embedded in a
+ * transport-specific structure containing additional state.
+ * @data: Message data buffer
+ * @size: Size of the data buffer in bytes
+ * @is_recv: True if this mbuf was received from the other end of the
+ *           transport. False if it was allocated by this end for sending.
+ * @priv: Private value that will not be touched by the framework
+ * @queue: list_head for entry in lists. The session layer uses this queue
+ * for receiving messages. The transport driver may use this queue for its
+ * own purposes when sending messages.
+ */
+struct vs_mbuf {
+	void *data;
+	size_t size;
+	bool is_recv;
+	void *priv;
+	struct list_head queue;
+};
+
+#endif /* _VSERVICES_TRANSPORT_H_ */
diff --git a/include/vservices/types.h b/include/vservices/types.h
new file mode 100644
index 0000000..306156e
--- /dev/null
+++ b/include/vservices/types.h
@@ -0,0 +1,41 @@
+/*
+ * include/vservices/types.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _VSERVICE_TYPES_H
+#define _VSERVICE_TYPES_H
+
+#include <linux/types.h>
+
+typedef u16 vs_service_id_t;
+typedef u16 vs_message_id_t;
+
+/*
+ * An opaque handle to a queued asynchronous command. This is used internally
+ * by the generated interface code, to identify which of the pending commands
+ * is being replied to. It is provided as a parameter to non-blocking handler
+ * callbacks for queued asynchronous requests, and must be stored by the server
+ * and passed to the corresponding reply call.
+ */
+typedef struct vservice_queued_request vservice_queued_request_t;
+
+/*
+ * The following enum is used by a server to report the outcome of an open
+ * callback: VS_SERVER_RESP_SUCCESS on success, or VS_SERVER_RESP_FAILURE on
+ * failure. A server may instead choose to complete the request explicitly
+ * later, in which case it should return VS_SERVER_RESP_EXPLICIT_COMPLETE.
+ */
+typedef enum vs_server_response_type {
+	VS_SERVER_RESP_SUCCESS,
+	VS_SERVER_RESP_FAILURE,
+	VS_SERVER_RESP_EXPLICIT_COMPLETE
+} vs_server_response_type_t;
+
+#endif /*_VSERVICE_TYPES_H */
diff --git a/include/vservices/wait.h b/include/vservices/wait.h
new file mode 100644
index 0000000..544937d
--- /dev/null
+++ b/include/vservices/wait.h
@@ -0,0 +1,455 @@
+/*
+ * include/vservices/wait.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic wait event helpers for Virtual Service drivers.
+ */
+
+#ifndef _VSERVICE_SERVICE_WAIT_H
+#define _VSERVICE_SERVICE_WAIT_H
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <vservices/service.h>
+
+/* Older kernels don't have lockdep_assert_held_once(). */
+#ifndef lockdep_assert_held_once
+#ifdef CONFIG_LOCKDEP
+#define lockdep_assert_held_once(l) do {				\
+		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
+	} while (0)
+#else
+#define lockdep_assert_held_once(l) do { } while (0)
+#endif
+#endif
+
+/* Legacy wait macro; needs rewriting to use vs_state_lock_safe(). */
+/* FIXME: Redmine ticket #229 - philip. */
+/**
+ * __vs_service_wait_event - Wait for a condition to become true for a
+ * Virtual Service.
+ *
+ * @_service: The service to wait for the condition to be true for.
+ * @_wq: Waitqueue to wait on.
+ * @_condition: Condition to wait for.
+ *
+ * Returns: This function returns 0 if the condition is true, or -ERESTARTSYS
+ *          if the wait was interrupted by a signal. If _state is
+ *          TASK_UNINTERRUPTIBLE then this function will always return 0.
+ *
+ * This function must be called with the service's state lock held. The wait
+ * is performed without the state lock held, but the condition is re-checked
+ * after reacquiring the state lock. This property allows this function to
+ * check the state of the service's protocol in a thread safe manner.
+ *
+ * The caller is responsible for ensuring that it has not been detached from
+ * the given service.
+ *
+ * It is nearly always wrong to call this on the service workqueue, since
+ * the workqueue is single-threaded and the state can only change when a
+ * handler function is called on it.
+ */
+#define __vs_service_wait_event(_service, _wq, _cond, _state)		\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret = 0;						\
+									\
+		lockdep_assert_held_once(&(_service)->state_mutex);	\
+		do {							\
+			prepare_to_wait(&(_wq), &__wait, (_state));	\
+									\
+			if (_cond)					\
+				break;					\
+									\
+			if ((_state) == TASK_INTERRUPTIBLE &&		\
+					signal_pending(current)) {	\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			vs_service_state_unlock(_service);		\
+			schedule();					\
+			vs_service_state_lock(_service);		\
+		} while (!(_cond));					\
+									\
+		finish_wait(&(_wq), &__wait);				\
+		__ret;							\
+	})
+
+/* Legacy wait macros; need rewriting to use __vs_wait_state(). */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_wait_event(_service, _wq, _cond) \
+	__vs_service_wait_event(_service, _wq, _cond, TASK_INTERRUPTIBLE)
+#define vs_service_wait_event_nointr(_service, _wq, _cond) \
+	__vs_service_wait_event(_service, _wq, _cond, TASK_UNINTERRUPTIBLE)
+
+/**
+ * __vs_wait_state - block until a condition becomes true on a service state.
+ *
+ * @_state: The protocol state to wait on.
+ * @_cond: Condition to wait for.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: Returns 0 once the condition becomes true, or a negative error
+ *         code (-ECANCELED, -ERESTARTSYS, -ETIMEDOUT or -ENOLINK) on failure.
+ *
+ * This macro blocks waiting until a particular condition becomes true on a
+ * service state. The service must be running; if not, or if it ceases to be
+ * running during the wait, -ECANCELED will be returned.
+ *
+ * This is not an exclusive wait. If an exclusive wait is desired it is
+ * usually better to use the waiting alloc or send functions.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The state lock will be dropped by waiting
+ * but reacquired before returning, unless -ENOLINK is returned, in which case
+ * the service driver has been unbound and the lock cannot be reacquired.
+ */
+#define __vs_wait_state(_state, _cond, _intr, _timeout, _bh)	\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret;						\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (1) {						\
+			prepare_to_wait(&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (!VSERVICE_BASE_STATE_IS_RUNNING(		\
+					(_state)->state.base)) {	\
+				__ret = -ECANCELED;			\
+				break;					\
+			}						\
+									\
+			if (_cond) {					\
+				__ret = 0;				\
+				break;					\
+			}						\
+									\
+			if (_intr && signal_pending(current)) {		\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			vs_state_unlock##_bh(_state);			\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__ret = -ETIMEDOUT;		\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (!vs_state_lock_safe##_bh(_state)) {		\
+				__ret = -ENOLINK;			\
+				break;					\
+			}						\
+		}							\
+									\
+		finish_wait(&__service->quota_wq, &__wait);		\
+		__ret;							\
+	})
+
+/* Specialisations of __vs_wait_state for common uses. */
+#define vs_wait_state(_state, _cond) \
+	__vs_wait_state(_state, _cond, true, -1,)
+#define vs_wait_state_timeout(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, true, _timeout,)
+#define vs_wait_state_nointr(_state, _cond) \
+	__vs_wait_state(_state, _cond, false, -1,)
+#define vs_wait_state_nointr_timeout(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, false, _timeout,)
+#define vs_wait_state_bh(_state, _cond) \
+	__vs_wait_state(_state, _cond, true, -1, _bh)
+#define vs_wait_state_timeout_bh(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, true, _timeout, _bh)
+#define vs_wait_state_nointr_bh(_state, _cond) \
+	__vs_wait_state(_state, _cond, false, -1, _bh)
+#define vs_wait_state_nointr_timeout_bh(_state, _cond, _timeout) \
+	__vs_wait_state(_state, _cond, false, _timeout, _bh)
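+
+/*
+ * Example (illustrative sketch only): blocking in task context, with the
+ * state lock held, until a driver-defined condition becomes true or a one
+ * second timeout expires. priv->remote_ready is a hypothetical flag set by a
+ * message handler which also wakes the service's quota_wq.
+ *
+ *	err = vs_wait_state_timeout(&priv->state, priv->remote_ready,
+ *				    msecs_to_jiffies(1000));
+ *	if (err < 0)
+ *		return err;
+ */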
+
+/**
+ * __vs_wait_alloc - block until quota is available, then allocate a buffer.
+ *
+ * @_state: The protocol state to allocate a message for.
+ * @_alloc_func: The message buffer allocation function to run. This is the
+ *         full function invocation, not a pointer to the function.
+ * @_cond: Additional condition which must remain true, or else the wait
+ *         will fail with -ECANCELED. This is typically used to check the
+ *         service's protocol state. Note that this condition will only
+ *         be checked after sleeping; it is assumed to be true when the
+ *         macro is first called.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ *         may then fail with -ENOLINK if the driver is detached from the
+ *         service, in which case the lock is dropped.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: Return a pointer to a message buffer on successful allocation,
+ *         or an error code in ERR_PTR form.
+ *
+ * This macro calls a specified message allocation function, and blocks
+ * if it returns -ENOBUFS, waiting until quota is available on the service
+ * before retrying. It aborts the wait if the service resets, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the allocator function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when using this macro on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
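+ *
+ * A minimal usage sketch follows, assuming an unlocked interruptible wait
+ * on a protocol state pointer @state. The vs_state_lock() helper, the
+ * foo_is_ready() condition and the vs_client_foo_alloc_req() allocator are
+ * hypothetical names used only for illustration:
+ *
+ *	struct vs_mbuf *mbuf;
+ *
+ *	vs_state_lock(state);
+ *	mbuf = vs_wait_alloc(state, foo_is_ready(state),
+ *			vs_client_foo_alloc_req(state, GFP_KERNEL));
+ *	if (IS_ERR(mbuf)) {
+ *		if (PTR_ERR(mbuf) != -ENOLINK)
+ *			vs_state_unlock(state);
+ *		return PTR_ERR(mbuf);
+ *	}
+ *	... fill in the message, send it, then drop the state lock ...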
+ */
+#define __vs_wait_alloc(_state, _alloc_func, _cond, _unlock, _intr, 	\
+		_timeout, _bh)						\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		struct vs_mbuf *__mbuf = NULL;				\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (!vs_service_send_mbufs_available(__service)) {	\
+			if (_intr && signal_pending(current)) {		\
+				__mbuf = ERR_PTR(-ERESTARTSYS);		\
+				break;					\
+			}						\
+									\
+			prepare_to_wait_exclusive(			\
+					&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (_unlock)					\
+				vs_state_unlock##_bh(_state);		\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__mbuf = ERR_PTR(-ETIMEDOUT);	\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (_unlock && !vs_state_lock_safe##_bh(	\
+						_state)) {		\
+				__mbuf = ERR_PTR(-ENOLINK);		\
+				break;					\
+			}						\
+									\
+			if (!VSERVICE_BASE_STATE_IS_RUNNING(		\
+					(_state)->state.base) ||	\
+					!(_cond)) {			\
+				__mbuf = ERR_PTR(-ECANCELED);		\
+				break;					\
+			}						\
+		}							\
+		finish_wait(&__service->quota_wq, &__wait);		\
+									\
+		if (__mbuf == NULL)					\
+			__mbuf = (_alloc_func);				\
+		if (IS_ERR(__mbuf) && (PTR_ERR(__mbuf) != -ENOBUFS))	\
+			wake_up(&__service->quota_wq);			\
+		__mbuf;							\
+	})
+
+/* Specialisations of __vs_wait_alloc for common uses. */
+#define vs_wait_alloc(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_wait_alloc_timeout(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout,)
+#define vs_wait_alloc_nointr(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+#define vs_wait_alloc_nointr_timeout(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout,)
+#define vs_wait_alloc_bh(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1, _bh)
+#define vs_wait_alloc_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout, _bh)
+#define vs_wait_alloc_nointr_bh(_state, _cond, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1, _bh)
+#define vs_wait_alloc_nointr_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout, _bh)
+#define vs_wait_alloc_locked(_state, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_alloc(_state, _alloc_func) \
+	__vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+#define vs_service_waiting_alloc_cond_locked(_state, _alloc_func, _cond) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_service_waiting_alloc_cond_locked_nointr(_state, _alloc_func, _cond) \
+	__vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+
+/**
+ * __vs_wait_send - block until quota is available, then send a message.
+ *
+ * @_state: The protocol state to send a message for.
+ * @_cond: Additional condition which must remain true, or else the wait
+ *         will fail with -ECANCELED. This is typically used to check the
+ *         service's protocol state. Note that this condition will only
+ *         be checked after sleeping; it is assumed to be true when the
+ *         macro is first called.
+ * @_send_func: The message send function to run. This is the full function
+ *         invocation, not a pointer to the function.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ *         may then fail with -ENOLINK if the driver is detached from the
+ *         service; in that case the lock remains dropped.
+ * @_check_running: If true, the wait will return -ECANCELED if the service's
+ *         base state is not active, or ceases to be active.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ *         with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ *         timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ *         non-framework tasklet); otherwise nothing.
+ *
+ * Return: 0 if the send succeeds; otherwise one of the error codes
+ *         described above is returned.
+ *
+ * This macro calls a specified message send function, and blocks if it
+ * returns -ENOBUFS, waiting until quota is available on the service before
+ * retrying. It aborts the wait if it finds the service in reset, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the send function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when using this macro on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
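+ *
+ * A minimal usage sketch follows, assuming an unlocked interruptible wait
+ * on a protocol state pointer @state. The vs_state_lock() helper and the
+ * vs_client_foo_send_ack() send call are hypothetical names used only for
+ * illustration:
+ *
+ *	int err;
+ *
+ *	vs_state_lock(state);
+ *	err = vs_wait_send(state, true,
+ *			vs_client_foo_send_ack(state, GFP_KERNEL));
+ *	if (err != -ENOLINK)
+ *		vs_state_unlock(state);
+ *	return err;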
+ */
+#define __vs_wait_send(_state, _cond, _send_func, _unlock, 		\
+		_check_running, _intr, _timeout, _bh)			\
+	({								\
+		DEFINE_WAIT(__wait);					\
+		int __ret = 0;						\
+		int __jiffies __maybe_unused = (_timeout);		\
+		struct vs_service_device *__service = (_state)->service;\
+									\
+		while (!vs_service_send_mbufs_available(__service)) {	\
+			if (_intr && signal_pending(current)) {		\
+				__ret = -ERESTARTSYS;			\
+				break;					\
+			}						\
+									\
+			prepare_to_wait_exclusive(			\
+					&__service->quota_wq, &__wait,	\
+					_intr ? TASK_INTERRUPTIBLE :    \
+					TASK_UNINTERRUPTIBLE);		\
+									\
+			if (_unlock)					\
+				vs_state_unlock##_bh(_state);		\
+									\
+			if (_timeout >= 0) {				\
+				__jiffies = schedule_timeout(__jiffies);\
+				if (!__jiffies) {			\
+					__ret = -ETIMEDOUT;		\
+					break;				\
+				}					\
+			} else {					\
+				schedule();				\
+			}						\
+									\
+			if (_unlock && !vs_state_lock_safe##_bh(	\
+						_state)) {		\
+				__ret = -ENOLINK;			\
+				break;					\
+			}						\
+									\
+			if ((_check_running &&				\
+					!VSERVICE_BASE_STATE_IS_RUNNING(\
+					(_state)->state.base)) ||	\
+					!(_cond)) {			\
+				__ret = -ECANCELED;			\
+				break;					\
+			}						\
+		}							\
+		finish_wait(&__service->quota_wq, &__wait);		\
+									\
+		if (!__ret)						\
+			__ret = (_send_func);				\
+		if ((__ret < 0) && (__ret != -ENOBUFS))			\
+			wake_up(&__service->quota_wq);			\
+		__ret;							\
+	})
+
+/* Specialisations of __vs_wait_send for common uses. */
+#define vs_wait_send(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_wait_send_timeout(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, _timeout,)
+#define vs_wait_send_nointr(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_wait_send_nointr_timeout(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, _timeout,)
+#define vs_wait_send_bh(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1, _bh)
+#define vs_wait_send_timeout_bh(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, \
+			_timeout, _bh)
+#define vs_wait_send_nointr_bh(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1, _bh)
+#define vs_wait_send_nointr_timeout_bh(_state, _cond, _send_func, _timeout) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, \
+			_timeout, _bh)
+#define vs_wait_send_locked(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, false, true, true, -1,)
+#define vs_wait_send_locked_nocheck(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, false, false, true, -1,)
+
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_send(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_nointr(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_cond(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_cond_nointr(_state, _cond, _send_func) \
+	__vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_nocheck(_state, _send_func) \
+	__vs_wait_send(_state, true, _send_func, true, false, true, -1,)
+
+#endif /* _VSERVICE_SERVICE_WAIT_H */
diff --git a/init/Kconfig b/init/Kconfig
index e3929edd..c854ad5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1930,6 +1930,20 @@
 	  SLUB sysfs support. /sys/slab will not exist and there will be
 	  no support for cache validation etc.
 
+config SLUB_MEMCG_SYSFS_ON
+	default n
+	bool "Enable memcg SLUB sysfs support by default" if EXPERT
+	depends on SLUB && SYSFS && MEMCG
+	help
+	  SLUB creates a directory under /sys/kernel/slab for each
+	  allocation cache to host info and debug files. If memory
+	  cgroup is enabled, each cache can have per memory cgroup
+	  caches. SLUB can create the same sysfs directories for these
+	  caches under /sys/kernel/slab/CACHE/cgroup, but this can
+	  lead to a very high number of debug files being created.
+	  This behaviour is controlled by the slub_memcg_sysfs boot
+	  parameter, and this config option determines the parameter's
+	  default value.
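+	  For example, the default chosen here can be overridden at
+	  boot time by passing slub_memcg_sysfs=1 or slub_memcg_sysfs=0
+	  on the kernel command line.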
+
 config COMPAT_BRK
 	bool "Disable heap randomization"
 	default y
diff --git a/init/main.c b/init/main.c
index f9cd3f0..5d0aa0a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -508,8 +508,8 @@
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
-	boot_cpu_state_init();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
+	boot_cpu_hotplug_init();
 
 	build_all_zonelists(NULL, NULL);
 	page_alloc_init();
diff --git a/kernel/audit.c b/kernel/audit.c
index 3461a3d..194fa1a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -125,6 +125,7 @@
 static atomic_t    audit_lost = ATOMIC_INIT(0);
 
 /* The netlink socket. */
+static DEFINE_MUTEX(audit_sock_mutex);
 static struct sock *audit_sock;
 static int audit_net_id;
 
@@ -411,7 +412,9 @@
 restart:
 	/* take a reference in case we can't send it and we want to hold it */
 	skb_get(skb);
+	mutex_lock(&audit_sock_mutex);
 	err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+	mutex_unlock(&audit_sock_mutex);
 	if (err < 0) {
 		pr_err("netlink_unicast sending to audit_pid=%d returned error: %d\n",
 		       audit_pid, err);
@@ -423,7 +426,9 @@
 				snprintf(s, sizeof(s), "audit_pid=%d reset", audit_pid);
 				audit_log_lost(s);
 				audit_pid = 0;
+				mutex_lock(&audit_sock_mutex);
 				audit_sock = NULL;
+				mutex_unlock(&audit_sock_mutex);
 			} else {
 				pr_warn("re-scheduling(#%d) write to audit_pid=%d\n",
 					attempts, audit_pid);
@@ -811,12 +816,16 @@
 
 static int audit_replace(pid_t pid)
 {
+	int	len;
 	struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0,
 					       &pid, sizeof(pid));
 
 	if (!skb)
 		return -ENOMEM;
-	return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+	mutex_lock(&audit_sock_mutex);
+	len = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+	mutex_unlock(&audit_sock_mutex);
+	return len;
 }
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -901,7 +910,9 @@
 				audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
 			audit_pid = new_pid;
 			audit_nlk_portid = NETLINK_CB(skb).portid;
+			mutex_lock(&audit_sock_mutex);
 			audit_sock = skb->sk;
+			mutex_unlock(&audit_sock_mutex);
 		}
 		if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
 			err = audit_set_rate_limit(s.rate_limit);
@@ -1169,10 +1180,12 @@
 {
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 	struct sock *sock = aunet->nlsk;
+	mutex_lock(&audit_sock_mutex);
 	if (sock == audit_sock) {
 		audit_pid = 0;
 		audit_sock = NULL;
 	}
+	mutex_unlock(&audit_sock_mutex);
 
 	RCU_INIT_POINTER(aunet->nlsk, NULL);
 	synchronize_net();
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 690e1e3..f036b6a 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -419,6 +419,13 @@
 	struct path parent_path;
 	int h, ret = 0;
 
+	/*
+	 * By the time audit_add_to_parent() is called, krule->watch might
+	 * have been updated and the watch might have been freed.
+	 * So we need to keep a reference to the watch.
+	 */
+	audit_get_watch(watch);
+
 	mutex_unlock(&audit_filter_mutex);
 
 	/* Avoid calling path_lookup under audit_filter_mutex. */
@@ -427,8 +434,10 @@
 	/* caller expects mutex locked */
 	mutex_lock(&audit_filter_mutex);
 
-	if (ret)
+	if (ret) {
+		audit_put_watch(watch);
 		return ret;
+	}
 
 	/* either find an old parent or attach a new one */
 	parent = audit_find_parent(d_backing_inode(parent_path.dentry));
@@ -446,6 +455,7 @@
 	*list = &audit_inode_hash[h];
 error:
 	path_put(&parent_path);
+	audit_put_watch(watch);
 	return ret;
 }
 
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 85d9cac..cd4f413 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -406,7 +406,7 @@
 			return -EINVAL;
 		break;
 	case AUDIT_EXE:
-		if (f->op != Audit_equal)
+		if (f->op != Audit_not_equal && f->op != Audit_equal)
 			return -EINVAL;
 		if (entry->rule.listnr != AUDIT_FILTER_EXIT)
 			return -EINVAL;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 93648f6..6039aa77 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -469,6 +469,8 @@
 			break;
 		case AUDIT_EXE:
 			result = audit_exe_compare(tsk, rule->exe);
+			if (f->op == Audit_not_equal)
+				result = !result;
 			break;
 		case AUDIT_UID:
 			result = audit_uid_comparator(cred->uid, f->op, f->uid);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 076e4a0..dafa270 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3225,7 +3225,7 @@
 			/* hold the map. If the program is rejected by verifier,
 			 * the map will be released by release_maps() or it
 			 * will be used by the valid program until it's unloaded
-			 * and all maps are released in free_bpf_prog_info()
+			 * and all maps are released in free_used_maps()
 			 */
 			map = bpf_map_inc(map, false);
 			if (IS_ERR(map)) {
@@ -3629,7 +3629,7 @@
 		vfree(log_buf);
 	if (!env->prog->aux->used_maps)
 		/* if we didn't copy map pointers into bpf_prog_info, release
-		 * them now. Otherwise free_bpf_prog_info() will release them.
+		 * them now. Otherwise free_used_maps() will release them.
 		 */
 		release_maps(env);
 	*prog = env->prog;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aa03515..cc2e478 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -55,6 +55,7 @@
 	bool			rollback;
 	bool			single;
 	bool			bringup;
+	bool			booted_once;
 	struct hlist_node	*node;
 	enum cpuhp_state	cb_state;
 	int			result;
@@ -361,6 +362,85 @@
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
+#ifdef CONFIG_HOTPLUG_SMT
+enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+static bool cpu_smt_available __read_mostly;
+
+void __init cpu_smt_disable(bool force)
+{
+	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+		cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+		return;
+
+	if (force) {
+		pr_info("SMT: Force disabled\n");
+		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+	} else {
+		cpu_smt_control = CPU_SMT_DISABLED;
+	}
+}
+
+/*
+ * The decision whether SMT is supported can only be made after the full
+ * CPU identification. Called from architecture code before non-boot CPUs
+ * are brought up.
+ */
+void __init cpu_smt_check_topology_early(void)
+{
+	if (!topology_smt_supported())
+		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+}
+
+/*
+ * If SMT was disabled by BIOS, detect it here, after the CPUs have been
+ * brought online. This ensures the smt/l1tf sysfs entries are consistent
+ * with reality. cpu_smt_available is set to true during the bringup of
+ * non-boot CPUs when an SMT sibling is detected. Note that this may
+ * overwrite cpu_smt_control's previous setting.
+ */
+void __init cpu_smt_check_topology(void)
+{
+	if (!cpu_smt_available)
+		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+}
+
+static int __init smt_cmdline_disable(char *str)
+{
+	cpu_smt_disable(str && !strcmp(str, "force"));
+	return 0;
+}
+early_param("nosmt", smt_cmdline_disable);
+
+static inline bool cpu_smt_allowed(unsigned int cpu)
+{
+	if (topology_is_primary_thread(cpu))
+		return true;
+
+	/*
+	 * If the CPU is not a 'primary' thread and the booted_once bit is
+	 * set then the processor has SMT support. Store this information
+	 * for the late check of SMT support in cpu_smt_check_topology().
+	 */
+	if (per_cpu(cpuhp_state, cpu).booted_once)
+		cpu_smt_available = true;
+
+	if (cpu_smt_control == CPU_SMT_ENABLED)
+		return true;
+
+	/*
+	 * On x86 it's required to boot all logical CPUs at least once so
+	 * that the init code can get a chance to set CR4.MCE on each
+	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
+	 * core will shut down the machine.
+	 */
+	return !per_cpu(cpuhp_state, cpu).booted_once;
+}
+#else
+static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+#endif
+
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
@@ -438,6 +518,16 @@
 	stop_machine_unpark(cpu);
 	kthread_unpark(st->thread);
 
+	/*
+	 * SMT soft disabling on X86 requires bringing the CPU out of the
+	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
+	 * CPU marked itself as booted_once in cpu_notify_starting() so the
+	 * cpu_smt_allowed() check will now return false if this is not the
+	 * primary sibling.
+	 */
+	if (!cpu_smt_allowed(cpu))
+		return -ECANCELED;
+
 	/* Should we go further up ? */
 	if (st->target > CPUHP_AP_ONLINE_IDLE) {
 		__cpuhp_kick_ap_work(st);
@@ -825,7 +915,6 @@
 
 	/* Park the smpboot threads */
 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
-	smpboot_park_threads(cpu);
 
 	/*
 	 * Prevent irq alloc/free while the dying cpu reorganizes the
@@ -971,20 +1060,19 @@
 	return ret;
 }
 
+static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+{
+	if (cpu_hotplug_disabled)
+		return -EBUSY;
+	return _cpu_down(cpu, 0, target);
+}
+
 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
 {
 	int err;
 
 	cpu_maps_update_begin();
-
-	if (cpu_hotplug_disabled) {
-		err = -EBUSY;
-		goto out;
-	}
-
-	err = _cpu_down(cpu, 0, target);
-
-out:
+	err = cpu_down_maps_locked(cpu, target);
 	cpu_maps_update_done();
 	return err;
 }
@@ -1008,6 +1096,7 @@
 	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
 
 	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
+	st->booted_once = true;
 	while (st->state < target) {
 		st->state++;
 		cpuhp_invoke_callback(cpu, st->state, true, NULL);
@@ -1149,6 +1238,10 @@
 		err = -EBUSY;
 		goto out;
 	}
+	if (!cpu_smt_allowed(cpu)) {
+		err = -EPERM;
+		goto out;
+	}
 
 	err = _cpu_up(cpu, 0, target);
 out:
@@ -1459,7 +1552,7 @@
 	[CPUHP_AP_SMPBOOT_THREADS] = {
 		.name			= "smpboot/threads:online",
 		.startup.single		= smpboot_unpark_threads,
-		.teardown.single	= NULL,
+		.teardown.single	= smpboot_park_threads,
 	},
 	[CPUHP_AP_PERF_ONLINE] = {
 		.name			= "perf:online",
@@ -1913,10 +2006,172 @@
 	NULL
 };
 
+#ifdef CONFIG_HOTPLUG_SMT
+
+static const char *smt_states[] = {
+	[CPU_SMT_ENABLED]		= "on",
+	[CPU_SMT_DISABLED]		= "off",
+	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
+	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
+};
+
+static ssize_t
+show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
+}
+
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+
+	dev->offline = true;
+	/* Tell user space about the state change */
+	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+
+	dev->offline = false;
+	/* Tell user space about the state change */
+	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+	int cpu, ret = 0;
+
+	cpu_maps_update_begin();
+	for_each_online_cpu(cpu) {
+		if (topology_is_primary_thread(cpu))
+			continue;
+		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+		if (ret)
+			break;
+		/*
+		 * As this needs to hold the cpu maps lock it's impossible
+		 * to call device_offline() because that ends up calling
+		 * cpu_down() which takes cpu maps lock. cpu maps lock
+		 * needs to be held as this might race against in-kernel
+		 * abusers of the hotplug machinery (thermal management).
+		 *
+		 * So nothing would update device:offline state. That would
+		 * leave the sysfs entry stale and prevent onlining after
+		 * smt control has been changed to 'off' again. This is
+		 * called under the sysfs hotplug lock, so it is properly
+		 * serialized against the regular offline usage.
+		 */
+		cpuhp_offline_cpu_device(cpu);
+	}
+	if (!ret)
+		cpu_smt_control = ctrlval;
+	cpu_maps_update_done();
+	return ret;
+}
+
+static int cpuhp_smt_enable(void)
+{
+	int cpu, ret = 0;
+
+	cpu_maps_update_begin();
+	cpu_smt_control = CPU_SMT_ENABLED;
+	for_each_present_cpu(cpu) {
+		/* Skip online CPUs and CPUs on offline nodes */
+		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+			continue;
+		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+		if (ret)
+			break;
+		/* See comment in cpuhp_smt_disable() */
+		cpuhp_online_cpu_device(cpu);
+	}
+	cpu_maps_update_done();
+	return ret;
+}
+
+static ssize_t
+store_smt_control(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	int ctrlval, ret;
+
+	if (sysfs_streq(buf, "on"))
+		ctrlval = CPU_SMT_ENABLED;
+	else if (sysfs_streq(buf, "off"))
+		ctrlval = CPU_SMT_DISABLED;
+	else if (sysfs_streq(buf, "forceoff"))
+		ctrlval = CPU_SMT_FORCE_DISABLED;
+	else
+		return -EINVAL;
+
+	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
+		return -EPERM;
+
+	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+		return -ENODEV;
+
+	ret = lock_device_hotplug_sysfs();
+	if (ret)
+		return ret;
+
+	if (ctrlval != cpu_smt_control) {
+		switch (ctrlval) {
+		case CPU_SMT_ENABLED:
+			ret = cpuhp_smt_enable();
+			break;
+		case CPU_SMT_DISABLED:
+		case CPU_SMT_FORCE_DISABLED:
+			ret = cpuhp_smt_disable(ctrlval);
+			break;
+		}
+	}
+
+	unlock_device_hotplug();
+	return ret ? ret : count;
+}
+static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
+
+static ssize_t
+show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	bool active = topology_max_smt_threads() > 1;
+
+	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
+}
+static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
+
+static struct attribute *cpuhp_smt_attrs[] = {
+	&dev_attr_control.attr,
+	&dev_attr_active.attr,
+	NULL
+};
+
+static const struct attribute_group cpuhp_smt_attr_group = {
+	.attrs = cpuhp_smt_attrs,
+	.name = "smt",
+	NULL
+};
+
+static int __init cpu_smt_state_init(void)
+{
+	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+				  &cpuhp_smt_attr_group);
+}
+
+#else
+static inline int cpu_smt_state_init(void) { return 0; }
+#endif
+
 static int __init cpuhp_sysfs_init(void)
 {
 	int cpu, ret;
 
+	ret = cpu_smt_state_init();
+	if (ret)
+		return ret;
+
 	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
 				 &cpuhp_cpu_root_attr_group);
 	if (ret)
@@ -2021,9 +2276,12 @@
 /*
  * Must be called _AFTER_ setting up the per_cpu areas
  */
-void __init boot_cpu_state_init(void)
+void __init boot_cpu_hotplug_init(void)
 {
-	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+#ifdef CONFIG_SMP
+	this_cpu_write(cpuhp_state.booted_once, true);
+#endif
+	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
 }
 
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ee74fff..340eccd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4289,7 +4289,7 @@
  * object, it will not preserve its functionality. Once the last 'user'
  * gives up the object, we'll destroy the thing.
  */
-int perf_event_release_kernel(struct perf_event *event)
+static int __perf_event_release_kernel(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *child, *tmp;
@@ -4300,7 +4300,7 @@
 	 *  back online.
 	 */
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
-	if (event->cpu != -1 && !cpu_online(event->cpu)) {
+	if (event->cpu != -1 && per_cpu(is_hotplugging, event->cpu)) {
 		if (event->state == PERF_EVENT_STATE_ZOMBIE)
 			return 0;
 
@@ -4417,6 +4417,17 @@
 	put_event(event); /* Must be the 'last' reference */
 	return 0;
 }
+
+int perf_event_release_kernel(struct perf_event *event)
+{
+	int ret;
+
+	mutex_lock(&pmus_lock);
+	ret = __perf_event_release_kernel(event);
+	mutex_unlock(&pmus_lock);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 
 /*
@@ -5693,6 +5704,7 @@
 		unsigned long sp;
 		unsigned int rem;
 		u64 dyn_size;
+		mm_segment_t fs;
 
 		/*
 		 * We dump:
@@ -5710,7 +5722,10 @@
 
 		/* Data. */
 		sp = perf_user_stack_pointer(regs);
+		fs = get_fs();
+		set_fs(USER_DS);
 		rem = __output_copy_user(handle, (void *) sp, dump_size);
+		set_fs(fs);
 		dyn_size = dump_size - rem;
 
 		perf_output_skip(handle, rem);
@@ -11130,7 +11145,7 @@
 		 * PMU expects it to be in an active state
 		 */
 		event->state = PERF_EVENT_STATE_ACTIVE;
-		perf_event_release_kernel(event);
+		__perf_event_release_kernel(event);
 
 		spin_lock(&zombie_list_lock);
 	}
@@ -11145,6 +11160,7 @@
 	struct perf_event *event;
 	int idx;
 
+	mutex_lock(&pmus_lock);
 	perf_event_zombie_cleanup(cpu);
 
 	idx = srcu_read_lock(&pmus_srcu);
@@ -11159,6 +11175,8 @@
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
 	per_cpu(is_hotplugging, cpu) = false;
+	mutex_unlock(&pmus_lock);
+
 	return 0;
 }
 
@@ -11198,13 +11216,25 @@
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	unsigned long flags;
 	struct pmu *pmu;
 	int idx;
 
 	idx = srcu_read_lock(&pmus_srcu);
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		/* Cancel the mux hrtimer to avoid CPU migration */
+		if (pmu->task_ctx_nr != perf_sw_context) {
+			raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+			hrtimer_cancel(&cpuctx->hrtimer);
+			cpuctx->hrtimer_active = 0;
+			raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock,
+							flags);
+		}
 
 		mutex_lock(&ctx->mutex);
 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
@@ -11220,8 +11250,10 @@
 
 int perf_event_exit_cpu(unsigned int cpu)
 {
+	mutex_lock(&pmus_lock);
 	per_cpu(is_hotplugging, cpu) = true;
 	perf_event_exit_cpu_context(cpu);
+	mutex_unlock(&pmus_lock);
 	return 0;
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index ee8c601..f5a7cbf 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,7 +54,6 @@
 #include <linux/writeback.h>
 #include <linux/shm.h>
 #include <linux/kcov.h>
-#include <linux/cpufreq_times.h>
 
 #include "sched/tune.h"
 
@@ -172,9 +171,6 @@
 {
 	struct task_struct *leader;
 	int zap_leader;
-#ifdef CONFIG_CPU_FREQ_TIMES
-	cpufreq_task_times_exit(p);
-#endif
 repeat:
 	/* don't need to get the RCU readlock here - the process is dead and
 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
diff --git a/kernel/fork.c b/kernel/fork.c
index 7929fe7..62e8262 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -77,6 +77,7 @@
 #include <linux/compiler.h>
 #include <linux/sysctl.h>
 #include <linux/kcov.h>
+#include <linux/cpufreq_times.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -184,6 +185,9 @@
 			continue;
 		this_cpu_write(cached_stacks[i], NULL);
 
+		/* Clear stale pointers from reused stack. */
+		memset(s->addr, 0, THREAD_SIZE);
+
 		tsk->stack_vm_area = s;
 		local_irq_enable();
 		return s->addr;
@@ -339,6 +343,8 @@
 
 void free_task(struct task_struct *tsk)
 {
+	cpufreq_task_times_exit(tsk);
+
 #ifndef CONFIG_THREAD_INFO_IN_TASK
 	/*
 	 * The task is finally done with both the stack and thread_info,
@@ -1308,7 +1314,9 @@
 		return -ENOMEM;
 
 	atomic_set(&sig->count, 1);
+	spin_lock_irq(&current->sighand->siglock);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
+	spin_unlock_irq(&current->sighand->siglock);
 	return 0;
 }
 
@@ -1546,6 +1554,20 @@
 	 */
 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
 
+	cpufreq_task_times_init(p);
+
+	/*
+	 * This _must_ happen before we call free_task(), i.e. before we jump
+	 * to any of the bad_fork_* labels. This is to avoid freeing
+	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
+	 * kernel threads (PF_KTHREAD).
+	 */
+	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+	/*
+	 * Clear TID on mm_release()?
+	 */
+	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
 	ftrace_graph_init_task(p);
 
 	rt_mutex_init_task(p);
@@ -1978,6 +2000,8 @@
 		struct completion vfork;
 		struct pid *pid;
 
+		cpufreq_task_times_alloc(p);
+
 		trace_sched_process_fork(current, p);
 
 		pid = get_task_pid(p, PIDTYPE_PID);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c93d4df..587971a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1029,6 +1029,13 @@
 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 		return 0;
 
+	/*
+	 * No further action required for interrupts which are requested as
+	 * threaded interrupts already
+	 */
+	if (new->handler == irq_default_primary_handler)
+		return 0;
+
 	new->flags |= IRQF_ONESHOT;
 
 	/*
@@ -1036,7 +1043,7 @@
 	 * thread handler. We force thread them as well by creating a
 	 * secondary action.
 	 */
-	if (new->handler != irq_default_primary_handler && new->thread_fn) {
+	if (new->handler && new->thread_fn) {
 		/* Allocate the secondary action */
 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 		if (!new->secondary)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 154a80d..fad5144 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -240,7 +240,8 @@
 
 void kcov_task_init(struct task_struct *t)
 {
-	t->kcov_mode = KCOV_MODE_DISABLED;
+	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
+	barrier();
 	t->kcov_size = 0;
 	t->kcov_area = NULL;
 	t->kcov = NULL;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6948518..b9e966b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2441,7 +2441,7 @@
 	if (!dir)
 		return -ENOMEM;
 
-	file = debugfs_create_file("list", 0444, dir, NULL,
+	file = debugfs_create_file("list", 0400, dir, NULL,
 				&debugfs_kprobes_operations);
 	if (!file)
 		goto error;
@@ -2451,7 +2451,7 @@
 	if (!file)
 		goto error;
 
-	file = debugfs_create_file("blacklist", 0444, dir, NULL,
+	file = debugfs_create_file("blacklist", 0400, dir, NULL,
 				&debugfs_kprobe_blacklist_ops);
 	if (!file)
 		goto error;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b65854c..981ebe9 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -290,8 +290,14 @@
 	task = create->result;
 	if (!IS_ERR(task)) {
 		static const struct sched_param param = { .sched_priority = 0 };
+		char name[TASK_COMM_LEN];
 
-		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+		/*
+		 * task is already visible to other tasks, so updating
+		 * COMM must be protected.
+		 */
+		vsnprintf(name, sizeof(name), namefmt, args);
+		set_task_comm(task, name);
 		/*
 		 * root may have changed our (kthreadd's) priority or CPU mask.
 		 * The kernel thread should not inherit these properties.
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 6599c7f..61a15e5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1240,11 +1240,11 @@
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -1267,11 +1267,11 @@
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -4273,7 +4273,7 @@
 	if (unlikely(!debug_locks))
 		return;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
@@ -4284,7 +4284,7 @@
 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index e99d860..d381f55 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -174,6 +174,11 @@
 	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
 		msg, raw_smp_processor_id(), current->comm,
 		task_pid_nr(current), lock);
+#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
+	msm_trigger_wdog_bite();
+#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
+	BUG();
+#endif
 	dump_stack();
 }
 
diff --git a/kernel/module.c b/kernel/module.c
index 8a84031..1277bdf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4035,7 +4035,7 @@
 
 	for (i = 0; i < kallsyms->num_symtab; i++)
 		if (strcmp(name, symname(kallsyms, i)) == 0 &&
-		    kallsyms->symtab[i].st_info != 'U')
+		    kallsyms->symtab[i].st_shndx != SHN_UNDEF)
 			return kallsyms->symtab[i].st_value;
 	return 0;
 }
@@ -4081,6 +4081,10 @@
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
 		for (i = 0; i < kallsyms->num_symtab; i++) {
+
+			if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+				continue;
+
 			ret = fn(data, symname(kallsyms, i),
 				 mod, kallsyms->symtab[i].st_value);
 			if (ret != 0)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index bf60b37..fac423f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -133,6 +133,7 @@
 	def_bool y
 	depends on SUSPEND || HIBERNATE_CALLBACKS
 	select PM
+	select SRCU
 
 config PM_SLEEP_SMP
 	def_bool y
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 35310b6..bc6dde1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -186,6 +186,11 @@
 		res = PAGE_SIZE - pg_offp;
 	}
 
+	if (!data_of(data->handle)) {
+		res = -EINVAL;
+		goto unlock;
+	}
+
 	res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
 			buf, count);
 	if (res > 0)
diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
index 5fa65aa..2c3e7f0 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/nmi.c
@@ -260,12 +260,12 @@
 	printk_nmi_flush();
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
 	this_cpu_write(printk_func, vprintk_nmi);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
 	this_cpu_write(printk_func, vprintk_default);
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8976980..8f29103 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2344,10 +2344,6 @@
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
-#ifdef CONFIG_CPU_FREQ_TIMES
-	cpufreq_task_times_init(p);
-#endif
-
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a7c4b4c..7cabd8c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -166,10 +166,8 @@
 	/* Account for user time used */
 	acct_account_cputime(p);
 
-#ifdef CONFIG_CPU_FREQ_TIMES
 	/* Account power usage for user time */
 	cpufreq_acct_update_power(p, cputime);
-#endif
 }
 
 /*
@@ -220,10 +218,9 @@
 
 	/* Account for system time used */
 	acct_account_cputime(p);
-#ifdef CONFIG_CPU_FREQ_TIMES
+
 	/* Account power usage for system time */
 	cpufreq_acct_update_power(p, cputime);
-#endif
 }
 
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0bb5046..7944ae9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7350,7 +7350,7 @@
 	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
 	       cpu_active(cpu) && !cpu_isolated(cpu) &&
 	       capacity_orig_of(cpu) >= capacity_orig_of(rtg_target_cpu) &&
-	       task_fits_max(p, cpu);
+	       task_fits_max(p, cpu) && !__cpu_overutilized(cpu, task_util(p));
 }
 
 #define SCHED_SELECT_PREV_CPU_NSEC	2000000
@@ -11166,7 +11166,8 @@
 	 * - A task which has been woken up by try_to_wake_up() and
 	 *   waiting for actually being woken up by sched_ttwu_pending().
 	 */
-	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+	if (!se->sum_exec_runtime ||
+	    (p->state == TASK_WAKING && p->sched_remote_wakeup))
 		return true;
 
 	return false;
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 82f0dff..9c2da06 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -33,9 +33,6 @@
 {
 	unsigned long flags;
 
-	if (!swait_active(q))
-		return;
-
 	raw_spin_lock_irqsave(&q->lock, flags);
 	swake_up_locked(q);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
@@ -51,9 +48,6 @@
 	struct swait_queue *curr;
 	LIST_HEAD(tmp);
 
-	if (!swait_active(q))
-		return;
-
 	raw_spin_lock_irq(&q->lock);
 	list_splice_init(&q->task_list, &tmp);
 	while (!list_empty(&tmp)) {
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index a8fab0c..92fcb92 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -791,7 +791,7 @@
 	    u64 prefer_idle)
 {
 	struct schedtune *st = css_st(css);
-	st->prefer_idle = prefer_idle;
+	st->prefer_idle = !!prefer_idle;
 
 	return 0;
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index 4364e57..23af00f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -34,6 +34,8 @@
 #include <linux/compat.h>
 #include <linux/cn_proc.h>
 #include <linux/compiler.h>
+#include <linux/oom.h>
+#include <linux/capability.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
@@ -1269,8 +1271,11 @@
 	ret = check_kill_permission(sig, info, p);
 	rcu_read_unlock();
 
-	if (!ret && sig)
+	if (!ret && sig) {
 		ret = do_send_sig_info(sig, info, p, true);
+		if (capable(CAP_KILL) && sig == SIGKILL)
+			add_to_oom_reaper(p);
+	}
 
 	return ret;
 }
diff --git a/kernel/smp.c b/kernel/smp.c
index 313d9a8..d49e5df 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -597,6 +597,8 @@
 
 	free_boot_cpu_mask();
 
+	/* Final decision about SMT support */
+	cpu_smt_check_topology();
 	/* Any cleanup work */
 	smp_announce();
 	smp_cpus_done(setup_max_cpus);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ec9ab2f..9b8cd7e 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -36,7 +36,7 @@
 struct cpu_stopper {
 	struct task_struct	*thread;
 
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
 
@@ -78,13 +78,13 @@
 	unsigned long flags;
 	bool enabled;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	enabled = stopper->enabled;
 	if (enabled)
 		__cpu_stop_queue_work(stopper, work);
 	else if (work->done)
 		cpu_stop_signal_done(work->done);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
 	return enabled;
 }
@@ -231,8 +231,8 @@
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
 	int err;
 retry:
-	spin_lock_irq(&stopper1->lock);
-	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock_irq(&stopper1->lock);
+	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
 	err = -ENOENT;
 	if (!stopper1->enabled || !stopper2->enabled)
@@ -255,8 +255,8 @@
 	__cpu_stop_queue_work(stopper1, work1);
 	__cpu_stop_queue_work(stopper2, work2);
 unlock:
-	spin_unlock(&stopper2->lock);
-	spin_unlock_irq(&stopper1->lock);
+	raw_spin_unlock(&stopper2->lock);
+	raw_spin_unlock_irq(&stopper1->lock);
 
 	if (unlikely(err == -EDEADLK)) {
 		while (stop_cpus_in_progress)
@@ -448,9 +448,9 @@
 	unsigned long flags;
 	int run;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	run = !list_empty(&stopper->works);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 	return run;
 }
 
@@ -461,13 +461,13 @@
 
 repeat:
 	work = NULL;
-	spin_lock_irq(&stopper->lock);
+	raw_spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
 		work = list_first_entry(&stopper->works,
 					struct cpu_stop_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock_irq(&stopper->lock);
+	raw_spin_unlock_irq(&stopper->lock);
 
 	if (work) {
 		cpu_stop_fn_t fn = work->fn;
@@ -541,7 +541,7 @@
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-		spin_lock_init(&stopper->lock);
+		raw_spin_lock_init(&stopper->lock);
 		INIT_LIST_HEAD(&stopper->works);
 	}
 
diff --git a/kernel/sys.c b/kernel/sys.c
index af33cbf..60e7ee0 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1144,18 +1144,19 @@
 
 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
-	int errno = 0;
+	struct new_utsname tmp;
 
 	down_read(&uts_sem);
-	if (copy_to_user(name, utsname(), sizeof *name))
-		errno = -EFAULT;
+	memcpy(&tmp, utsname(), sizeof(tmp));
 	up_read(&uts_sem);
+	if (copy_to_user(name, &tmp, sizeof(tmp)))
+		return -EFAULT;
 
-	if (!errno && override_release(name->release, sizeof(name->release)))
-		errno = -EFAULT;
-	if (!errno && override_architecture(name))
-		errno = -EFAULT;
-	return errno;
+	if (override_release(name->release, sizeof(name->release)))
+		return -EFAULT;
+	if (override_architecture(name))
+		return -EFAULT;
+	return 0;
 }
 
 #ifdef __ARCH_WANT_SYS_OLD_UNAME
@@ -1164,55 +1165,46 @@
  */
 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
 {
-	int error = 0;
+	struct old_utsname tmp;
 
 	if (!name)
 		return -EFAULT;
 
 	down_read(&uts_sem);
-	if (copy_to_user(name, utsname(), sizeof(*name)))
-		error = -EFAULT;
+	memcpy(&tmp, utsname(), sizeof(tmp));
 	up_read(&uts_sem);
+	if (copy_to_user(name, &tmp, sizeof(tmp)))
+		return -EFAULT;
 
-	if (!error && override_release(name->release, sizeof(name->release)))
-		error = -EFAULT;
-	if (!error && override_architecture(name))
-		error = -EFAULT;
-	return error;
+	if (override_release(name->release, sizeof(name->release)))
+		return -EFAULT;
+	if (override_architecture(name))
+		return -EFAULT;
+	return 0;
 }
 
 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 {
-	int error;
+	struct oldold_utsname tmp = {};
 
 	if (!name)
 		return -EFAULT;
-	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
-		return -EFAULT;
 
 	down_read(&uts_sem);
-	error = __copy_to_user(&name->sysname, &utsname()->sysname,
-			       __OLD_UTS_LEN);
-	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->release, &utsname()->release,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->release + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->version, &utsname()->version,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->version + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->machine, &utsname()->machine,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
+	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
+	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
+	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
+	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
 	up_read(&uts_sem);
+	if (copy_to_user(name, &tmp, sizeof(tmp)))
+		return -EFAULT;
 
-	if (!error && override_architecture(name))
-		error = -EFAULT;
-	if (!error && override_release(name->release, sizeof(name->release)))
-		error = -EFAULT;
-	return error ? -EFAULT : 0;
+	if (override_architecture(name))
+		return -EFAULT;
+	if (override_release(name->release, sizeof(name->release)))
+		return -EFAULT;
+	return 0;
 }
 #endif
 
@@ -1226,17 +1218,18 @@
 
 	if (len < 0 || len > __NEW_UTS_LEN)
 		return -EINVAL;
-	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		struct new_utsname *u = utsname();
+		struct new_utsname *u;
 
+		down_write(&uts_sem);
+		u = utsname();
 		memcpy(u->nodename, tmp, len);
 		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
 		errno = 0;
 		uts_proc_notify(UTS_PROC_HOSTNAME);
+		up_write(&uts_sem);
 	}
-	up_write(&uts_sem);
 	return errno;
 }
 
@@ -1244,8 +1237,9 @@
 
 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
 {
-	int i, errno;
+	int i;
 	struct new_utsname *u;
+	char tmp[__NEW_UTS_LEN + 1];
 
 	if (len < 0)
 		return -EINVAL;
@@ -1254,11 +1248,11 @@
 	i = 1 + strlen(u->nodename);
 	if (i > len)
 		i = len;
-	errno = 0;
-	if (copy_to_user(name, u->nodename, i))
-		errno = -EFAULT;
+	memcpy(tmp, u->nodename, i);
 	up_read(&uts_sem);
-	return errno;
+	if (copy_to_user(name, tmp, i))
+		return -EFAULT;
+	return 0;
 }
 
 #endif
@@ -1277,17 +1271,18 @@
 	if (len < 0 || len > __NEW_UTS_LEN)
 		return -EINVAL;
 
-	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		struct new_utsname *u = utsname();
+		struct new_utsname *u;
 
+		down_write(&uts_sem);
+		u = utsname();
 		memcpy(u->domainname, tmp, len);
 		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
 		errno = 0;
 		uts_proc_notify(UTS_PROC_DOMAINNAME);
+		up_write(&uts_sem);
 	}
-	up_write(&uts_sem);
 	return errno;
 }
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1d5e480..6efdb67 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1394,6 +1394,13 @@
 		.proc_handler	= proc_dointvec,
 	},
 	{
+		.procname       = "reap_mem_on_sigkill",
+		.data           = &sysctl_reap_mem_on_sigkill,
+		.maxlen         = sizeof(sysctl_reap_mem_on_sigkill),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+	{
 		.procname	= "overcommit_ratio",
 		.data		= &sysctl_overcommit_ratio,
 		.maxlen		= sizeof(sysctl_overcommit_ratio),
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 78c0e04..aa25aac 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -815,7 +815,8 @@
 	/* Convert (if necessary) to absolute time */
 	if (flags != TIMER_ABSTIME) {
 		ktime_t now = alarm_bases[type].gettime();
-		exp = ktime_add(now, exp);
+
+		exp = ktime_add_safe(now, exp);
 	}
 
 	if (alarmtimer_do_nsleep(&alarm, exp))
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 4ce4285..8f482de 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -686,7 +686,7 @@
 
 static inline bool local_timer_softirq_pending(void)
 {
-	return local_softirq_pending() & TIMER_SOFTIRQ;
+	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }
 
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 15f3487..9e5ffd1 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -704,6 +704,16 @@
 #endif
 }
 
+u64 jiffies64_to_nsecs(u64 j)
+{
+#if !(NSEC_PER_SEC % HZ)
+	return (NSEC_PER_SEC / HZ) * j;
+# else
+	return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
+#endif
+}
+EXPORT_SYMBOL(jiffies64_to_nsecs);
+
 /**
  * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
  *
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index c486889..f83bbb8 100644
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -98,6 +98,12 @@
 		print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
 		print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
 		print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+
+		cd=gcd(hz,1000000000)
+		print "#define HZ_TO_NSEC_NUM\t\t", 1000000000/cd, "\n"
+		print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n"
+		print "#define NSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+		print "#define NSEC_TO_HZ_DEN\t\t", 1000000000/cd, "\n"
 		print "\n"
 
 		print "#endif /* KERNEL_TIMECONST_H */\n"
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 4e17d55..bfa8bb3 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1720,6 +1720,10 @@
 	mutex_lock(&bdev->bd_mutex);
 
 	if (attr == &dev_attr_enable) {
+		if (!!value == !!q->blk_trace) {
+			ret = 0;
+			goto out_unlock_bdev;
+		}
 		if (value)
 			ret = blk_trace_setup_queue(q, bdev);
 		else
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
index d3bcd5c..6979bf0 100644
--- a/kernel/trace/msm_rtb.c
+++ b/kernel/trace/msm_rtb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -299,7 +299,7 @@
 	md_entry.virt_addr = (uintptr_t)msm_rtb.rtb;
 	md_entry.phys_addr = msm_rtb.phys;
 	md_entry.size = msm_rtb.size;
-	if (msm_minidump_add_region(&md_entry))
+	if (msm_minidump_add_region(&md_entry) < 0)
 		pr_info("Failed to add RTB in Minidump\n");
 
 #if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3e1d11f..f316e90 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1504,6 +1504,8 @@
 	tmp_iter_page = first_page;
 
 	do {
+		cond_resched();
+
 		to_remove_page = tmp_iter_page;
 		rb_inc_page(cpu_buffer, &tmp_iter_page);
 
@@ -3137,6 +3139,22 @@
 }
 
 /**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to check
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
+/**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
  * @cpu: The CPU buffer to stop
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2839d86..c13e83b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1325,6 +1325,12 @@
 
 	arch_spin_lock(&tr->max_lock);
 
+	/* Inherit the recordable setting from trace_buffer */
+	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+		ring_buffer_record_on(tr->max_buffer.buffer);
+	else
+		ring_buffer_record_off(tr->max_buffer.buffer);
+
 	buf = tr->trace_buffer.buffer;
 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
 	tr->max_buffer.buffer = buf;
@@ -2566,6 +2572,7 @@
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
+__printf(3, 0)
 static int
 __trace_array_vprintk(struct ring_buffer *buffer,
 		      unsigned long ip, const char *fmt, va_list args)
@@ -2621,12 +2628,14 @@
 	return len;
 }
 
+__printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
 	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
 }
 
+__printf(3, 0)
 int trace_array_printk(struct trace_array *tr,
 		       unsigned long ip, const char *fmt, ...)
 {
@@ -2642,6 +2651,7 @@
 	return ret;
 }
 
+__printf(3, 4)
 int trace_array_printk_buf(struct ring_buffer *buffer,
 			   unsigned long ip, const char *fmt, ...)
 {
@@ -2657,6 +2667,7 @@
 	return ret;
 }
 
+__printf(2, 0)
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	return trace_array_vprintk(&global_trace, ip, fmt, args);
@@ -7066,7 +7077,9 @@
 
 	if (buffer) {
 		mutex_lock(&trace_types_lock);
-		if (val) {
+		if (!!val == tracer_tracing_is_on(tr)) {
+			val = 0; /* do nothing */
+		} else if (val) {
 			tracer_tracing_on(tr);
 			if (tr->current_trace->start)
 				tr->current_trace->start(tr);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 88f398a..8819944 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -678,6 +678,8 @@
 		goto out_free;
 
  out_reg:
+	/* Up the trigger_data count to make sure reg doesn't free it on failure */
+	event_trigger_init(trigger_ops, trigger_data);
 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 	/*
 	 * The above returns on success the # of functions enabled,
@@ -685,11 +687,13 @@
 	 * Consider no functions a failure too.
 	 */
 	if (!ret) {
+		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 		ret = -ENOENT;
-		goto out_free;
-	} else if (ret < 0)
-		goto out_free;
-	ret = 0;
+	} else if (ret > 0)
+		ret = 0;
+
+	/* Down the counter of trigger_data or free it if not used anymore */
+	event_trigger_free(trigger_ops, trigger_data);
  out:
 	return ret;
 
@@ -1385,6 +1389,9 @@
 		goto out;
 	}
 
+	/* Up the trigger_data count to make sure nothing frees it on failure */
+	event_trigger_init(trigger_ops, trigger_data);
+
 	if (trigger) {
 		number = strsep(&trigger, ":");
 
@@ -1435,6 +1442,7 @@
 		goto out_disable;
 	/* Just return zero, not the number of enabled functions */
 	ret = 0;
+	event_trigger_free(trigger_ops, trigger_data);
  out:
 	return ret;
 
@@ -1445,7 +1453,7 @@
  out_free:
 	if (cmd_ops->set_filter)
 		cmd_ops->set_filter(NULL, trigger_data, NULL);
-	kfree(trigger_data);
+	event_trigger_free(trigger_ops, trigger_data);
 	kfree(enable_data);
 	goto out;
 }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index ea3ed03..3b4cd44 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -359,11 +359,10 @@
 static int
 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
+	struct event_file_link *link = NULL;
 	int ret = 0;
 
 	if (file) {
-		struct event_file_link *link;
-
 		link = kmalloc(sizeof(*link), GFP_KERNEL);
 		if (!link) {
 			ret = -ENOMEM;
@@ -383,6 +382,18 @@
 		else
 			ret = enable_kprobe(&tk->rp.kp);
 	}
+
+	if (ret) {
+		if (file) {
+			/* Unlink only if WARN_ON_ONCE() did not fire */
+			if (!WARN_ON_ONCE(!link))
+				list_del_rcu(&link->list);
+			kfree(link);
+			tk->tp.flags &= ~TP_FLAG_TRACE;
+		} else {
+			tk->tp.flags &= ~TP_FLAG_PROFILE;
+		}
+	}
  out:
 	return ret;
 }
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 83afbf2..92e394e 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -969,7 +969,7 @@
 
 		list_del_rcu(&link->list);
 		/* synchronize with u{,ret}probe_trace_func */
-		synchronize_sched();
+		synchronize_rcu();
 		kfree(link);
 
 		if (!list_empty(&tu->tp.files))
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 86b7854..f789bbb 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -649,7 +649,16 @@
 	unsigned idx;
 	struct uid_gid_extent *extent = NULL;
 	char *kbuf = NULL, *pos, *next_line;
-	ssize_t ret = -EINVAL;
+	ssize_t ret;
+
+	/* Only allow < page size writes at the beginning of the file */
+	if ((*ppos != 0) || (count >= PAGE_SIZE))
+		return -EINVAL;
+
+	/* Slurp in the user data */
+	kbuf = memdup_user_nul(buf, count);
+	if (IS_ERR(kbuf))
+		return PTR_ERR(kbuf);
 
 	/*
 	 * The userns_state_mutex serializes all writes to any given map.
@@ -683,19 +692,6 @@
 	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
 		goto out;
 
-	/* Only allow < page size writes at the beginning of the file */
-	ret = -EINVAL;
-	if ((*ppos != 0) || (count >= PAGE_SIZE))
-		goto out;
-
-	/* Slurp in the user data */
-	kbuf = memdup_user_nul(buf, count);
-	if (IS_ERR(kbuf)) {
-		ret = PTR_ERR(kbuf);
-		kbuf = NULL;
-		goto out;
-	}
-
 	/* Parse the user data */
 	ret = -EINVAL;
 	pos = kbuf;
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index c8eac43..d2b3b29 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -17,7 +17,7 @@
 
 #ifdef CONFIG_PROC_SYSCTL
 
-static void *get_uts(struct ctl_table *table, int write)
+static void *get_uts(struct ctl_table *table)
 {
 	char *which = table->data;
 	struct uts_namespace *uts_ns;
@@ -25,21 +25,9 @@
 	uts_ns = current->nsproxy->uts_ns;
 	which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
 
-	if (!write)
-		down_read(&uts_sem);
-	else
-		down_write(&uts_sem);
 	return which;
 }
 
-static void put_uts(struct ctl_table *table, int write, void *which)
-{
-	if (!write)
-		up_read(&uts_sem);
-	else
-		up_write(&uts_sem);
-}
-
 /*
  *	Special case of dostring for the UTS structure. This has locks
  *	to observe. Should this be in kernel/sys.c ????
@@ -49,13 +37,34 @@
 {
 	struct ctl_table uts_table;
 	int r;
-	memcpy(&uts_table, table, sizeof(uts_table));
-	uts_table.data = get_uts(table, write);
-	r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
-	put_uts(table, write, uts_table.data);
+	char tmp_data[__NEW_UTS_LEN + 1];
 
-	if (write)
+	memcpy(&uts_table, table, sizeof(uts_table));
+	uts_table.data = tmp_data;
+
+	/*
+	 * Buffer the value in tmp_data so that proc_dostring() can be called
+	 * without holding any locks.
+	 * We also need to read the original value in the write==1 case to
+	 * support partial writes.
+	 */
+	down_read(&uts_sem);
+	memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
+	up_read(&uts_sem);
+	r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
+
+	if (write) {
+		/*
+		 * Write back the new value.
+		 * Note that, since we dropped uts_sem, the result can
+		 * theoretically be incorrect if there are two parallel writes
+		 * at non-zero offsets to the same sysctl.
+		 */
+		down_write(&uts_sem);
+		memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
+		up_write(&uts_sem);
 		proc_sys_poll_notify(table->poll);
+	}
 
 	return r;
 }
diff --git a/lib/Kconfig b/lib/Kconfig
index 8b6c41e..e5443ce 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -185,6 +185,9 @@
 	  when they need to do cyclic redundancy check according CRC8
 	  algorithm. Module will be called crc8.
 
+config XXHASH
+	tristate
+
 config AUDIT_GENERIC
 	bool
 	depends on AUDIT && !AUDIT_ARCH
@@ -239,6 +242,14 @@
 config LZ4_DECOMPRESS
 	tristate
 
+config ZSTD_COMPRESS
+	select XXHASH
+	tristate
+
+config ZSTD_DECOMPRESS
+	select XXHASH
+	tristate
+
 source "lib/xz/Kconfig"
 
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4fc6d8f..ad250d1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2100,6 +2100,16 @@
 	 recoverable data corruption scenarios to system-halting panics,
 	 for easier detection and debug.
 
+config BUG_ON_DATA_CORRUPTION
+	bool "Trigger a BUG when data corruption is detected"
+	select DEBUG_LIST
+	help
+	  Select this option if the kernel should BUG when it encounters
+	  data corruption in kernel memory structures when they get checked
+	  for validity.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 6bde16d..003fae9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -95,6 +95,7 @@
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
 obj-$(CONFIG_CRC8)	+= crc8.o
+obj-$(CONFIG_XXHASH)	+= xxhash.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
 obj-$(CONFIG_842_COMPRESS) += 842/
@@ -108,6 +109,8 @@
 obj-$(CONFIG_LZ4_COMPRESS) += lz4/
 obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
 obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
+obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
 obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
 
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 19572a4..88580e8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -293,13 +293,13 @@
 		return;
 
 	limit++;
-	if (is_on_stack) {
-		pr_warn("object %p is on stack %p, but NOT annotated\n", addr,
-				task_stack_page(current));
-	} else {
-		pr_warn("object %p is NOT on stack %p, but annotated\n", addr,
-				task_stack_page(current));
-	}
+	if (is_on_stack)
+		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+			 task_stack_page(current));
+	else
+		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+			 task_stack_page(current));
+
 	WARN_ON(1);
 }
 
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 5323b59..b946203 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -84,7 +84,7 @@
 		if (ioremap_pmd_enabled() &&
 		    ((next - addr) == PMD_SIZE) &&
 		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
-		    pmd_free_pte_page(pmd)) {
+		    pmd_free_pte_page(pmd, addr)) {
 			if (pmd_set_huge(pmd, phys_addr + addr, prot))
 				continue;
 		}
@@ -111,7 +111,7 @@
 		if (ioremap_pud_enabled() &&
 		    ((next - addr) == PUD_SIZE) &&
 		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
-		    pud_free_pmd_page(pud)) {
+		    pud_free_pmd_page(pud, addr)) {
 			if (pud_set_huge(pud, phys_addr + addr, prot))
 				continue;
 		}
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5..f6b5478 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *prev;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@
 		prev = to_klist_node(prev->n_node.prev);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
@@ -377,8 +378,9 @@
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *next;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@
 		next = to_klist_node(next->n_node.next);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 7a5c1c0..8dfe861 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -2,8 +2,7 @@
  * Copyright 2006, Red Hat, Inc., Dave Jones
  * Released under the General Public License (GPL).
  *
- * This file contains the linked list implementations for
- * DEBUG_LIST.
+ * This file contains the linked list validation for DEBUG_LIST.
  */
 
 #include <linux/export.h>
@@ -14,94 +13,57 @@
 #include <linux/bug.h>
 
 /*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
+ * Check that the data structures for the list manipulations are reasonably
+ * valid. Failures here indicate memory corruption (and possibly an exploit
+ * attempt).
  */
 
-void __list_add(struct list_head *new,
-			      struct list_head *prev,
-			      struct list_head *next)
+bool __list_add_valid(struct list_head *new, struct list_head *prev,
+		      struct list_head *next)
 {
-	WARN(next->prev != prev,
-		"list_add corruption. next->prev should be "
-		"prev (%p), but was %p. (next=%p).\n",
-		prev, next->prev, next);
-	WARN(prev->next != next,
-		"list_add corruption. prev->next should be "
-		"next (%p), but was %p. (prev=%p).\n",
-		next, prev->next, prev);
-	WARN(new == prev || new == next,
-	     "list_add double add: new=%p, prev=%p, next=%p.\n",
-	     new, prev, next);
 
-	BUG_ON((prev->next != next || next->prev != prev ||
-		 new == prev || new == next) && PANIC_CORRUPTION);
+	if (CHECK_DATA_CORRUPTION(next->prev != prev,
+			"list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+			prev, next->prev, next) ||
+	    CHECK_DATA_CORRUPTION(prev->next != next,
+			"list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+			next, prev->next, prev) ||
+	    CHECK_DATA_CORRUPTION(new == prev || new == next,
+			"list_add double add: new=%p, prev=%p, next=%p.\n",
+			new, prev, next))
+		return false;
 
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
 	WRITE_ONCE(prev->next, new);
-}
-EXPORT_SYMBOL(__list_add);
 
-void __list_del_entry(struct list_head *entry)
+	return true;
+}
+EXPORT_SYMBOL(__list_add_valid);
+
+bool __list_del_entry_valid(struct list_head *entry)
 {
 	struct list_head *prev, *next;
 
 	prev = entry->prev;
 	next = entry->next;
 
-	if (WARN(next == LIST_POISON1,
-		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
-		entry, LIST_POISON1) ||
-	    WARN(prev == LIST_POISON2,
-		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
-		entry, LIST_POISON2) ||
-	    WARN(prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, prev->next) ||
-	    WARN(next->prev != entry,
-		"list_del corruption. next->prev should be %p, but was %p\n",
-		entry, next->prev)) {
-		BUG_ON(PANIC_CORRUPTION);
-		return;
-	}
+	if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+			"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+			entry, LIST_POISON1) ||
+	    CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
+			"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+			entry, LIST_POISON2) ||
+	    CHECK_DATA_CORRUPTION(prev->next != entry,
+			"list_del corruption. prev->next should be %p, but was %p\n",
+			entry, prev->next) ||
+	    CHECK_DATA_CORRUPTION(next->prev != entry,
+			"list_del corruption. next->prev should be %p, but was %p\n",
+			entry, next->prev))
+		return false;
 
-	__list_del(prev, next);
-}
-EXPORT_SYMBOL(__list_del_entry);
+	return true;
 
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty on entry does not return true after this, the entry is
- * in an undefined state.
- */
-void list_del(struct list_head *entry)
-{
-	__list_del_entry(entry);
-	entry->next = LIST_POISON1;
-	entry->prev = LIST_POISON2;
 }
-EXPORT_SYMBOL(list_del);
-
-/*
- * RCU variants.
- */
-void __list_add_rcu(struct list_head *new,
-		    struct list_head *prev, struct list_head *next)
-{
-	WARN(next->prev != prev,
-		"list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
-		prev, next->prev, next);
-	WARN(prev->next != next,
-		"list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
-		next, prev->next, prev);
-	new->next = next;
-	new->prev = prev;
-	rcu_assign_pointer(list_next_rcu(prev), new);
-	next->prev = new;
-}
-EXPORT_SYMBOL(__list_add_rcu);
+EXPORT_SYMBOL(__list_del_entry_valid);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 895961c..101dac0 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -783,8 +783,16 @@
 
 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
 {
-	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
-		   (unsigned long)params->min_size);
+	size_t retsize;
+
+	if (params->nelem_hint)
+		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+			      (unsigned long)params->min_size);
+	else
+		retsize = max(HASH_DEFAULT_SIZE,
+			      (unsigned long)params->min_size);
+
+	return retsize;
 }
 
 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -841,8 +849,6 @@
 	struct bucket_table *tbl;
 	size_t size;
 
-	size = HASH_DEFAULT_SIZE;
-
 	if ((!params->key_len && !params->obj_hashfn) ||
 	    (params->obj_hashfn && !params->obj_cmpfn))
 		return -EINVAL;
@@ -869,8 +875,7 @@
 
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
-	if (params->nelem_hint)
-		size = rounded_hashtable_size(&ht->p);
+	size = rounded_hashtable_size(&ht->p);
 
 	/* The maximum (not average) chain length grows with the
 	 * size of the hash table, at a rate of (log N)/(log log N).
diff --git a/lib/xxhash.c b/lib/xxhash.c
new file mode 100644
index 0000000..aa61e2a
--- /dev/null
+++ b/lib/xxhash.c
@@ -0,0 +1,500 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following disclaimer
+ *     in the documentation and/or other materials provided with the
+ *     distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at:
+ * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+#include <asm/unaligned.h>
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/xxhash.h>
+
+/*-*************************************
+ * Macros
+ **************************************/
+#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))
+#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))
+
+#ifdef __LITTLE_ENDIAN
+# define XXH_CPU_LITTLE_ENDIAN 1
+#else
+# define XXH_CPU_LITTLE_ENDIAN 0
+#endif
+
+/*-*************************************
+ * Constants
+ **************************************/
+static const uint32_t PRIME32_1 = 2654435761U;
+static const uint32_t PRIME32_2 = 2246822519U;
+static const uint32_t PRIME32_3 = 3266489917U;
+static const uint32_t PRIME32_4 =  668265263U;
+static const uint32_t PRIME32_5 =  374761393U;
+
+static const uint64_t PRIME64_1 = 11400714785074694791ULL;
+static const uint64_t PRIME64_2 = 14029467366897019727ULL;
+static const uint64_t PRIME64_3 =  1609587929392839161ULL;
+static const uint64_t PRIME64_4 =  9650029242287828579ULL;
+static const uint64_t PRIME64_5 =  2870177450012600261ULL;
+
+/*-**************************
+ *  Utils
+ ***************************/
+void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL(xxh32_copy_state);
+
+void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL(xxh64_copy_state);
+
+/*-***************************
+ * Simple Hash Functions
+ ****************************/
+static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
+{
+	seed += input * PRIME32_2;
+	seed = xxh_rotl32(seed, 13);
+	seed *= PRIME32_1;
+	return seed;
+}
+
+uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *b_end = p + len;
+	uint32_t h32;
+
+	if (len >= 16) {
+		const uint8_t *const limit = b_end - 16;
+		uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
+		uint32_t v2 = seed + PRIME32_2;
+		uint32_t v3 = seed + 0;
+		uint32_t v4 = seed - PRIME32_1;
+
+		do {
+			v1 = xxh32_round(v1, get_unaligned_le32(p));
+			p += 4;
+			v2 = xxh32_round(v2, get_unaligned_le32(p));
+			p += 4;
+			v3 = xxh32_round(v3, get_unaligned_le32(p));
+			p += 4;
+			v4 = xxh32_round(v4, get_unaligned_le32(p));
+			p += 4;
+		} while (p <= limit);
+
+		h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
+			xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
+	} else {
+		h32 = seed + PRIME32_5;
+	}
+
+	h32 += (uint32_t)len;
+
+	while (p + 4 <= b_end) {
+		h32 += get_unaligned_le32(p) * PRIME32_3;
+		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h32 += (*p) * PRIME32_5;
+		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
+		p++;
+	}
+
+	h32 ^= h32 >> 15;
+	h32 *= PRIME32_2;
+	h32 ^= h32 >> 13;
+	h32 *= PRIME32_3;
+	h32 ^= h32 >> 16;
+
+	return h32;
+}
+EXPORT_SYMBOL(xxh32);
+
+static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
+{
+	acc += input * PRIME64_2;
+	acc = xxh_rotl64(acc, 31);
+	acc *= PRIME64_1;
+	return acc;
+}
+
+static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
+{
+	val = xxh64_round(0, val);
+	acc ^= val;
+	acc = acc * PRIME64_1 + PRIME64_4;
+	return acc;
+}
+
+uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *const b_end = p + len;
+	uint64_t h64;
+
+	if (len >= 32) {
+		const uint8_t *const limit = b_end - 32;
+		uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
+		uint64_t v2 = seed + PRIME64_2;
+		uint64_t v3 = seed + 0;
+		uint64_t v4 = seed - PRIME64_1;
+
+		do {
+			v1 = xxh64_round(v1, get_unaligned_le64(p));
+			p += 8;
+			v2 = xxh64_round(v2, get_unaligned_le64(p));
+			p += 8;
+			v3 = xxh64_round(v3, get_unaligned_le64(p));
+			p += 8;
+			v4 = xxh64_round(v4, get_unaligned_le64(p));
+			p += 8;
+		} while (p <= limit);
+
+		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
+			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
+		h64 = xxh64_merge_round(h64, v1);
+		h64 = xxh64_merge_round(h64, v2);
+		h64 = xxh64_merge_round(h64, v3);
+		h64 = xxh64_merge_round(h64, v4);
+
+	} else {
+		h64  = seed + PRIME64_5;
+	}
+
+	h64 += (uint64_t)len;
+
+	while (p + 8 <= b_end) {
+		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
+
+		h64 ^= k1;
+		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
+		p += 8;
+	}
+
+	if (p + 4 <= b_end) {
+		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
+		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h64 ^= (*p) * PRIME64_5;
+		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
+		p++;
+	}
+
+	h64 ^= h64 >> 33;
+	h64 *= PRIME64_2;
+	h64 ^= h64 >> 29;
+	h64 *= PRIME64_3;
+	h64 ^= h64 >> 32;
+
+	return h64;
+}
+EXPORT_SYMBOL(xxh64);
+
+/*-**************************************************
+ * Advanced Hash Functions
+ ***************************************************/
+void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
+{
+	/* use a local state for memcpy() to avoid strict-aliasing warnings */
+	struct xxh32_state state;
+
+	memset(&state, 0, sizeof(state));
+	state.v1 = seed + PRIME32_1 + PRIME32_2;
+	state.v2 = seed + PRIME32_2;
+	state.v3 = seed + 0;
+	state.v4 = seed - PRIME32_1;
+	memcpy(statePtr, &state, sizeof(state));
+}
+EXPORT_SYMBOL(xxh32_reset);
+
+void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
+{
+	/* use a local state for memcpy() to avoid strict-aliasing warnings */
+	struct xxh64_state state;
+
+	memset(&state, 0, sizeof(state));
+	state.v1 = seed + PRIME64_1 + PRIME64_2;
+	state.v2 = seed + PRIME64_2;
+	state.v3 = seed + 0;
+	state.v4 = seed - PRIME64_1;
+	memcpy(statePtr, &state, sizeof(state));
+}
+EXPORT_SYMBOL(xxh64_reset);
+
+int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *const b_end = p + len;
+
+	if (input == NULL)
+		return -EINVAL;
+
+	state->total_len_32 += (uint32_t)len;
+	state->large_len |= (len >= 16) | (state->total_len_32 >= 16);
+
+	if (state->memsize + len < 16) { /* fill in tmp buffer */
+		memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
+		state->memsize += (uint32_t)len;
+		return 0;
+	}
+
+	if (state->memsize) { /* some data left from previous update */
+		const uint32_t *p32 = state->mem32;
+
+		memcpy((uint8_t *)(state->mem32) + state->memsize, input,
+			16 - state->memsize);
+
+		state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32));
+		p32++;
+		state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32));
+		p32++;
+		state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32));
+		p32++;
+		state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32));
+		p32++;
+
+		p += 16-state->memsize;
+		state->memsize = 0;
+	}
+
+	if (p <= b_end - 16) {
+		const uint8_t *const limit = b_end - 16;
+		uint32_t v1 = state->v1;
+		uint32_t v2 = state->v2;
+		uint32_t v3 = state->v3;
+		uint32_t v4 = state->v4;
+
+		do {
+			v1 = xxh32_round(v1, get_unaligned_le32(p));
+			p += 4;
+			v2 = xxh32_round(v2, get_unaligned_le32(p));
+			p += 4;
+			v3 = xxh32_round(v3, get_unaligned_le32(p));
+			p += 4;
+			v4 = xxh32_round(v4, get_unaligned_le32(p));
+			p += 4;
+		} while (p <= limit);
+
+		state->v1 = v1;
+		state->v2 = v2;
+		state->v3 = v3;
+		state->v4 = v4;
+	}
+
+	if (p < b_end) {
+		memcpy(state->mem32, p, (size_t)(b_end-p));
+		state->memsize = (uint32_t)(b_end-p);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(xxh32_update);
+
+uint32_t xxh32_digest(const struct xxh32_state *state)
+{
+	const uint8_t *p = (const uint8_t *)state->mem32;
+	const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
+		state->memsize;
+	uint32_t h32;
+
+	if (state->large_len) {
+		h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
+			xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
+	} else {
+		h32 = state->v3 /* == seed */ + PRIME32_5;
+	}
+
+	h32 += state->total_len_32;
+
+	while (p + 4 <= b_end) {
+		h32 += get_unaligned_le32(p) * PRIME32_3;
+		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h32 += (*p) * PRIME32_5;
+		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
+		p++;
+	}
+
+	h32 ^= h32 >> 15;
+	h32 *= PRIME32_2;
+	h32 ^= h32 >> 13;
+	h32 *= PRIME32_3;
+	h32 ^= h32 >> 16;
+
+	return h32;
+}
+EXPORT_SYMBOL(xxh32_digest);
+
+int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
+{
+	const uint8_t *p = (const uint8_t *)input;
+	const uint8_t *const b_end = p + len;
+
+	if (input == NULL)
+		return -EINVAL;
+
+	state->total_len += len;
+
+	if (state->memsize + len < 32) { /* fill in tmp buffer */
+		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
+		state->memsize += (uint32_t)len;
+		return 0;
+	}
+
+	if (state->memsize) { /* tmp buffer is full */
+		uint64_t *p64 = state->mem64;
+
+		memcpy(((uint8_t *)p64) + state->memsize, input,
+			32 - state->memsize);
+
+		state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
+		p64++;
+		state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));
+		p64++;
+		state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));
+		p64++;
+		state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64));
+
+		p += 32 - state->memsize;
+		state->memsize = 0;
+	}
+
+	if (p + 32 <= b_end) {
+		const uint8_t *const limit = b_end - 32;
+		uint64_t v1 = state->v1;
+		uint64_t v2 = state->v2;
+		uint64_t v3 = state->v3;
+		uint64_t v4 = state->v4;
+
+		do {
+			v1 = xxh64_round(v1, get_unaligned_le64(p));
+			p += 8;
+			v2 = xxh64_round(v2, get_unaligned_le64(p));
+			p += 8;
+			v3 = xxh64_round(v3, get_unaligned_le64(p));
+			p += 8;
+			v4 = xxh64_round(v4, get_unaligned_le64(p));
+			p += 8;
+		} while (p <= limit);
+
+		state->v1 = v1;
+		state->v2 = v2;
+		state->v3 = v3;
+		state->v4 = v4;
+	}
+
+	if (p < b_end) {
+		memcpy(state->mem64, p, (size_t)(b_end-p));
+		state->memsize = (uint32_t)(b_end - p);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(xxh64_update);
+
+uint64_t xxh64_digest(const struct xxh64_state *state)
+{
+	const uint8_t *p = (const uint8_t *)state->mem64;
+	const uint8_t *const b_end = (const uint8_t *)state->mem64 +
+		state->memsize;
+	uint64_t h64;
+
+	if (state->total_len >= 32) {
+		const uint64_t v1 = state->v1;
+		const uint64_t v2 = state->v2;
+		const uint64_t v3 = state->v3;
+		const uint64_t v4 = state->v4;
+
+		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
+			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
+		h64 = xxh64_merge_round(h64, v1);
+		h64 = xxh64_merge_round(h64, v2);
+		h64 = xxh64_merge_round(h64, v3);
+		h64 = xxh64_merge_round(h64, v4);
+	} else {
+		h64  = state->v3 + PRIME64_5;
+	}
+
+	h64 += (uint64_t)state->total_len;
+
+	while (p + 8 <= b_end) {
+		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
+
+		h64 ^= k1;
+		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
+		p += 8;
+	}
+
+	if (p + 4 <= b_end) {
+		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
+		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+		p += 4;
+	}
+
+	while (p < b_end) {
+		h64 ^= (*p) * PRIME64_5;
+		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
+		p++;
+	}
+
+	h64 ^= h64 >> 33;
+	h64 *= PRIME64_2;
+	h64 ^= h64 >> 29;
+	h64 *= PRIME64_3;
+	h64 ^= h64 >> 32;
+
+	return h64;
+}
+EXPORT_SYMBOL(xxh64_digest);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("xxHash");
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
new file mode 100644
index 0000000..dd0a359
--- /dev/null
+++ b/lib/zstd/Makefile
@@ -0,0 +1,18 @@
+obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
+
+ccflags-y += -O3
+
+# Object files unique to zstd_compress and zstd_decompress
+zstd_compress-y := fse_compress.o huf_compress.o compress.o
+zstd_decompress-y := huf_decompress.o decompress.o
+
+# These object files are shared between the two modules.
+# They are always linked into zstd_compress.
+# Unless both zstd_compress and zstd_decompress are built in (=y),
+# they are also linked into zstd_decompress so each module is self-contained.
+zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o
+
+ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy)
+	zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o
+endif
diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
new file mode 100644
index 0000000..a826b99
--- /dev/null
+++ b/lib/zstd/bitstream.h
@@ -0,0 +1,374 @@
+/*
+ * bitstream
+ * Part of FSE library
+ * header file (to include)
+ * Copyright (C) 2013-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+/*
+*  This API consists of small unitary functions, which must be inlined for best performance.
+*  Since link-time-optimization is not available for all compilers,
+*  these functions are defined into a .h to be included.
+*/
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include "error_private.h" /* error codes and messages */
+#include "mem.h"	   /* unaligned access routines */
+
+/*=========================================
+*  Target specific
+=========================================*/
+#define STREAM_ACCUMULATOR_MIN_32 25
+#define STREAM_ACCUMULATOR_MIN_64 57
+#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
+/*-******************************************
+*  bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+*  A critical property of these streams is that they encode and decode in **reverse** direction.
+*  So the first bit sequence you add will be the last to be read, like a LIFO stack.
+*/
+typedef struct {
+	size_t bitContainer;
+	int bitPos;
+	char *startPtr;
+	char *ptr;
+	char *endPtr;
+} BIT_CStream_t;
+
+ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity);
+ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
+ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC);
+ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC);
+
+/* Start with initCStream, providing the size of buffer to write into.
+*  bitStream will never write outside of this buffer.
+*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
+*
+*  bits are first added to a local register.
+*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+*  Writing data into memory is an explicit operation, performed by the flushBits function.
+*  Hence keep track of how many bits are potentially stored in the local register to avoid register overflow.
+*  After a flushBits, a maximum of 7 bits might still be stored into local register.
+*
+*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+*  Last operation is to close the bitStream.
+*  The function returns the final size of CStream in bytes.
+*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
+*/
+
+/*-********************************************
+*  bitStream decoding API (read backward)
+**********************************************/
+typedef struct {
+	size_t bitContainer;
+	unsigned bitsConsumed;
+	const char *ptr;
+	const char *start;
+} BIT_DStream_t;
+
+typedef enum {
+	BIT_DStream_unfinished = 0,
+	BIT_DStream_endOfBuffer = 1,
+	BIT_DStream_completed = 2,
+	BIT_DStream_overflow = 3
+} BIT_DStream_status; /* result of BIT_reloadDStream() */
+/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize);
+ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits);
+ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD);
+ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD);
+
+/* Start by invoking BIT_initDStream().
+*  A chunk of the bitStream is then stored into a local register.
+*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+*  You can then retrieve bitFields stored into the local register, **in reverse order**.
+*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+*  A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+*  Otherwise, it can be less than that, so proceed accordingly.
+*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
+
+/*-****************************************
+*  unsafe API
+******************************************/
+ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC);
+/* unsafe version; does not check buffer overflow */
+
+ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+/*-**************************************************************
+*  Internal functions
+****************************************************************/
+ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); }
+
+/*=====    Local Constants   =====*/
+static const unsigned BIT_mask[] = {0,       1,       3,       7,	0xF,      0x1F,     0x3F,     0x7F,      0xFF,
+				    0x1FF,   0x3FF,   0x7FF,   0xFFF,    0x1FFF,   0x3FFF,   0x7FFF,   0xFFFF,    0x1FFFF,
+				    0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */
+
+/*-**************************************************************
+*  bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ *  `dstCapacity` must be > sizeof(void*)
+ *  @return : 0 if success,
+			  otherwise an error code (can be tested using ERR_isError() ) */
+ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity)
+{
+	bitC->bitContainer = 0;
+	bitC->bitPos = 0;
+	bitC->startPtr = (char *)startPtr;
+	bitC->ptr = bitC->startPtr;
+	bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
+	if (dstCapacity <= sizeof(bitC->ptr))
+		return ERROR(dstSize_tooSmall);
+	return 0;
+}
+
+/*! BIT_addBits() :
+	can add up to 26 bits into `bitC`.
+	Does not check for register overflow ! */
+ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
+{
+	bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+	bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
+ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
+{
+	bitC->bitContainer |= value << bitC->bitPos;
+	bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ *  unsafe version; does not check buffer overflow */
+ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC)
+{
+	size_t const nbBytes = bitC->bitPos >> 3;
+	ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
+	bitC->ptr += nbBytes;
+	bitC->bitPos &= 7;
+	bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
+}
+
+/*! BIT_flushBits() :
+ *  safe version; checks for buffer overflow and prevents it.
+ *  note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
+ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC)
+{
+	size_t const nbBytes = bitC->bitPos >> 3;
+	ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
+	bitC->ptr += nbBytes;
+	if (bitC->ptr > bitC->endPtr)
+		bitC->ptr = bitC->endPtr;
+	bitC->bitPos &= 7;
+	bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
+}
+
+/*! BIT_closeCStream() :
+ *  @return : size of CStream, in bytes,
+			  or 0 if it could not fit into dstBuffer */
+ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC)
+{
+	BIT_addBitsFast(bitC, 1, 1); /* endMark */
+	BIT_flushBits(bitC);
+
+	if (bitC->ptr >= bitC->endPtr)
+		return 0; /* doesn't fit within authorized budget : cancel */
+
+	return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+*   Initialize a BIT_DStream_t.
+*   `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+*   `srcSize` must be the *exact* size of the bitStream, in bytes.
+*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
+{
+	if (srcSize < 1) {
+		memset(bitD, 0, sizeof(*bitD));
+		return ERROR(srcSize_wrong);
+	}
+
+	if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+		bitD->start = (const char *)srcBuffer;
+		bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+		bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
+		{
+			BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
+			bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+			if (lastByte == 0)
+				return ERROR(GENERIC); /* endMark not present */
+		}
+	} else {
+		bitD->start = (const char *)srcBuffer;
+		bitD->ptr = bitD->start;
+		bitD->bitContainer = *(const BYTE *)(bitD->start);
+		switch (srcSize) {
+		case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
+		case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
+		case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
+		case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
+		case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
+		case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
+		default:;
+		}
+		{
+			BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
+			bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+			if (lastByte == 0)
+				return ERROR(GENERIC); /* endMark not present */
+		}
+		bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
+	}
+
+	return srcSize;
+}
+
+ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
+
+ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
+
+ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
+
+/*! BIT_lookBits() :
+ *  Provides next n bits from local register.
+ *  local register is not modified.
+ *  On 32-bits, maxNbBits==24.
+ *  On 64-bits, maxNbBits==56.
+ *  @return : value extracted
+ */
+ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
+{
+	U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
+	return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
+}
+
+/*! BIT_lookBitsFast() :
+*   unsafe version; works only if nbBits >= 1 */
+ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
+{
+	U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
+	return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
+}
+
+ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
+
+/*! BIT_readBits() :
+ *  Read (consume) next n bits from local register and update.
+ *  Pay attention to not read more than nbBits contained into local register.
+ *  @return : extracted value.
+ */
+ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
+{
+	size_t const value = BIT_lookBits(bitD, nbBits);
+	BIT_skipBits(bitD, nbBits);
+	return value;
+}
+
+/*! BIT_readBitsFast() :
+*   unsafe version; works only if nbBits >= 1 */
+ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
+{
+	size_t const value = BIT_lookBitsFast(bitD, nbBits);
+	BIT_skipBits(bitD, nbBits);
+	return value;
+}
+
+/*! BIT_reloadDStream() :
+*   Refill `bitD` from buffer previously set in BIT_initDStream() .
+*   This function is safe, it guarantees it will not read beyond src buffer.
+*   @return : status of `BIT_DStream_t` internal register.
+			  if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
+ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD)
+{
+	if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */
+		return BIT_DStream_overflow;
+
+	if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+		bitD->ptr -= bitD->bitsConsumed >> 3;
+		bitD->bitsConsumed &= 7;
+		bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
+		return BIT_DStream_unfinished;
+	}
+	if (bitD->ptr == bitD->start) {
+		if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8)
+			return BIT_DStream_endOfBuffer;
+		return BIT_DStream_completed;
+	}
+	{
+		U32 nbBytes = bitD->bitsConsumed >> 3;
+		BIT_DStream_status result = BIT_DStream_unfinished;
+		if (bitD->ptr - nbBytes < bitD->start) {
+			nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+			result = BIT_DStream_endOfBuffer;
+		}
+		bitD->ptr -= nbBytes;
+		bitD->bitsConsumed -= nbBytes * 8;
+		bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
+		return result;
+	}
+}
+
+/*! BIT_endOfDStream() :
+*   @return Tells if DStream has exactly reached its end (all bits consumed).
+*/
+ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream)
+{
+	return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8));
+}
+
+#endif /* BITSTREAM_H_MODULE */
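
The comments above describe the intended call sequence for the CStream/DStream helpers. The sketch below is illustrative only: it assumes a translation unit that includes bitstream.h (the helpers are ZSTD_STATIC, i.e. file-local), the buffer size and bit widths are made up, and the stream is small enough that no BIT_reloadDStream() is needed.

/* Illustrative only: assumes a translation unit that includes "bitstream.h". */
static void bitstream_example(void)
{
	char buf[16];			/* plenty for a few bits */
	BIT_CStream_t cs;
	BIT_DStream_t ds;
	size_t streamSize;

	/* Encode a 3-bit value, then a 5-bit value. */
	BIT_initCStream(&cs, buf, sizeof(buf));
	BIT_addBits(&cs, 0x5, 3);
	BIT_addBits(&cs, 0x1A, 5);
	BIT_flushBits(&cs);
	streamSize = BIT_closeCStream(&cs);	/* 0 would mean "did not fit" */

	/* Decode in reverse order: the value added last is read first. */
	BIT_initDStream(&ds, buf, streamSize);
	BIT_readBits(&ds, 5);			/* yields 0x1A */
	BIT_readBits(&ds, 3);			/* yields 0x5 */
	/* BIT_endOfDStream(&ds) is now non-zero: all bits consumed. */
}
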
diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
new file mode 100644
index 0000000..f9166cf
--- /dev/null
+++ b/lib/zstd/compress.c
@@ -0,0 +1,3484 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "fse.h"
+#include "huf.h"
+#include "mem.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h> /* memset */
+
+/*-*************************************
+*  Constants
+***************************************/
+static const U32 g_searchStrength = 8; /* control skip over incompressible data */
+#define HASH_READ_SIZE 8
+typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+
+/*-*************************************
+*  Helper functions
+***************************************/
+size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
+
+/*-*************************************
+*  Sequence storage
+***************************************/
+static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
+{
+	ssPtr->lit = ssPtr->litStart;
+	ssPtr->sequences = ssPtr->sequencesStart;
+	ssPtr->longLengthID = 0;
+}
+
+/*-*************************************
+*  Context memory management
+***************************************/
+struct ZSTD_CCtx_s {
+	const BYTE *nextSrc;  /* next block here to continue on curr prefix */
+	const BYTE *base;     /* All regular indexes relative to this position */
+	const BYTE *dictBase; /* extDict indexes relative to this position */
+	U32 dictLimit;	/* below that point, need extDict */
+	U32 lowLimit;	 /* below that point, no more data */
+	U32 nextToUpdate;     /* index from which to continue dictionary update */
+	U32 nextToUpdate3;    /* index from which to continue dictionary update */
+	U32 hashLog3;	 /* dispatch table : larger == faster, more memory */
+	U32 loadedDictEnd;    /* index of end of dictionary */
+	U32 forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
+	U32 forceRawDict;     /* Force loading dictionary in "content-only" mode (no header analysis) */
+	ZSTD_compressionStage_e stage;
+	U32 rep[ZSTD_REP_NUM];
+	U32 repToConfirm[ZSTD_REP_NUM];
+	U32 dictID;
+	ZSTD_parameters params;
+	void *workSpace;
+	size_t workSpaceSize;
+	size_t blockSize;
+	U64 frameContentSize;
+	struct xxh64_state xxhState;
+	ZSTD_customMem customMem;
+
+	seqStore_t seqStore; /* sequences storage ptrs */
+	U32 *hashTable;
+	U32 *hashTable3;
+	U32 *chainTable;
+	HUF_CElt *hufTable;
+	U32 flagStaticTables;
+	HUF_repeat flagStaticHufTable;
+	FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+	FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+	FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+	unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
+};
+
+size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
+{
+	size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
+	U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
+	size_t const maxNbSeq = blockSize / divider;
+	size_t const tokenSpace = blockSize + 11 * maxNbSeq;
+	size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
+	size_t const hSize = ((size_t)1) << cParams.hashLog;
+	U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
+	size_t const h3Size = ((size_t)1) << hashLog3;
+	size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+	size_t const optSpace =
+	    ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+	size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
+				     (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
+
+	return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
+}
+
+static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
+	ZSTD_CCtx *cctx;
+	if (!customMem.customAlloc || !customMem.customFree)
+		return NULL;
+	cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
+	if (!cctx)
+		return NULL;
+	memset(cctx, 0, sizeof(ZSTD_CCtx));
+	cctx->customMem = customMem;
+	return cctx;
+}
+
+ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
+{
+	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
+	ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
+	if (cctx) {
+		cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
+	}
+	return cctx;
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
+{
+	if (cctx == NULL)
+		return 0; /* support free on NULL */
+	ZSTD_free(cctx->workSpace, cctx->customMem);
+	ZSTD_free(cctx, cctx->customMem);
+	return 0; /* reserved as a potential error code in the future */
+}
+
+const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }
+
+static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
+
+/** ZSTD_checkParams() :
+	ensure param values remain within authorized range.
+	@return : 0, or an error code if one value is beyond authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
+#define CLAMPCHECK(val, min, max)                                       \
+	{                                                               \
+		if ((val < min) | (val > max))                          \
+			return ERROR(compressionParameter_unsupported); \
+	}
+	CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+	CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+	CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+	CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+	CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
+	CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
+	if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
+		return ERROR(compressionParameter_unsupported);
+	return 0;
+}
+
+/** ZSTD_cycleLog() :
+ *  condition for correct operation : hashLog > 1 */
+static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+{
+	U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+	return hashLog - btScale;
+}
+
+/** ZSTD_adjustCParams() :
+	optimize `cPar` for a given input (`srcSize` and `dictSize`).
+	mostly downsizing to reduce memory consumption and initialization.
+	Both `srcSize` and `dictSize` are optional (use 0 if unknown),
+	but if both are 0, no optimization can be done.
+	Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
+ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+{
+	if (srcSize + dictSize == 0)
+		return cPar; /* no size information available : no adjustment */
+
+	/* resize params, to use less memory when necessary */
+	{
+		U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
+		U64 const rSize = srcSize + dictSize + minSrcSize;
+		if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
+			U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
+			if (cPar.windowLog > srcLog)
+				cPar.windowLog = srcLog;
+		}
+	}
+	if (cPar.hashLog > cPar.windowLog)
+		cPar.hashLog = cPar.windowLog;
+	{
+		U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+		if (cycleLog > cPar.windowLog)
+			cPar.chainLog -= (cycleLog - cPar.windowLog);
+	}
+
+	if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+		cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
+
+	return cPar;
+}
+
+static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
+{
+	return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
+	       (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
+}
+
+/*! ZSTD_continueCCtx() :
+	reuse CCtx without reset (note : requires no dictionary) */
+static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
+{
+	U32 const end = (U32)(cctx->nextSrc - cctx->base);
+	cctx->params = params;
+	cctx->frameContentSize = frameContentSize;
+	cctx->lowLimit = end;
+	cctx->dictLimit = end;
+	cctx->nextToUpdate = end + 1;
+	cctx->stage = ZSTDcs_init;
+	cctx->dictID = 0;
+	cctx->loadedDictEnd = 0;
+	{
+		int i;
+		for (i = 0; i < ZSTD_REP_NUM; i++)
+			cctx->rep[i] = repStartValue[i];
+	}
+	cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
+	xxh64_reset(&cctx->xxhState, 0);
+	return 0;
+}
+
+typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
+
+/*! ZSTD_resetCCtx_advanced() :
+	note : `params` must be validated */
+static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
+{
+	if (crp == ZSTDcrp_continue)
+		if (ZSTD_equivalentParams(params, zc->params)) {
+			zc->flagStaticTables = 0;
+			zc->flagStaticHufTable = HUF_repeat_none;
+			return ZSTD_continueCCtx(zc, params, frameContentSize);
+		}
+
+	{
+		size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
+		U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
+		size_t const maxNbSeq = blockSize / divider;
+		size_t const tokenSpace = blockSize + 11 * maxNbSeq;
+		size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
+		size_t const hSize = ((size_t)1) << params.cParams.hashLog;
+		U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
+		size_t const h3Size = ((size_t)1) << hashLog3;
+		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+		void *ptr;
+
+		/* Check if workSpace is large enough, alloc a new one if needed */
+		{
+			size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
+						(ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+			size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
+						   (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
+			if (zc->workSpaceSize < neededSpace) {
+				ZSTD_free(zc->workSpace, zc->customMem);
+				zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
+				if (zc->workSpace == NULL)
+					return ERROR(memory_allocation);
+				zc->workSpaceSize = neededSpace;
+			}
+		}
+
+		if (crp != ZSTDcrp_noMemset)
+			memset(zc->workSpace, 0, tableSpace); /* reset tables only */
+		xxh64_reset(&zc->xxhState, 0);
+		zc->hashLog3 = hashLog3;
+		zc->hashTable = (U32 *)(zc->workSpace);
+		zc->chainTable = zc->hashTable + hSize;
+		zc->hashTable3 = zc->chainTable + chainSize;
+		ptr = zc->hashTable3 + h3Size;
+		zc->hufTable = (HUF_CElt *)ptr;
+		zc->flagStaticTables = 0;
+		zc->flagStaticHufTable = HUF_repeat_none;
+		ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
+
+		zc->nextToUpdate = 1;
+		zc->nextSrc = NULL;
+		zc->base = NULL;
+		zc->dictBase = NULL;
+		zc->dictLimit = 0;
+		zc->lowLimit = 0;
+		zc->params = params;
+		zc->blockSize = blockSize;
+		zc->frameContentSize = frameContentSize;
+		{
+			int i;
+			for (i = 0; i < ZSTD_REP_NUM; i++)
+				zc->rep[i] = repStartValue[i];
+		}
+
+		if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
+			zc->seqStore.litFreq = (U32 *)ptr;
+			zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
+			zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
+			zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
+			ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
+			zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
+			ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
+			zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
+			ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
+			zc->seqStore.litLengthSum = 0;
+		}
+		zc->seqStore.sequencesStart = (seqDef *)ptr;
+		ptr = zc->seqStore.sequencesStart + maxNbSeq;
+		zc->seqStore.llCode = (BYTE *)ptr;
+		zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
+		zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
+		zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
+
+		zc->stage = ZSTDcs_init;
+		zc->dictID = 0;
+		zc->loadedDictEnd = 0;
+
+		return 0;
+	}
+}
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
+{
+	int i;
+	for (i = 0; i < ZSTD_REP_NUM; i++)
+		cctx->rep[i] = 0;
+}
+
+/*! ZSTD_copyCCtx() :
+*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+*   @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
+{
+	if (srcCCtx->stage != ZSTDcs_init)
+		return ERROR(stage_wrong);
+
+	memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+	{
+		ZSTD_parameters params = srcCCtx->params;
+		params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
+		ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
+	}
+
+	/* copy tables */
+	{
+		size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
+		size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
+		size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
+		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+		memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
+	}
+
+	/* copy dictionary offsets */
+	dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
+	dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
+	dstCCtx->nextSrc = srcCCtx->nextSrc;
+	dstCCtx->base = srcCCtx->base;
+	dstCCtx->dictBase = srcCCtx->dictBase;
+	dstCCtx->dictLimit = srcCCtx->dictLimit;
+	dstCCtx->lowLimit = srcCCtx->lowLimit;
+	dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
+	dstCCtx->dictID = srcCCtx->dictID;
+
+	/* copy entropy tables */
+	dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
+	dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
+	if (srcCCtx->flagStaticTables) {
+		memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
+		memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
+		memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
+	}
+	if (srcCCtx->flagStaticHufTable) {
+		memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
+	}
+
+	return 0;
+}
+
+/*! ZSTD_reduceTable() :
+*   reduce table indexes by `reducerValue` */
+static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
+{
+	U32 u;
+	for (u = 0; u < size; u++) {
+		if (table[u] < reducerValue)
+			table[u] = 0;
+		else
+			table[u] -= reducerValue;
+	}
+}
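+/* Example : with reducerValue == 0x20000000, a table entry 0x200003E8 becomes
+*  0x3E8, while entries below the reducer (no longer reachable from the current
+*  window) are clamped to 0. */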
+
+/*! ZSTD_reduceIndex() :
+*   rescale all indexes to avoid future overflow (indexes are U32) */
+static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
+{
+	{
+		U32 const hSize = 1 << zc->params.cParams.hashLog;
+		ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
+	}
+
+	{
+		U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
+		ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
+	}
+
+	{
+		U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
+		ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
+	}
+}
+
+/*-*******************************************************
+*  Block entropic compression
+*********************************************************/
+
+/* See doc/zstd_compression_format.md for detailed format description */
+
+size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
+		return ERROR(dstSize_tooSmall);
+	memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
+	ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
+	return ZSTD_blockHeaderSize + srcSize;
+}
+
+static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	BYTE *const ostart = (BYTE * const)dst;
+	U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
+
+	if (srcSize + flSize > dstCapacity)
+		return ERROR(dstSize_tooSmall);
+
+	switch (flSize) {
+	case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
+	case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
+	default: /* note : should not be necessary : flSize is within {1,2,3} */
+	case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
+	}
+
+	memcpy(ostart + flSize, src, srcSize);
+	return srcSize + flSize;
+}
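+/* Example : for srcSize == 100, flSize == 2, so the header is the 16-bit
+*  little-endian value set_basic + (1 << 2) + (100 << 4) - a 2-2-12 bit layout
+*  (literals type, size format, regenerated size) - followed by 100 raw bytes. */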
+
+static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	BYTE *const ostart = (BYTE * const)dst;
+	U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
+
+	(void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+	switch (flSize) {
+	case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
+	case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
+	default: /* note : should not be necessary : flSize is necessarily within {1,2,3} */
+	case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
+	}
+
+	ostart[flSize] = *(const BYTE *)src;
+	return flSize + 1;
+}
+
+static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
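+/* Example : for a 64 KB block ZSTD_minGain() == 1026, so a compressed result
+*  (literals or whole block) is only kept if it saves at least 1026 bytes. */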
+
+static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	size_t const minGain = ZSTD_minGain(srcSize);
+	size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+	BYTE *const ostart = (BYTE *)dst;
+	U32 singleStream = srcSize < 256;
+	symbolEncodingType_e hType = set_compressed;
+	size_t cLitSize;
+
+/* small ? don't even attempt compression (speed opt) */
+#define LITERAL_NOENTROPY 63
+	{
+		size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
+		if (srcSize <= minLitSize)
+			return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+	}
+
+	if (dstCapacity < lhSize + 1)
+		return ERROR(dstSize_tooSmall); /* not enough space for compression */
+	{
+		HUF_repeat repeat = zc->flagStaticHufTable;
+		int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+		if (repeat == HUF_repeat_valid && lhSize == 3)
+			singleStream = 1;
+		cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
+								sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
+					: HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
+								sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
+		if (repeat != HUF_repeat_none) {
+			hType = set_repeat;
+		} /* reused the existing table */
+		else {
+			zc->flagStaticHufTable = HUF_repeat_check;
+		} /* now have a table to reuse */
+	}
+
+	if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
+		zc->flagStaticHufTable = HUF_repeat_none;
+		return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+	}
+	if (cLitSize == 1) {
+		zc->flagStaticHufTable = HUF_repeat_none;
+		return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+	}
+
+	/* Build header */
+	switch (lhSize) {
+	case 3: /* 2 - 2 - 10 - 10 */
+	{
+		U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
+		ZSTD_writeLE24(ostart, lhc);
+		break;
+	}
+	case 4: /* 2 - 2 - 14 - 14 */
+	{
+		U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
+		ZSTD_writeLE32(ostart, lhc);
+		break;
+	}
+	default: /* should not be necessary, lhSize is only {3,4,5} */
+	case 5:  /* 2 - 2 - 18 - 18 */
+	{
+		U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
+		ZSTD_writeLE32(ostart, lhc);
+		ostart[4] = (BYTE)(cLitSize >> 10);
+		break;
+	}
+	}
+	return lhSize + cLitSize;
+}
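+/* Example : with srcSize == 800 (< 1 KB), lhSize == 3 : 2 bits literals type,
+*  2 bits size format (bit 2 set when 4 streams are used), 10 bits regenerated
+*  size, 10 bits compressed size, written little-endian in front of the
+*  Huffman-compressed literals. */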
+
+static const BYTE LL_Code[64] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
+				 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
+				 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};
+
+static const BYTE ML_Code[128] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+				  26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
+				  38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+				  40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
+				  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
+
+void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
+{
+	BYTE const LL_deltaCode = 19;
+	BYTE const ML_deltaCode = 36;
+	const seqDef *const sequences = seqStorePtr->sequencesStart;
+	BYTE *const llCodeTable = seqStorePtr->llCode;
+	BYTE *const ofCodeTable = seqStorePtr->ofCode;
+	BYTE *const mlCodeTable = seqStorePtr->mlCode;
+	U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+	U32 u;
+	for (u = 0; u < nbSeq; u++) {
+		U32 const llv = sequences[u].litLength;
+		U32 const mlv = sequences[u].matchLength;
+		llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
+		ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+		mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
+	}
+	if (seqStorePtr->longLengthID == 1)
+		llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+	if (seqStorePtr->longLengthID == 2)
+		mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
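+/* Example : litLength 18 maps to LL code 17 directly from LL_Code[], while
+*  litLength 100 (> 63) maps to ZSTD_highbit32(100) + 19 == 25 ; the low bits
+*  of the actual length are emitted later as extra bits by
+*  ZSTD_compressSequences_internal(). */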
+
+ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
+{
+	const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+	const seqStore_t *seqStorePtr = &(zc->seqStore);
+	FSE_CTable *CTable_LitLength = zc->litlengthCTable;
+	FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
+	FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
+	U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
+	const seqDef *const sequences = seqStorePtr->sequencesStart;
+	const BYTE *const ofCodeTable = seqStorePtr->ofCode;
+	const BYTE *const llCodeTable = seqStorePtr->llCode;
+	const BYTE *const mlCodeTable = seqStorePtr->mlCode;
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *const oend = ostart + dstCapacity;
+	BYTE *op = ostart;
+	size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+	BYTE *seqHead;
+
+	U32 *count;
+	S16 *norm;
+	U32 *workspace;
+	size_t workspaceSize = sizeof(zc->tmpCounters);
+	{
+		size_t spaceUsed32 = 0;
+		count = (U32 *)zc->tmpCounters + spaceUsed32;
+		spaceUsed32 += MaxSeq + 1;
+		norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
+		spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
+
+		workspace = (U32 *)zc->tmpCounters + spaceUsed32;
+		workspaceSize -= (spaceUsed32 << 2);
+	}
+
+	/* Compress literals */
+	{
+		const BYTE *const literals = seqStorePtr->litStart;
+		size_t const litSize = seqStorePtr->lit - literals;
+		size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
+		if (ZSTD_isError(cSize))
+			return cSize;
+		op += cSize;
+	}
+
+	/* Sequences Header */
+	if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
+		return ERROR(dstSize_tooSmall);
+	if (nbSeq < 0x7F)
+		*op++ = (BYTE)nbSeq;
+	else if (nbSeq < LONGNBSEQ)
+		op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
+	else
+		op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
+	if (nbSeq == 0)
+		return op - ostart;
+
+	/* seqHead : flags for FSE encoding type */
+	seqHead = op++;
+
+#define MIN_SEQ_FOR_DYNAMIC_FSE 64
+#define MAX_SEQ_FOR_STATIC_FSE 1000
+
+	/* convert length/distances into codes */
+	ZSTD_seqToCodes(seqStorePtr);
+
+	/* CTable for Literal Lengths */
+	{
+		U32 max = MaxLL;
+		size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
+		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+			*op++ = llCodeTable[0];
+			FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
+			LLtype = set_rle;
+		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+			LLtype = set_repeat;
+		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
+			FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
+			LLtype = set_basic;
+		} else {
+			size_t nbSeq_1 = nbSeq;
+			const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
+			if (count[llCodeTable[nbSeq - 1]] > 1) {
+				count[llCodeTable[nbSeq - 1]]--;
+				nbSeq_1--;
+			}
+			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+			{
+				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
+				if (FSE_isError(NCountSize))
+					return NCountSize;
+				op += NCountSize;
+			}
+			FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
+			LLtype = set_compressed;
+		}
+	}
+
+	/* CTable for Offsets */
+	{
+		U32 max = MaxOff;
+		size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
+		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+			*op++ = ofCodeTable[0];
+			FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
+			Offtype = set_rle;
+		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+			Offtype = set_repeat;
+		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
+			FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
+			Offtype = set_basic;
+		} else {
+			size_t nbSeq_1 = nbSeq;
+			const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
+			if (count[ofCodeTable[nbSeq - 1]] > 1) {
+				count[ofCodeTable[nbSeq - 1]]--;
+				nbSeq_1--;
+			}
+			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+			{
+				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
+				if (FSE_isError(NCountSize))
+					return NCountSize;
+				op += NCountSize;
+			}
+			FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
+			Offtype = set_compressed;
+		}
+	}
+
+	/* CTable for MatchLengths */
+	{
+		U32 max = MaxML;
+		size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
+		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+			*op++ = *mlCodeTable;
+			FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
+			MLtype = set_rle;
+		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+			MLtype = set_repeat;
+		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
+			FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
+			MLtype = set_basic;
+		} else {
+			size_t nbSeq_1 = nbSeq;
+			const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
+			if (count[mlCodeTable[nbSeq - 1]] > 1) {
+				count[mlCodeTable[nbSeq - 1]]--;
+				nbSeq_1--;
+			}
+			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+			{
+				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
+				if (FSE_isError(NCountSize))
+					return NCountSize;
+				op += NCountSize;
+			}
+			FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
+			MLtype = set_compressed;
+		}
+	}
+
+	*seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
+	zc->flagStaticTables = 0;
+
+	/* Encoding Sequences */
+	{
+		BIT_CStream_t blockStream;
+		FSE_CState_t stateMatchLength;
+		FSE_CState_t stateOffsetBits;
+		FSE_CState_t stateLitLength;
+
+		CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */
+
+		/* first symbols */
+		FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
+		FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
+		FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
+		BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
+		if (ZSTD_32bits())
+			BIT_flushBits(&blockStream);
+		BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
+		if (ZSTD_32bits())
+			BIT_flushBits(&blockStream);
+		if (longOffsets) {
+			U32 const ofBits = ofCodeTable[nbSeq - 1];
+			int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
+			if (extraBits) {
+				BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
+				BIT_flushBits(&blockStream);
+			}
+			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
+		} else {
+			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
+		}
+		BIT_flushBits(&blockStream);
+
+		{
+			size_t n;
+			for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
+				BYTE const llCode = llCodeTable[n];
+				BYTE const ofCode = ofCodeTable[n];
+				BYTE const mlCode = mlCodeTable[n];
+				U32 const llBits = LL_bits[llCode];
+				U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
+				U32 const mlBits = ML_bits[mlCode];
+				/* (7)*/							    /* (7)*/
+				FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */  /* 15 */
+				FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
+				if (ZSTD_32bits())
+					BIT_flushBits(&blockStream);				  /* (7)*/
+				FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
+				if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
+					BIT_flushBits(&blockStream); /* (7)*/
+				BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+				if (ZSTD_32bits() && ((llBits + mlBits) > 24))
+					BIT_flushBits(&blockStream);
+				BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+				if (ZSTD_32bits())
+					BIT_flushBits(&blockStream); /* (7)*/
+				if (longOffsets) {
+					int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
+					if (extraBits) {
+						BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+						BIT_flushBits(&blockStream); /* (7)*/
+					}
+					BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
+				} else {
+					BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
+				}
+				BIT_flushBits(&blockStream); /* (7)*/
+			}
+		}
+
+		FSE_flushCState(&blockStream, &stateMatchLength);
+		FSE_flushCState(&blockStream, &stateOffsetBits);
+		FSE_flushCState(&blockStream, &stateLitLength);
+
+		{
+			size_t const streamSize = BIT_closeCStream(&blockStream);
+			if (streamSize == 0)
+				return ERROR(dstSize_tooSmall); /* not enough space */
+			op += streamSize;
+		}
+	}
+	return op - ostart;
+}
+
+ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
+{
+	size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
+	size_t const minGain = ZSTD_minGain(srcSize);
+	size_t const maxCSize = srcSize - minGain;
+	/* If srcSize <= dstCapacity, then there is enough space to write a
+	 * raw uncompressed block. Since we ran out of space, the block must not
+	 * be compressible, so fall back to a raw uncompressed block.
+	 */
+	int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
+	int i;
+
+	if (ZSTD_isError(cSize) && !uncompressibleError)
+		return cSize;
+	if (cSize >= maxCSize || uncompressibleError) {
+		zc->flagStaticHufTable = HUF_repeat_none;
+		return 0;
+	}
+	/* confirm repcodes */
+	for (i = 0; i < ZSTD_REP_NUM; i++)
+		zc->rep[i] = zc->repToConfirm[i];
+	return cSize;
+}
+
+/*! ZSTD_storeSeq() :
+	Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+	`offsetCode` : distance to match, or 0 == repCode.
+	`matchCode` : matchLength - MINMATCH
+*/
+ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
+{
+	/* copy Literals */
+	ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+	seqStorePtr->lit += litLength;
+
+	/* literal Length */
+	if (litLength > 0xFFFF) {
+		seqStorePtr->longLengthID = 1;
+		seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+	}
+	seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+	/* match offset */
+	seqStorePtr->sequences[0].offset = offsetCode + 1;
+
+	/* match Length */
+	if (matchCode > 0xFFFF) {
+		seqStorePtr->longLengthID = 2;
+		seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+	}
+	seqStorePtr->sequences[0].matchLength = (U16)matchCode;
+
+	seqStorePtr->sequences++;
+}
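+/* Note : the stored offset is offsetCode + 1, so values 1..ZSTD_REP_NUM denote
+*  repcodes while a real distance `d` (passed as d + ZSTD_REP_MOVE) is stored as
+*  d + ZSTD_REP_MOVE + 1 ; litLength/matchLength are U16, hence the longLengthID
+*  marker above for the rare lengths that do not fit. */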
+
+/*-*************************************
+*  Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes(register size_t val)
+{
+	if (ZSTD_isLittleEndian()) {
+		if (ZSTD_64bits()) {
+			return (__builtin_ctzll((U64)val) >> 3);
+		} else { /* 32 bits */
+			return (__builtin_ctz((U32)val) >> 3);
+		}
+	} else { /* Big Endian CPU */
+		if (ZSTD_64bits()) {
+			return (__builtin_clzll(val) >> 3);
+		} else { /* 32 bits */
+			return (__builtin_clz((U32)val) >> 3);
+		}
+	}
+}
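+/* Given the non-zero XOR of two machine words, ZSTD_NbCommonBytes() returns how
+*  many of their leading bytes are equal : count trailing zero bits on
+*  little-endian (leading zeros on big-endian) and divide by 8. E.g. if only the
+*  4th byte differs, ctz(diff) >= 24, so the result is 3. */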
+
+static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
+{
+	const BYTE *const pStart = pIn;
+	const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
+
+	while (pIn < pInLoopLimit) {
+		size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
+		if (!diff) {
+			pIn += sizeof(size_t);
+			pMatch += sizeof(size_t);
+			continue;
+		}
+		pIn += ZSTD_NbCommonBytes(diff);
+		return (size_t)(pIn - pStart);
+	}
+	if (ZSTD_64bits())
+		if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
+			pIn += 4;
+			pMatch += 4;
+		}
+	if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
+		pIn += 2;
+		pMatch += 2;
+	}
+	if ((pIn < pInLimit) && (*pMatch == *pIn))
+		pIn++;
+	return (size_t)(pIn - pStart);
+}
+
+/** ZSTD_count_2segments() :
+*   can count match length with `ip` & `match` in 2 different segments.
+*   convention : on reaching mEnd, match count continues starting from iStart
+*/
+static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
+{
+	const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
+	size_t const matchLength = ZSTD_count(ip, match, vEnd);
+	if (match + matchLength != mEnd)
+		return matchLength;
+	return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
+}
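+/* Example : if `match` lies 10 bytes before mEnd (end of the dictionary
+*  segment) and those 10 bytes all match, counting resumes at iStart against
+*  ip + 10, so a single match may span the dictionary/prefix boundary. */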
+
+/*-*************************************
+*  Hashes
+***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
+ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
+static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
+static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
+static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
+static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
+static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
+
+static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
+{
+	switch (mls) {
+	/* case 3: return ZSTD_hash3Ptr(p, hBits); */
+	default:
+	case 4: return ZSTD_hash4Ptr(p, hBits);
+	case 5: return ZSTD_hash5Ptr(p, hBits);
+	case 6: return ZSTD_hash6Ptr(p, hBits);
+	case 7: return ZSTD_hash7Ptr(p, hBits);
+	case 8: return ZSTD_hash8Ptr(p, hBits);
+	}
+}
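+/* Each ZSTD_hashN() mixes the first N bytes at `p` : multiply by a large prime
+*  and keep the top hBits bits of the product ; the pre-shift in the 3..7-byte
+*  variants drops the bytes beyond N, e.g. ZSTD_hash5() only depends on the 5
+*  low-addressed bytes of the ZSTD_readLE64() read. */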
+
+/*-*************************************
+*  Fast Scan
+***************************************/
+static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
+{
+	U32 *const hashTable = zc->hashTable;
+	U32 const hBits = zc->params.cParams.hashLog;
+	const BYTE *const base = zc->base;
+	const BYTE *ip = base + zc->nextToUpdate;
+	const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
+	const size_t fastHashFillStep = 3;
+
+	while (ip <= iend) {
+		hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
+		ip += fastHashFillStep;
+	}
+}
+
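+/* Single-table "fast" match finder : at each position, first try the previous
+*  offset (repcode) at ip+1, otherwise test the one hashed candidate ; on a
+*  miss, the step ((ip - anchor) >> g_searchStrength) + 1 grows with the
+*  distance since the last match, so incompressible regions are skipped fast. */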
+FORCE_INLINE
+void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
+{
+	U32 *const hashTable = cctx->hashTable;
+	U32 const hBits = cctx->params.cParams.hashLog;
+	seqStore_t *seqStorePtr = &(cctx->seqStore);
+	const BYTE *const base = cctx->base;
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const U32 lowestIndex = cctx->dictLimit;
+	const BYTE *const lowest = base + lowestIndex;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - HASH_READ_SIZE;
+	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
+	U32 offsetSaved = 0;
+
+	/* init */
+	ip += (ip == lowest);
+	{
+		U32 const maxRep = (U32)(ip - lowest);
+		if (offset_2 > maxRep)
+			offsetSaved = offset_2, offset_2 = 0;
+		if (offset_1 > maxRep)
+			offsetSaved = offset_1, offset_1 = 0;
+	}
+
+	/* Main Search Loop */
+	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+		size_t mLength;
+		size_t const h = ZSTD_hashPtr(ip, hBits, mls);
+		U32 const curr = (U32)(ip - base);
+		U32 const matchIndex = hashTable[h];
+		const BYTE *match = base + matchIndex;
+		hashTable[h] = curr; /* update hash table */
+
+		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
+			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
+			ip++;
+			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
+		} else {
+			U32 offset;
+			if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
+				ip += ((ip - anchor) >> g_searchStrength) + 1;
+				continue;
+			}
+			mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
+			offset = (U32)(ip - match);
+			while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
+				ip--;
+				match--;
+				mLength++;
+			} /* catch up */
+			offset_2 = offset_1;
+			offset_1 = offset;
+
+			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
+		}
+
+		/* match found */
+		ip += mLength;
+		anchor = ip;
+
+		if (ip <= ilimit) {
+			/* Fill Table */
+			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
+			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
+			/* check immediate repcode */
+			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
+				/* store sequence */
+				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
+				{
+					U32 const tmpOff = offset_2;
+					offset_2 = offset_1;
+					offset_1 = tmpOff;
+				} /* swap offset_2 <=> offset_1 */
+				hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
+				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
+				ip += rLength;
+				anchor = ip;
+				continue; /* faster when present ... (?) */
+			}
+		}
+	}
+
+	/* save reps for next block */
+	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
+
+	/* Last Literals */
+	{
+		size_t const lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+	const U32 mls = ctx->params.cParams.searchLength;
+	switch (mls) {
+	default: /* includes case 3 */
+	case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
+	case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
+	case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
+	case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
+	}
+}
+
+static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
+{
+	U32 *hashTable = ctx->hashTable;
+	const U32 hBits = ctx->params.cParams.hashLog;
+	seqStore_t *seqStorePtr = &(ctx->seqStore);
+	const BYTE *const base = ctx->base;
+	const BYTE *const dictBase = ctx->dictBase;
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const U32 lowestIndex = ctx->lowLimit;
+	const BYTE *const dictStart = dictBase + lowestIndex;
+	const U32 dictLimit = ctx->dictLimit;
+	const BYTE *const lowPrefixPtr = base + dictLimit;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - 8;
+	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
+
+	/* Search Loop */
+	while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+		const size_t h = ZSTD_hashPtr(ip, hBits, mls);
+		const U32 matchIndex = hashTable[h];
+		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
+		const BYTE *match = matchBase + matchIndex;
+		const U32 curr = (U32)(ip - base);
+		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
+		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
+		const BYTE *repMatch = repBase + repIndex;
+		size_t mLength;
+		hashTable[h] = curr; /* update hash table */
+
+		if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
+		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
+			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+			mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
+			ip++;
+			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
+		} else {
+			if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
+				ip += ((ip - anchor) >> g_searchStrength) + 1;
+				continue;
+			}
+			{
+				const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+				const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+				U32 offset;
+				mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
+				while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
+					ip--;
+					match--;
+					mLength++;
+				} /* catch up */
+				offset = curr - matchIndex;
+				offset_2 = offset_1;
+				offset_1 = offset;
+				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
+			}
+		}
+
+		/* found a match : store it */
+		ip += mLength;
+		anchor = ip;
+
+		if (ip <= ilimit) {
+			/* Fill Table */
+			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
+			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
+			/* check immediate repcode */
+			while (ip <= ilimit) {
+				U32 const curr2 = (U32)(ip - base);
+				U32 const repIndex2 = curr2 - offset_2;
+				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional underflow */
+				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
+					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+					size_t repLength2 =
+					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
+					U32 tmpOffset = offset_2;
+					offset_2 = offset_1;
+					offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
+					hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
+					ip += repLength2;
+					anchor = ip;
+					continue;
+				}
+				break;
+			}
+		}
+	}
+
+	/* save reps for next block */
+	ctx->repToConfirm[0] = offset_1;
+	ctx->repToConfirm[1] = offset_2;
+
+	/* Last Literals */
+	{
+		size_t const lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+	U32 const mls = ctx->params.cParams.searchLength;
+	switch (mls) {
+	default: /* includes case 3 */
+	case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
+	case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
+	case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
+	case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
+	}
+}
+
+/*-*************************************
+*  Double Fast
+***************************************/
+static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
+{
+	U32 *const hashLarge = cctx->hashTable;
+	U32 const hBitsL = cctx->params.cParams.hashLog;
+	U32 *const hashSmall = cctx->chainTable;
+	U32 const hBitsS = cctx->params.cParams.chainLog;
+	const BYTE *const base = cctx->base;
+	const BYTE *ip = base + cctx->nextToUpdate;
+	const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
+	const size_t fastHashFillStep = 3;
+
+	while (ip <= iend) {
+		hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
+		hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
+		ip += fastHashFillStep;
+	}
+}
+
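+/* "Double fast" keeps two tables : hashLong indexes 8-byte hashes (good for
+*  long matches), hashSmall indexes mls-byte hashes ; the long candidate is
+*  tried first, then a long match at ip+1, and only then the short candidate. */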
+FORCE_INLINE
+void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
+{
+	U32 *const hashLong = cctx->hashTable;
+	const U32 hBitsL = cctx->params.cParams.hashLog;
+	U32 *const hashSmall = cctx->chainTable;
+	const U32 hBitsS = cctx->params.cParams.chainLog;
+	seqStore_t *seqStorePtr = &(cctx->seqStore);
+	const BYTE *const base = cctx->base;
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const U32 lowestIndex = cctx->dictLimit;
+	const BYTE *const lowest = base + lowestIndex;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - HASH_READ_SIZE;
+	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
+	U32 offsetSaved = 0;
+
+	/* init */
+	ip += (ip == lowest);
+	{
+		U32 const maxRep = (U32)(ip - lowest);
+		if (offset_2 > maxRep)
+			offsetSaved = offset_2, offset_2 = 0;
+		if (offset_1 > maxRep)
+			offsetSaved = offset_1, offset_1 = 0;
+	}
+
+	/* Main Search Loop */
+	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+		size_t mLength;
+		size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+		size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+		U32 const curr = (U32)(ip - base);
+		U32 const matchIndexL = hashLong[h2];
+		U32 const matchIndexS = hashSmall[h];
+		const BYTE *matchLong = base + matchIndexL;
+		const BYTE *match = base + matchIndexS;
+		hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
+
+		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
+			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
+			ip++;
+			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
+		} else {
+			U32 offset;
+			if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
+				mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
+				offset = (U32)(ip - matchLong);
+				while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
+					ip--;
+					matchLong--;
+					mLength++;
+				} /* catch up */
+			} else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
+				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
+				U32 const matchIndex3 = hashLong[h3];
+				const BYTE *match3 = base + matchIndex3;
+				hashLong[h3] = curr + 1;
+				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
+					mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
+					ip++;
+					offset = (U32)(ip - match3);
+					while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
+						ip--;
+						match3--;
+						mLength++;
+					} /* catch up */
+				} else {
+					mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
+					offset = (U32)(ip - match);
+					while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
+						ip--;
+						match--;
+						mLength++;
+					} /* catch up */
+				}
+			} else {
+				ip += ((ip - anchor) >> g_searchStrength) + 1;
+				continue;
+			}
+
+			offset_2 = offset_1;
+			offset_1 = offset;
+
+			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
+		}
+
+		/* match found */
+		ip += mLength;
+		anchor = ip;
+
+		if (ip <= ilimit) {
+			/* Fill Table */
+			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
+			    curr + 2; /* here because curr+2 could be > iend-8 */
+			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
+
+			/* check immediate repcode */
+			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
+				/* store sequence */
+				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
+				{
+					U32 const tmpOff = offset_2;
+					offset_2 = offset_1;
+					offset_1 = tmpOff;
+				} /* swap offset_2 <=> offset_1 */
+				hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
+				hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
+				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
+				ip += rLength;
+				anchor = ip;
+				continue; /* faster when present ... (?) */
+			}
+		}
+	}
+
+	/* save reps for next block */
+	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
+
+	/* Last Literals */
+	{
+		size_t const lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+	const U32 mls = ctx->params.cParams.searchLength;
+	switch (mls) {
+	default: /* includes case 3 */
+	case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
+	case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
+	case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
+	case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
+	}
+}
+
+static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
+{
+	U32 *const hashLong = ctx->hashTable;
+	U32 const hBitsL = ctx->params.cParams.hashLog;
+	U32 *const hashSmall = ctx->chainTable;
+	U32 const hBitsS = ctx->params.cParams.chainLog;
+	seqStore_t *seqStorePtr = &(ctx->seqStore);
+	const BYTE *const base = ctx->base;
+	const BYTE *const dictBase = ctx->dictBase;
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const U32 lowestIndex = ctx->lowLimit;
+	const BYTE *const dictStart = dictBase + lowestIndex;
+	const U32 dictLimit = ctx->dictLimit;
+	const BYTE *const lowPrefixPtr = base + dictLimit;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - 8;
+	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
+
+	/* Search Loop */
+	while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+		const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+		const U32 matchIndex = hashSmall[hSmall];
+		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
+		const BYTE *match = matchBase + matchIndex;
+
+		const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+		const U32 matchLongIndex = hashLong[hLong];
+		const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+		const BYTE *matchLong = matchLongBase + matchLongIndex;
+
+		const U32 curr = (U32)(ip - base);
+		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
+		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
+		const BYTE *repMatch = repBase + repIndex;
+		size_t mLength;
+		hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
+
+		if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
+		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
+			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+			mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
+			ip++;
+			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
+		} else {
+			if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
+				const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
+				const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+				U32 offset;
+				mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
+				offset = curr - matchLongIndex;
+				while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
+					ip--;
+					matchLong--;
+					mLength++;
+				} /* catch up */
+				offset_2 = offset_1;
+				offset_1 = offset;
+				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
+
+			} else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
+				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
+				U32 const matchIndex3 = hashLong[h3];
+				const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+				const BYTE *match3 = match3Base + matchIndex3;
+				U32 offset;
+				hashLong[h3] = curr + 1;
+				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
+					const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
+					const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
+					mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
+					ip++;
+					offset = curr + 1 - matchIndex3;
+					while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
+						ip--;
+						match3--;
+						mLength++;
+					} /* catch up */
+				} else {
+					const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+					const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+					mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
+					offset = curr - matchIndex;
+					while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
+						ip--;
+						match--;
+						mLength++;
+					} /* catch up */
+				}
+				offset_2 = offset_1;
+				offset_1 = offset;
+				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
+
+			} else {
+				ip += ((ip - anchor) >> g_searchStrength) + 1;
+				continue;
+			}
+		}
+
+		/* found a match : store it */
+		ip += mLength;
+		anchor = ip;
+
+		if (ip <= ilimit) {
+			/* Fill Table */
+			hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
+			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
+			hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
+			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
+			/* check immediate repcode */
+			while (ip <= ilimit) {
+				U32 const curr2 = (U32)(ip - base);
+				U32 const repIndex2 = curr2 - offset_2;
+				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional underflow */
+				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
+					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+					size_t const repLength2 =
+					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
+					U32 tmpOffset = offset_2;
+					offset_2 = offset_1;
+					offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
+					hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
+					hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
+					ip += repLength2;
+					anchor = ip;
+					continue;
+				}
+				break;
+			}
+		}
+	}
+
+	/* save reps for next block */
+	ctx->repToConfirm[0] = offset_1;
+	ctx->repToConfirm[1] = offset_2;
+
+	/* Last Literals */
+	{
+		size_t const lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+	U32 const mls = ctx->params.cParams.searchLength;
+	switch (mls) {
+	default: /* includes case 3 */
+	case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
+	case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
+	case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
+	case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
+	}
+}
+
+/*-*************************************
+*  Binary Tree search
+***************************************/
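+/* The binary tree lives in chainTable : each position owns two U32 slots at
+*  bt[2*(index & btMask)], the roots of its "smaller" and "larger" subtrees,
+*  keeping candidates sorted by suffix so a search needs at most nbCompares
+*  descents. */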
+/** ZSTD_insertBt1() : add one or multiple positions to tree.
+*   ip : assumed <= iend-8 .
+*   @return : nb of positions added */
+static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
+{
+	U32 *const hashTable = zc->hashTable;
+	U32 const hashLog = zc->params.cParams.hashLog;
+	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+	U32 *const bt = zc->chainTable;
+	U32 const btLog = zc->params.cParams.chainLog - 1;
+	U32 const btMask = (1 << btLog) - 1;
+	U32 matchIndex = hashTable[h];
+	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
+	const BYTE *const base = zc->base;
+	const BYTE *const dictBase = zc->dictBase;
+	const U32 dictLimit = zc->dictLimit;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+	const BYTE *const prefixStart = base + dictLimit;
+	const BYTE *match;
+	const U32 curr = (U32)(ip - base);
+	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+	U32 *smallerPtr = bt + 2 * (curr & btMask);
+	U32 *largerPtr = smallerPtr + 1;
+	U32 dummy32; /* to be nullified at the end */
+	U32 const windowLow = zc->lowLimit;
+	U32 matchEndIdx = curr + 8;
+	size_t bestLength = 8;
+
+	hashTable[h] = curr; /* Update Hash Table */
+
+	while (nbCompares-- && (matchIndex > windowLow)) {
+		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
+		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+
+		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
+			match = base + matchIndex;
+			if (match[matchLength] == ip[matchLength])
+				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
+		} else {
+			match = dictBase + matchIndex;
+			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
+			if (matchIndex + matchLength >= dictLimit)
+				match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+		}
+
+		if (matchLength > bestLength) {
+			bestLength = matchLength;
+			if (matchLength > matchEndIdx - matchIndex)
+				matchEndIdx = matchIndex + (U32)matchLength;
+		}
+
+		if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
+			break;		      /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+
+		if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
+			/* match is smaller than curr */
+			*smallerPtr = matchIndex;	  /* update smaller idx */
+			commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+			if (matchIndex <= btLow) {
+				smallerPtr = &dummy32;
+				break;
+			}			  /* beyond tree size, stop the search */
+			smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
+			matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
+		} else {
+			/* match is larger than curr */
+			*largerPtr = matchIndex;
+			commonLengthLarger = matchLength;
+			if (matchIndex <= btLow) {
+				largerPtr = &dummy32;
+				break;
+			} /* beyond tree size, stop the search */
+			largerPtr = nextPtr;
+			matchIndex = nextPtr[0];
+		}
+	}
+
+	*smallerPtr = *largerPtr = 0;
+	if (bestLength > 384)
+		return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
+	if (matchEndIdx > curr + 8)
+		return matchEndIdx - curr - 8;
+	return 1;
+}
+
+static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
+					    U32 extDict)
+{
+	U32 *const hashTable = zc->hashTable;
+	U32 const hashLog = zc->params.cParams.hashLog;
+	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+	U32 *const bt = zc->chainTable;
+	U32 const btLog = zc->params.cParams.chainLog - 1;
+	U32 const btMask = (1 << btLog) - 1;
+	U32 matchIndex = hashTable[h];
+	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
+	const BYTE *const base = zc->base;
+	const BYTE *const dictBase = zc->dictBase;
+	const U32 dictLimit = zc->dictLimit;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+	const BYTE *const prefixStart = base + dictLimit;
+	const U32 curr = (U32)(ip - base);
+	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+	const U32 windowLow = zc->lowLimit;
+	U32 *smallerPtr = bt + 2 * (curr & btMask);
+	U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
+	U32 matchEndIdx = curr + 8;
+	U32 dummy32; /* to be nullified at the end */
+	size_t bestLength = 0;
+
+	hashTable[h] = curr; /* Update Hash Table */
+
+	while (nbCompares-- && (matchIndex > windowLow)) {
+		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
+		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+		const BYTE *match;
+
+		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
+			match = base + matchIndex;
+			if (match[matchLength] == ip[matchLength])
+				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
+		} else {
+			match = dictBase + matchIndex;
+			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
+			if (matchIndex + matchLength >= dictLimit)
+				match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+		}
+
+		if (matchLength > bestLength) {
+			if (matchLength > matchEndIdx - matchIndex)
+				matchEndIdx = matchIndex + (U32)matchLength;
+			if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
+				bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+			if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
+				break;		      /* drop, to guarantee consistency (misses a little bit of compression) */
+		}
+
+		if (match[matchLength] < ip[matchLength]) {
+			/* match is smaller than curr */
+			*smallerPtr = matchIndex;	  /* update smaller idx */
+			commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+			if (matchIndex <= btLow) {
+				smallerPtr = &dummy32;
+				break;
+			}			  /* beyond tree size, stop the search */
+			smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
+			matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
+		} else {
+			/* match is larger than curr */
+			*largerPtr = matchIndex;
+			commonLengthLarger = matchLength;
+			if (matchIndex <= btLow) {
+				largerPtr = &dummy32;
+				break;
+			} /* beyond tree size, stop the search */
+			largerPtr = nextPtr;
+			matchIndex = nextPtr[0];
+		}
+	}
+
+	*smallerPtr = *largerPtr = 0;
+
+	zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
+	return bestLength;
+}
+
+static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
+{
+	const BYTE *const base = zc->base;
+	const U32 target = (U32)(ip - base);
+	U32 idx = zc->nextToUpdate;
+
+	while (idx < target)
+		idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
+}
+
+/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
+{
+	if (ip < zc->base + zc->nextToUpdate)
+		return 0; /* skipped area */
+	ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+	return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
+}
+
+static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
+					     const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
+{
+	switch (matchLengthSearch) {
+	default: /* includes case 3 */
+	case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+	case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+	case 7:
+	case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+	}
+}
+
+static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
+{
+	const BYTE *const base = zc->base;
+	const U32 target = (U32)(ip - base);
+	U32 idx = zc->nextToUpdate;
+
+	while (idx < target)
+		idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
+}
+
+/** Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
+					   const U32 mls)
+{
+	if (ip < zc->base + zc->nextToUpdate)
+		return 0; /* skipped area */
+	ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
+	return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
+}
+
+static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
+						     const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
+						     const U32 matchLengthSearch)
+{
+	switch (matchLengthSearch) {
+	default: /* includes case 3 */
+	case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+	case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+	case 7:
+	case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+	}
+}
+
+/* *********************************
+*  Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
+
+/* Update chains up to ip (excluded)
+   Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
+{
+	U32 *const hashTable = zc->hashTable;
+	const U32 hashLog = zc->params.cParams.hashLog;
+	U32 *const chainTable = zc->chainTable;
+	const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
+	const BYTE *const base = zc->base;
+	const U32 target = (U32)(ip - base);
+	U32 idx = zc->nextToUpdate;
+
+	while (idx < target) { /* catch up */
+		size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
+		NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+		hashTable[h] = idx;
+		idx++;
+	}
+
+	zc->nextToUpdate = target;
+	return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE
+size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
+				    const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
+				    const U32 extDict)
+{
+	U32 *const chainTable = zc->chainTable;
+	const U32 chainSize = (1 << zc->params.cParams.chainLog);
+	const U32 chainMask = chainSize - 1;
+	const BYTE *const base = zc->base;
+	const BYTE *const dictBase = zc->dictBase;
+	const U32 dictLimit = zc->dictLimit;
+	const BYTE *const prefixStart = base + dictLimit;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+	const U32 lowLimit = zc->lowLimit;
+	const U32 curr = (U32)(ip - base);
+	const U32 minChain = curr > chainSize ? curr - chainSize : 0;
+	int nbAttempts = maxNbAttempts;
+	size_t ml = EQUAL_READ32 - 1;
+
+	/* HC4 match finder */
+	U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
+
+	for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
+		const BYTE *match;
+		size_t currMl = 0;
+		if ((!extDict) || matchIndex >= dictLimit) {
+			match = base + matchIndex;
+			if (match[ml] == ip[ml]) /* potentially better */
+				currMl = ZSTD_count(ip, match, iLimit);
+		} else {
+			match = dictBase + matchIndex;
+			if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+				currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
+		}
+
+		/* save best solution */
+		if (currMl > ml) {
+			ml = currMl;
+			*offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
+			if (ip + currMl == iLimit)
+				break; /* best possible, and avoid read overflow */
+		}
+
+		if (matchIndex <= minChain)
+			break;
+		matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+	}
+
+	return ml;
+}
+
+FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
+						   const U32 matchLengthSearch)
+{
+	switch (matchLengthSearch) {
+	default: /* includes case 3 */
+	case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
+	case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
+	case 7:
+	case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
+	}
+}
+
+FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
+							   const U32 matchLengthSearch)
+{
+	switch (matchLengthSearch) {
+	default: /* includes case 3 */
+	case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
+	case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
+	case 7:
+	case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
+	}
+}
+
+/* *******************************
+*  Common parser - lazy strategy
+*********************************/
+FORCE_INLINE
+void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
+{
+	seqStore_t *seqStorePtr = &(ctx->seqStore);
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - 8;
+	const BYTE *const base = ctx->base + ctx->dictLimit;
+
+	U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
+	U32 const mls = ctx->params.cParams.searchLength;
+
+	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
+	searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
+
+	/* init */
+	ip += (ip == base);
+	ctx->nextToUpdate3 = ctx->nextToUpdate;
+	{
+		U32 const maxRep = (U32)(ip - base);
+		if (offset_2 > maxRep)
+			savedOffset = offset_2, offset_2 = 0;
+		if (offset_1 > maxRep)
+			savedOffset = offset_1, offset_1 = 0;
+	}
+
+	/* Match Loop */
+	while (ip < ilimit) {
+		size_t matchLength = 0;
+		size_t offset = 0;
+		const BYTE *start = ip + 1;
+
+		/* check repCode */
+		if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
+			/* repcode : we take it */
+			matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
+			if (depth == 0)
+				goto _storeSequence;
+		}
+
+		/* first search (depth 0) */
+		{
+			size_t offsetFound = 99999999;
+			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+			if (ml2 > matchLength)
+				matchLength = ml2, start = ip, offset = offsetFound;
+		}
+
+		if (matchLength < EQUAL_READ32) {
+			ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
+			continue;
+		}
+
+		/* let's try to find a better solution */
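+		/* The gain1/gain2 comparisons below rate each candidate as a weighted
+		 * match length minus ZSTD_highbit32(offset + 1), a rough stand-in for
+		 * the cost of encoding that offset; the small constants added to gain1
+		 * bias the choice toward keeping the match already found, so a deeper
+		 * search has to win by a clear margin. */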
+		if (depth >= 1)
+			while (ip < ilimit) {
+				ip++;
+				if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
+					size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
+					int const gain2 = (int)(mlRep * 3);
+					int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
+					if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
+						matchLength = mlRep, offset = 0, start = ip;
+				}
+				{
+					size_t offset2 = 99999999;
+					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
+					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
+					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+						matchLength = ml2, offset = offset2, start = ip;
+						continue; /* search a better one */
+					}
+				}
+
+				/* let's find an even better one */
+				if ((depth == 2) && (ip < ilimit)) {
+					ip++;
+					if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
+						size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
+						int const gain2 = (int)(ml2 * 4);
+						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
+						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
+							matchLength = ml2, offset = 0, start = ip;
+					}
+					{
+						size_t offset2 = 99999999;
+						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
+						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
+						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+							matchLength = ml2, offset = offset2, start = ip;
+							continue;
+						}
+					}
+				}
+				break; /* nothing found : store previous solution */
+			}
+
+		/* NOTE:
+		 * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
+		 * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
+		 * overflows the pointer, which is undefined behavior.
+		 */
+		/* catch up */
+		if (offset) {
+			while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
+			       (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
+			{
+				start--;
+				matchLength++;
+			}
+			offset_2 = offset_1;
+			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+		}
+
+	/* store sequence */
+_storeSequence:
+		{
+			size_t const litLength = start - anchor;
+			ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
+			anchor = ip = start + matchLength;
+		}
+
+		/* check immediate repcode */
+		while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
+			/* store sequence */
+			matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
+			offset = offset_2;
+			offset_2 = offset_1;
+			offset_1 = (U32)offset; /* swap repcodes */
+			ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
+			ip += matchLength;
+			anchor = ip;
+			continue; /* faster when present ... (?) */
+		}
+	}
+
+	/* Save reps for next block */
+	ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
+	ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
+
+	/* Last Literals */
+	{
+		size_t const lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
+
+static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
+
+static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
+
+static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
+
+FORCE_INLINE
+void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
+{
+	seqStore_t *seqStorePtr = &(ctx->seqStore);
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - 8;
+	const BYTE *const base = ctx->base;
+	const U32 dictLimit = ctx->dictLimit;
+	const U32 lowestIndex = ctx->lowLimit;
+	const BYTE *const prefixStart = base + dictLimit;
+	const BYTE *const dictBase = ctx->dictBase;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+	const BYTE *const dictStart = dictBase + ctx->lowLimit;
+
+	const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
+	const U32 mls = ctx->params.cParams.searchLength;
+
+	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
+	searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
+
+	/* init */
+	ctx->nextToUpdate3 = ctx->nextToUpdate;
+	ip += (ip == prefixStart);
+
+	/* Match Loop */
+	while (ip < ilimit) {
+		size_t matchLength = 0;
+		size_t offset = 0;
+		const BYTE *start = ip + 1;
+		U32 curr = (U32)(ip - base);
+
+		/* check repCode */
+		{
+			const U32 repIndex = (U32)(curr + 1 - offset_1);
+			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
+			const BYTE *const repMatch = repBase + repIndex;
+			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+				if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
+					/* repcode detected, we should take it */
+					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
+					matchLength =
+					    ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+					if (depth == 0)
+						goto _storeSequence;
+				}
+		}
+
+		/* first search (depth 0) */
+		{
+			size_t offsetFound = 99999999;
+			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+			if (ml2 > matchLength)
+				matchLength = ml2, start = ip, offset = offsetFound;
+		}
+
+		if (matchLength < EQUAL_READ32) {
+			ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
+			continue;
+		}
+
+		/* let's try to find a better solution */
+		if (depth >= 1)
+			while (ip < ilimit) {
+				ip++;
+				curr++;
+				/* check repCode */
+				if (offset) {
+					const U32 repIndex = (U32)(curr - offset_1);
+					const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
+					const BYTE *const repMatch = repBase + repIndex;
+					if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+						if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
+							/* repcode detected */
+							const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
+							size_t const repLength =
+							    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
+							    EQUAL_READ32;
+							int const gain2 = (int)(repLength * 3);
+							int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
+							if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
+								matchLength = repLength, offset = 0, start = ip;
+						}
+				}
+
+				/* search match, depth 1 */
+				{
+					size_t offset2 = 99999999;
+					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
+					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
+					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+						matchLength = ml2, offset = offset2, start = ip;
+						continue; /* search a better one */
+					}
+				}
+
+				/* let's find an even better one */
+				if ((depth == 2) && (ip < ilimit)) {
+					ip++;
+					curr++;
+					/* check repCode */
+					if (offset) {
+						const U32 repIndex = (U32)(curr - offset_1);
+						const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
+						const BYTE *const repMatch = repBase + repIndex;
+						if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+							if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
+								/* repcode detected */
+								const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
+								size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
+													repEnd, prefixStart) +
+										   EQUAL_READ32;
+								int gain2 = (int)(repLength * 4);
+								int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
+								if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
+									matchLength = repLength, offset = 0, start = ip;
+							}
+					}
+
+					/* search match, depth 2 */
+					{
+						size_t offset2 = 99999999;
+						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
+						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
+						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+							matchLength = ml2, offset = offset2, start = ip;
+							continue;
+						}
+					}
+				}
+				break; /* nothing found : store previous solution */
+			}
+
+		/* catch up */
+		if (offset) {
+			U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
+			const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+			const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+			while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
+				start--;
+				match--;
+				matchLength++;
+			} /* catch up */
+			offset_2 = offset_1;
+			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+		}
+
+	/* store sequence */
+_storeSequence:
+		{
+			size_t const litLength = start - anchor;
+			ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
+			anchor = ip = start + matchLength;
+		}
+
+		/* check immediate repcode */
+		while (ip <= ilimit) {
+			const U32 repIndex = (U32)((ip - base) - offset_2);
+			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
+			const BYTE *const repMatch = repBase + repIndex;
+			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+				if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
+					/* repcode detected, we should take it */
+					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
+					matchLength =
+					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+					offset = offset_2;
+					offset_2 = offset_1;
+					offset_1 = (U32)offset; /* swap offset history */
+					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
+					ip += matchLength;
+					anchor = ip;
+					continue; /* faster when present ... (?) */
+				}
+			break;
+		}
+	}
+
+	/* Save reps for next block */
+	ctx->repToConfirm[0] = offset_1;
+	ctx->repToConfirm[1] = offset_2;
+
+	/* Last Literals */
+	{
+		size_t const lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
+
+static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
+}
+
+static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
+}
+
+static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
+}
+
+/* The optimal parser */
+#include "zstd_opt.h"
+
+static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+	ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
+#else
+	(void)ctx;
+	(void)src;
+	(void)srcSize;
+	return;
+#endif
+}
+
+static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+	ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
+#else
+	(void)ctx;
+	(void)src;
+	(void)srcSize;
+	return;
+#endif
+}
+
+static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+	ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
+#else
+	(void)ctx;
+	(void)src;
+	(void)srcSize;
+	return;
+#endif
+}
+
+static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
+{
+#ifdef ZSTD_OPT_H_91842398743
+	ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
+#else
+	(void)ctx;
+	(void)src;
+	(void)srcSize;
+	return;
+#endif
+}
+
+typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
+
+static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+{
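+	/* Dispatch table indexed as [extDict][strategy] : row 0 serves the
+	 * single-segment (prefix-only) case, row 1 the external-dictionary case,
+	 * and the columns follow the ZSTD_strategy enum order used by (U32)strat. */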
+	static const ZSTD_blockCompressor blockCompressor[2][8] = {
+	    {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
+	     ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
+	    {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
+	     ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
+
+	return blockCompressor[extDict][(U32)strat];
+}
+
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
+	const BYTE *const base = zc->base;
+	const BYTE *const istart = (const BYTE *)src;
+	const U32 curr = (U32)(istart - base);
+	if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
+		return 0; /* don't even attempt compression below a certain srcSize */
+	ZSTD_resetSeqStore(&(zc->seqStore));
+	if (curr > zc->nextToUpdate + 384)
+		zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
+	blockCompressor(zc, src, srcSize);
+	return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
+}
+
+/*! ZSTD_compress_generic() :
+*   Compress a chunk of data into one or multiple blocks.
+*   All blocks will be terminated, all input will be consumed.
+*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+*   The frame is assumed to be already started (header already produced).
+*   @return : compressed size, or an error code
+*/
+static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
+{
+	size_t blockSize = cctx->blockSize;
+	size_t remaining = srcSize;
+	const BYTE *ip = (const BYTE *)src;
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *op = ostart;
+	U32 const maxDist = 1 << cctx->params.cParams.windowLog;
+
+	if (cctx->params.fParams.checksumFlag && srcSize)
+		xxh64_update(&cctx->xxhState, src, srcSize);
+
+	while (remaining) {
+		U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+		size_t cSize;
+
+		if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
+			return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
+		if (remaining < blockSize)
+			blockSize = remaining;
+
+		/* preemptive overflow correction */
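+		/* All match positions are stored as U32 offsets from cctx->base.
+		 * Once lowLimit passes 3<<29, ZSTD_reduceIndex() and the updates below
+		 * subtract `correction` from every stored index and limit (advancing
+		 * cctx->base to match), so the 32-bit indices never wrap. */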
+		if (cctx->lowLimit > (3U << 29)) {
+			U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
+			U32 const curr = (U32)(ip - cctx->base);
+			U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
+			U32 const correction = curr - newCurr;
+			ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
+			ZSTD_reduceIndex(cctx, correction);
+			cctx->base += correction;
+			cctx->dictBase += correction;
+			cctx->lowLimit -= correction;
+			cctx->dictLimit -= correction;
+			if (cctx->nextToUpdate < correction)
+				cctx->nextToUpdate = 0;
+			else
+				cctx->nextToUpdate -= correction;
+		}
+
+		if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
+			/* enforce maxDist */
+			U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
+			if (cctx->lowLimit < newLowLimit)
+				cctx->lowLimit = newLowLimit;
+			if (cctx->dictLimit < cctx->lowLimit)
+				cctx->dictLimit = cctx->lowLimit;
+		}
+
+		cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
+		if (ZSTD_isError(cSize))
+			return cSize;
+
+		if (cSize == 0) { /* block is not compressible */
+			U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
+			if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
+				return ERROR(dstSize_tooSmall);
+			ZSTD_writeLE32(op, cBlockHeader24); /* no problem : the 4th byte will be overwritten */
+			memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
+			cSize = ZSTD_blockHeaderSize + blockSize;
+		} else {
+			U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
+			ZSTD_writeLE24(op, cBlockHeader24);
+			cSize += ZSTD_blockHeaderSize;
+		}
+
+		remaining -= blockSize;
+		dstCapacity -= cSize;
+		ip += blockSize;
+		op += cSize;
+	}
+
+	if (lastFrameChunk && (op > ostart))
+		cctx->stage = ZSTDcs_ending;
+	return op - ostart;
+}
+
+static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
+{
+	BYTE *const op = (BYTE *)dst;
+	U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
+	U32 const checksumFlag = params.fParams.checksumFlag > 0;
+	U32 const windowSize = 1U << params.cParams.windowLog;
+	U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+	BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+	U32 const fcsCode =
+	    params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
+	BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
+	size_t pos;
+
+	if (dstCapacity < ZSTD_frameHeaderSize_max)
+		return ERROR(dstSize_tooSmall);
+
+	ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
+	op[4] = frameHeaderDescriptionByte;
+	pos = 5;
+	if (!singleSegment)
+		op[pos++] = windowLogByte;
+	switch (dictIDSizeCode) {
+	default: /* impossible */
+	case 0: break;
+	case 1:
+		op[pos] = (BYTE)(dictID);
+		pos++;
+		break;
+	case 2:
+		ZSTD_writeLE16(op + pos, (U16)dictID);
+		pos += 2;
+		break;
+	case 3:
+		ZSTD_writeLE32(op + pos, dictID);
+		pos += 4;
+		break;
+	}
+	switch (fcsCode) {
+	default: /* impossible */
+	case 0:
+		if (singleSegment)
+			op[pos++] = (BYTE)(pledgedSrcSize);
+		break;
+	case 1:
+		ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
+		pos += 2;
+		break;
+	case 2:
+		ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
+		pos += 4;
+		break;
+	case 3:
+		ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
+		pos += 8;
+		break;
+	}
+	return pos;
+}
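+
+/* Worked example of the layout written above : with no dictionary
+ * (dictIDSizeCode = 0), checksumFlag = 1, a single-segment frame and
+ * pledgedSrcSize = 1000 (fcsCode = 1), the descriptor byte is
+ * 0 + (1 << 2) + (1 << 5) + (1 << 6) = 0x64; no window byte follows, the
+ * content size is written as LE16(1000 - 256), and the whole header is
+ * 7 bytes (4-byte magic + descriptor + 2). */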
+
+static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
+{
+	const BYTE *const ip = (const BYTE *)src;
+	size_t fhSize = 0;
+
+	if (cctx->stage == ZSTDcs_created)
+		return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
+
+	if (frame && (cctx->stage == ZSTDcs_init)) {
+		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
+		if (ZSTD_isError(fhSize))
+			return fhSize;
+		dstCapacity -= fhSize;
+		dst = (char *)dst + fhSize;
+		cctx->stage = ZSTDcs_ongoing;
+	}
+
+	/* Check if blocks follow each other */
+	if (src != cctx->nextSrc) {
+		/* not contiguous */
+		ptrdiff_t const delta = cctx->nextSrc - ip;
+		cctx->lowLimit = cctx->dictLimit;
+		cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
+		cctx->dictBase = cctx->base;
+		cctx->base -= delta;
+		cctx->nextToUpdate = cctx->dictLimit;
+		if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
+			cctx->lowLimit = cctx->dictLimit; /* too small extDict */
+	}
+
+	/* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+	if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
+		ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
+		U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
+		cctx->lowLimit = lowLimitMax;
+	}
+
+	cctx->nextSrc = ip + srcSize;
+
+	if (srcSize) {
+		size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
+					   : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
+		if (ZSTD_isError(cSize))
+			return cSize;
+		return cSize + fhSize;
+	} else
+		return fhSize;
+}
+
+size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
+}
+
+size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
+
+size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
+	if (srcSize > blockSizeMax)
+		return ERROR(srcSize_wrong);
+	return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
+}
+
+/*! ZSTD_loadDictionaryContent() :
+ *  @return : 0, or an error code
+ */
+static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
+{
+	const BYTE *const ip = (const BYTE *)src;
+	const BYTE *const iend = ip + srcSize;
+
+	/* input becomes curr prefix */
+	zc->lowLimit = zc->dictLimit;
+	zc->dictLimit = (U32)(zc->nextSrc - zc->base);
+	zc->dictBase = zc->base;
+	zc->base += ip - zc->nextSrc;
+	zc->nextToUpdate = zc->dictLimit;
+	zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
+
+	zc->nextSrc = iend;
+	if (srcSize <= HASH_READ_SIZE)
+		return 0;
+
+	switch (zc->params.cParams.strategy) {
+	case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;
+
+	case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;
+
+	case ZSTD_greedy:
+	case ZSTD_lazy:
+	case ZSTD_lazy2:
+		if (srcSize >= HASH_READ_SIZE)
+			ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
+		break;
+
+	case ZSTD_btlazy2:
+	case ZSTD_btopt:
+	case ZSTD_btopt2:
+		if (srcSize >= HASH_READ_SIZE)
+			ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
+		break;
+
+	default:
+		return ERROR(GENERIC); /* strategy doesn't exist; impossible */
+	}
+
+	zc->nextToUpdate = (U32)(iend - zc->base);
+	return 0;
+}
+
+/* Dictionaries that assign zero probability to symbols that show up cause problems
+   during FSE encoding.  Refuse dictionaries that assign zero probability to symbols
+   that we may encounter during compression.
+   NOTE: This behavior is not standard and could be improved in the future. */
+static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
+{
+	U32 s;
+	if (dictMaxSymbolValue < maxSymbolValue)
+		return ERROR(dictionary_corrupted);
+	for (s = 0; s <= maxSymbolValue; ++s) {
+		if (normalizedCounter[s] == 0)
+			return ERROR(dictionary_corrupted);
+	}
+	return 0;
+}
+
+/* Dictionary format :
+ * See :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+ */
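+/* As parsed below, a zstd dictionary consists of :
+ * magic number (4 bytes) | dictID (4 bytes) | HUF literals CTable |
+ * FSE offcode table | FSE matchlength table | FSE litlength table |
+ * 3 repcodes (3 x 4 bytes) | dictionary content. */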
+/*! ZSTD_loadZstdDictionary() :
+ * @return : 0, or an error code
+ *  assumptions : the magic number has already been checked by the caller,
+ *                and dictSize > 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
+{
+	const BYTE *dictPtr = (const BYTE *)dict;
+	const BYTE *const dictEnd = dictPtr + dictSize;
+	short offcodeNCount[MaxOff + 1];
+	unsigned offcodeMaxValue = MaxOff;
+
+	dictPtr += 4; /* skip magic number */
+	cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
+	dictPtr += 4;
+
+	{
+		size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
+		if (HUF_isError(hufHeaderSize))
+			return ERROR(dictionary_corrupted);
+		dictPtr += hufHeaderSize;
+	}
+
+	{
+		unsigned offcodeLog;
+		size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
+		if (FSE_isError(offcodeHeaderSize))
+			return ERROR(dictionary_corrupted);
+		if (offcodeLog > OffFSELog)
+			return ERROR(dictionary_corrupted);
+		/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+		CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
+			dictionary_corrupted);
+		dictPtr += offcodeHeaderSize;
+	}
+
+	{
+		short matchlengthNCount[MaxML + 1];
+		unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+		size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
+		if (FSE_isError(matchlengthHeaderSize))
+			return ERROR(dictionary_corrupted);
+		if (matchlengthLog > MLFSELog)
+			return ERROR(dictionary_corrupted);
+		/* Every match length code must have non-zero probability */
+		CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+		CHECK_E(
+		    FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
+		    dictionary_corrupted);
+		dictPtr += matchlengthHeaderSize;
+	}
+
+	{
+		short litlengthNCount[MaxLL + 1];
+		unsigned litlengthMaxValue = MaxLL, litlengthLog;
+		size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
+		if (FSE_isError(litlengthHeaderSize))
+			return ERROR(dictionary_corrupted);
+		if (litlengthLog > LLFSELog)
+			return ERROR(dictionary_corrupted);
+		/* Every literal length code must have non-zero probability */
+		CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+		CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
+			dictionary_corrupted);
+		dictPtr += litlengthHeaderSize;
+	}
+
+	if (dictPtr + 12 > dictEnd)
+		return ERROR(dictionary_corrupted);
+	cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
+	cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
+	cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
+	dictPtr += 12;
+
+	{
+		size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+		U32 offcodeMax = MaxOff;
+		if (dictContentSize <= ((U32)-1) - 128 KB) {
+			U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+			offcodeMax = ZSTD_highbit32(maxOffset);		     /* Calculate minimum offset code required to represent maxOffset */
+		}
+		/* All offset values <= dictContentSize + 128 KB must be representable */
+		CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+		/* All repCodes must be <= dictContentSize and != 0 */
+		{
+			U32 u;
+			for (u = 0; u < 3; u++) {
+				if (cctx->rep[u] == 0)
+					return ERROR(dictionary_corrupted);
+				if (cctx->rep[u] > dictContentSize)
+					return ERROR(dictionary_corrupted);
+			}
+		}
+
+		cctx->flagStaticTables = 1;
+		cctx->flagStaticHufTable = HUF_repeat_valid;
+		return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
+	}
+}
+
+/** ZSTD_compress_insertDictionary() :
+*   @return : 0, or an error code */
+static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
+{
+	if ((dict == NULL) || (dictSize <= 8))
+		return 0;
+
+	/* dict as pure content */
+	if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
+		return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
+
+	/* dict as zstd dictionary */
+	return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
+}
+
+/*! ZSTD_compressBegin_internal() :
+*   @return : 0, or an error code */
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
+{
+	ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
+	CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
+	return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
+}
+
+/*! ZSTD_compressBegin_advanced() :
+*   @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+	/* compression parameters verification and optimization */
+	CHECK_F(ZSTD_checkCParams(params.cParams));
+	return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
+}
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
+{
+	ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
+	return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
+}
+
+size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
+
+/*! ZSTD_writeEpilogue() :
+*   Ends a frame.
+*   @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
+{
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *op = ostart;
+	size_t fhSize = 0;
+
+	if (cctx->stage == ZSTDcs_created)
+		return ERROR(stage_wrong); /* init missing */
+
+	/* special case : empty frame */
+	if (cctx->stage == ZSTDcs_init) {
+		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
+		if (ZSTD_isError(fhSize))
+			return fhSize;
+		dstCapacity -= fhSize;
+		op += fhSize;
+		cctx->stage = ZSTDcs_ongoing;
+	}
+
+	if (cctx->stage != ZSTDcs_ending) {
+		/* write one last empty block, make it the "last" block */
+		U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
+		if (dstCapacity < 4)
+			return ERROR(dstSize_tooSmall);
+		ZSTD_writeLE32(op, cBlockHeader24);
+		op += ZSTD_blockHeaderSize;
+		dstCapacity -= ZSTD_blockHeaderSize;
+	}
+
+	if (cctx->params.fParams.checksumFlag) {
+		U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
+		if (dstCapacity < 4)
+			return ERROR(dstSize_tooSmall);
+		ZSTD_writeLE32(op, checksum);
+		op += 4;
+	}
+
+	cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
+	return op - ostart;
+}
+
+size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	size_t endResult;
+	size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
+	if (ZSTD_isError(cSize))
+		return cSize;
+	endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
+	if (ZSTD_isError(endResult))
+		return endResult;
+	return cSize + endResult;
+}
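+
+/* Illustrative usage sketch (not part of the zstd sources) : the buffer-less
+ * begin/continue/end sequence, feeding the input in caller-defined chunks.
+ * `cctx`, `chunk`, `chunkSize`, `op`, `oend` and the loop condition are
+ * assumptions of the sketch; every return value would normally be checked
+ * with ZSTD_isError(). The final call with (NULL, 0) only writes the epilogue.
+ *
+ *	ZSTD_compressBegin(cctx, 3);
+ *	while (more_input(...)) {
+ *		size_t const cSize = ZSTD_compressContinue(cctx, op, oend - op, chunk, chunkSize);
+ *		op += cSize;
+ *	}
+ *	op += ZSTD_compressEnd(cctx, op, oend - op, NULL, 0);
+ */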
+
+static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
+				     ZSTD_parameters params)
+{
+	CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
+	return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
+			       ZSTD_parameters params)
+{
+	return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+}
+
+size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
+{
+	return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
+}
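+
+/* Illustrative usage sketch (not part of the zstd sources) : one-shot
+ * compression through ZSTD_compressCCtx(). The context `cctx`, the buffers and
+ * their sizes are assumed to be provided by the caller, and error handling is
+ * reduced to a single ZSTD_isError() check.
+ *
+ *	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
+ *	size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
+ *
+ *	if (ZSTD_isError(cSize))
+ *		return -EINVAL;
+ */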
+
+/* =====  Dictionary API  ===== */
+
+struct ZSTD_CDict_s {
+	void *dictBuffer;
+	const void *dictContent;
+	size_t dictContentSize;
+	ZSTD_CCtx *refContext;
+}; /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
+
+static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
+{
+	if (!customMem.customAlloc || !customMem.customFree)
+		return NULL;
+
+	{
+		ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
+		ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
+
+		if (!cdict || !cctx) {
+			ZSTD_free(cdict, customMem);
+			ZSTD_freeCCtx(cctx);
+			return NULL;
+		}
+
+		if ((byReference) || (!dictBuffer) || (!dictSize)) {
+			cdict->dictBuffer = NULL;
+			cdict->dictContent = dictBuffer;
+		} else {
+			void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
+			if (!internalBuffer) {
+				ZSTD_free(cctx, customMem);
+				ZSTD_free(cdict, customMem);
+				return NULL;
+			}
+			memcpy(internalBuffer, dictBuffer, dictSize);
+			cdict->dictBuffer = internalBuffer;
+			cdict->dictContent = internalBuffer;
+		}
+
+		{
+			size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
+			if (ZSTD_isError(errorCode)) {
+				ZSTD_free(cdict->dictBuffer, customMem);
+				ZSTD_free(cdict, customMem);
+				ZSTD_freeCCtx(cctx);
+				return NULL;
+			}
+		}
+
+		cdict->refContext = cctx;
+		cdict->dictContentSize = dictSize;
+		return cdict;
+	}
+}
+
+ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
+{
+	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
+	return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
+}
+
+size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
+{
+	if (cdict == NULL)
+		return 0; /* support free on NULL */
+	{
+		ZSTD_customMem const cMem = cdict->refContext->customMem;
+		ZSTD_freeCCtx(cdict->refContext);
+		ZSTD_free(cdict->dictBuffer, cMem);
+		ZSTD_free(cdict, cMem);
+		return 0;
+	}
+}
+
+static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
+
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
+{
+	if (cdict->dictContentSize)
+		CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
+	else {
+		ZSTD_parameters params = cdict->refContext->params;
+		params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
+		CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
+	}
+	return 0;
+}
+
+/*! ZSTD_compress_usingCDict() :
+*   Compression using a digested Dictionary.
+*   Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+*   Note that compression level is decided during dictionary creation */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
+{
+	CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
+
+	if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
+		cctx->params.fParams.contentSizeFlag = 1;
+		cctx->frameContentSize = srcSize;
+	} else {
+		cctx->params.fParams.contentSizeFlag = 0;
+	}
+
+	return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
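+
+/* Illustrative usage sketch (not part of the zstd sources) : reusing one
+ * digested dictionary across several buffers. The workspace `wksp` is assumed
+ * to be at least ZSTD_CDictWorkspaceBound(params.cParams) bytes, `cctx` and
+ * the buffers come from the caller, and NULL / ZSTD_isError() checks are
+ * omitted. Note that ZSTD_initCDict() references `dict` without copying it,
+ * so the dictionary buffer must stay valid for the cdict's lifetime.
+ *
+ *	ZSTD_parameters const params = ZSTD_getParams(3, 0, dictSize);
+ *	ZSTD_CDict *const cdict = ZSTD_initCDict(dict, dictSize, params, wksp, wkspSize);
+ *	size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
+ */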
+
+/* ******************************************************************
+*  Streaming
+********************************************************************/
+
+typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
+
+struct ZSTD_CStream_s {
+	ZSTD_CCtx *cctx;
+	ZSTD_CDict *cdictLocal;
+	const ZSTD_CDict *cdict;
+	char *inBuff;
+	size_t inBuffSize;
+	size_t inToCompress;
+	size_t inBuffPos;
+	size_t inBuffTarget;
+	size_t blockSize;
+	char *outBuff;
+	size_t outBuffSize;
+	size_t outBuffContentSize;
+	size_t outBuffFlushedSize;
+	ZSTD_cStreamStage stage;
+	U32 checksum;
+	U32 frameEnded;
+	U64 pledgedSrcSize;
+	U64 inputProcessed;
+	ZSTD_parameters params;
+	ZSTD_customMem customMem;
+}; /* typedef'd to ZSTD_CStream within "zstd.h" */
+
+size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
+{
+	size_t const inBuffSize = (size_t)1 << cParams.windowLog;
+	size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
+	size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
+
+	return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
+}
+
+ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{
+	ZSTD_CStream *zcs;
+
+	if (!customMem.customAlloc || !customMem.customFree)
+		return NULL;
+
+	zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
+	if (zcs == NULL)
+		return NULL;
+	memset(zcs, 0, sizeof(ZSTD_CStream));
+	memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
+	zcs->cctx = ZSTD_createCCtx_advanced(customMem);
+	if (zcs->cctx == NULL) {
+		ZSTD_freeCStream(zcs);
+		return NULL;
+	}
+	return zcs;
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
+{
+	if (zcs == NULL)
+		return 0; /* support free on NULL */
+	{
+		ZSTD_customMem const cMem = zcs->customMem;
+		ZSTD_freeCCtx(zcs->cctx);
+		zcs->cctx = NULL;
+		ZSTD_freeCDict(zcs->cdictLocal);
+		zcs->cdictLocal = NULL;
+		ZSTD_free(zcs->inBuff, cMem);
+		zcs->inBuff = NULL;
+		ZSTD_free(zcs->outBuff, cMem);
+		zcs->outBuff = NULL;
+		ZSTD_free(zcs, cMem);
+		return 0;
+	}
+}
+
+/*======   Initialization   ======*/
+
+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
+size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
+
+static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
+{
+	if (zcs->inBuffSize == 0)
+		return ERROR(stage_wrong); /* zcs has not been initialized at least once => can't reset */
+
+	if (zcs->cdict)
+		CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
+	else
+		CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
+
+	zcs->inToCompress = 0;
+	zcs->inBuffPos = 0;
+	zcs->inBuffTarget = zcs->blockSize;
+	zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+	zcs->stage = zcss_load;
+	zcs->frameEnded = 0;
+	zcs->pledgedSrcSize = pledgedSrcSize;
+	zcs->inputProcessed = 0;
+	return 0; /* ready to go */
+}
+
+size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
+{
+
+	zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
+
+	return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
+}
+
+static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+	/* allocate buffers */
+	{
+		size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
+		if (zcs->inBuffSize < neededInBuffSize) {
+			zcs->inBuffSize = neededInBuffSize;
+			ZSTD_free(zcs->inBuff, zcs->customMem);
+			zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
+			if (zcs->inBuff == NULL)
+				return ERROR(memory_allocation);
+		}
+		zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
+	}
+	if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
+		zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
+		ZSTD_free(zcs->outBuff, zcs->customMem);
+		zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
+		if (zcs->outBuff == NULL)
+			return ERROR(memory_allocation);
+	}
+
+	if (dict && dictSize >= 8) {
+		ZSTD_freeCDict(zcs->cdictLocal);
+		zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
+		if (zcs->cdictLocal == NULL)
+			return ERROR(memory_allocation);
+		zcs->cdict = zcs->cdictLocal;
+	} else
+		zcs->cdict = NULL;
+
+	zcs->checksum = params.fParams.checksumFlag > 0;
+	zcs->params = params;
+
+	return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
+}
+
+ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
+{
+	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
+	ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
+	if (zcs) {
+		size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
+		if (ZSTD_isError(code)) {
+			return NULL;
+		}
+	}
+	return zcs;
+}
+
+ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
+{
+	ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
+	ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
+	if (zcs) {
+		zcs->cdict = cdict;
+		if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
+			return NULL;
+		}
+	}
+	return zcs;
+}
+
+/*======   Compression   ======*/
+
+typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
+
+ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	size_t const length = MIN(dstCapacity, srcSize);
+	memcpy(dst, src, length);
+	return length;
+}
+
+static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
+{
+	U32 someMoreWork = 1;
+	const char *const istart = (const char *)src;
+	const char *const iend = istart + *srcSizePtr;
+	const char *ip = istart;
+	char *const ostart = (char *)dst;
+	char *const oend = ostart + *dstCapacityPtr;
+	char *op = ostart;
+
+	while (someMoreWork) {
+		switch (zcs->stage) {
+		case zcss_init:
+			return ERROR(init_missing); /* call ZSTD_initCStream() first! */
+
+		case zcss_load:
+			/* complete inBuffer */
+			{
+				size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+				size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
+				zcs->inBuffPos += loaded;
+				ip += loaded;
+				if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
+					someMoreWork = 0;
+					break; /* not enough input to get a full block : stop there, wait for more */
+				}
+			}
+			/* compress curr block (note : this stage cannot be stopped in the middle) */
+			{
+				void *cDst;
+				size_t cSize;
+				size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
+				size_t oSize = oend - op;
+				if (oSize >= ZSTD_compressBound(iSize))
+					cDst = op; /* compress directly into output buffer (avoid flush stage) */
+				else
+					cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+				cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
+							   : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
+				if (ZSTD_isError(cSize))
+					return cSize;
+				if (flush == zsf_end)
+					zcs->frameEnded = 1;
+				/* prepare next block */
+				zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+				if (zcs->inBuffTarget > zcs->inBuffSize)
+					zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
+				zcs->inToCompress = zcs->inBuffPos;
+				if (cDst == op) {
+					op += cSize;
+					break;
+				} /* no need to flush */
+				zcs->outBuffContentSize = cSize;
+				zcs->outBuffFlushedSize = 0;
+				zcs->stage = zcss_flush; /* pass-through to flush stage */
+			}
+
+		case zcss_flush: {
+			size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+			size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+			op += flushed;
+			zcs->outBuffFlushedSize += flushed;
+			if (toFlush != flushed) {
+				someMoreWork = 0;
+				break;
+			} /* dst too small to store flushed data : stop there */
+			zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+			zcs->stage = zcss_load;
+			break;
+		}
+
+		case zcss_final:
+			someMoreWork = 0; /* do nothing */
+			break;
+
+		default:
+			return ERROR(GENERIC); /* impossible */
+		}
+	}
+
+	*srcSizePtr = ip - istart;
+	*dstCapacityPtr = op - ostart;
+	zcs->inputProcessed += *srcSizePtr;
+	if (zcs->frameEnded)
+		return 0;
+	{
+		size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
+		if (hintInSize == 0)
+			hintInSize = zcs->blockSize;
+		return hintInSize;
+	}
+}
+
+size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
+{
+	size_t sizeRead = input->size - input->pos;
+	size_t sizeWritten = output->size - output->pos;
+	size_t const result =
+	    ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
+	input->pos += sizeRead;
+	output->pos += sizeWritten;
+	return result;
+}
+
+/*======   Finalize   ======*/
+
+/*! ZSTD_flushStream() :
+*   @return : amount of data remaining to flush */
+size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
+{
+	size_t srcSize = 0;
+	size_t sizeWritten = output->size - output->pos;
+	size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
+							  &srcSize, /* use a valid src address instead of NULL */
+							  zsf_flush);
+	output->pos += sizeWritten;
+	if (ZSTD_isError(result))
+		return result;
+	return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
+}
+
+size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
+{
+	BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
+	BYTE *const oend = (BYTE *)(output->dst) + output->size;
+	BYTE *op = ostart;
+
+	if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
+		return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
+
+	if (zcs->stage != zcss_final) {
+		/* flush whatever remains */
+		size_t srcSize = 0;
+		size_t sizeWritten = output->size - output->pos;
+		size_t const notEnded =
+		    ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
+		size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+		op += sizeWritten;
+		if (remainingToFlush) {
+			output->pos += sizeWritten;
+			return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
+		}
+		/* create epilogue */
+		zcs->stage = zcss_final;
+		zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
+									   0); /* write epilogue, including final empty block, into outBuff */
+	}
+
+	/* flush epilogue */
+	{
+		size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+		size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+		op += flushed;
+		zcs->outBuffFlushedSize += flushed;
+		output->pos += op - ostart;
+		if (toFlush == flushed)
+			zcs->stage = zcss_init; /* end reached */
+		return toFlush - flushed;
+	}
+}
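+
+/* Illustrative usage sketch (not part of the zstd sources) : the streaming
+ * path over a single input buffer. `wksp` is assumed to be at least
+ * ZSTD_CStreamWorkspaceBound(params.cParams) bytes and `dst` at least
+ * ZSTD_compressBound(srcSize) bytes; NULL and ZSTD_isError() checks are
+ * omitted. ZSTD_endStream() returns the number of bytes still to flush, so
+ * the final loop runs until it returns 0, and the compressed size ends up
+ * in out.pos.
+ *
+ *	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, 0);
+ *	ZSTD_CStream *const zcs = ZSTD_initCStream(params, srcSize, wksp, wkspSize);
+ *	ZSTD_inBuffer in;
+ *	ZSTD_outBuffer out;
+ *
+ *	in.src = src;  in.size = srcSize;      in.pos = 0;
+ *	out.dst = dst; out.size = dstCapacity; out.pos = 0;
+ *	while (in.pos < in.size)
+ *		ZSTD_compressStream(zcs, &out, &in);
+ *	while (ZSTD_endStream(zcs, &out) != 0)
+ *		;
+ */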
+
+/*-=====  Pre-defined compression levels  =====-*/
+
+#define ZSTD_DEFAULT_CLEVEL 1
+#define ZSTD_MAX_CLEVEL 22
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+
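+/* Each entry below is a ZSTD_compressionParameters initializer; in the column
+ * legends, W = windowLog, C = chainLog, H = hashLog, S = searchLog,
+ * L = searchLength, TL/T = targetLength and strat = strategy.
+ * ZSTD_getCParams() further below selects the sub-table from the
+ * (srcSize + dictSize) hint. */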
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
+    {
+	/* "default" */
+	/* W,  C,  H,  S,  L, TL, strat */
+	{18, 12, 12, 1, 7, 16, ZSTD_fast},    /* level  0 - never used */
+	{19, 13, 14, 1, 7, 16, ZSTD_fast},    /* level  1 */
+	{19, 15, 16, 1, 6, 16, ZSTD_fast},    /* level  2 */
+	{20, 16, 17, 1, 5, 16, ZSTD_dfast},   /* level  3.*/
+	{20, 18, 18, 1, 5, 16, ZSTD_dfast},   /* level  4.*/
+	{20, 15, 18, 3, 5, 16, ZSTD_greedy},  /* level  5 */
+	{21, 16, 19, 2, 5, 16, ZSTD_lazy},    /* level  6 */
+	{21, 17, 20, 3, 5, 16, ZSTD_lazy},    /* level  7 */
+	{21, 18, 20, 3, 5, 16, ZSTD_lazy2},   /* level  8 */
+	{21, 20, 20, 3, 5, 16, ZSTD_lazy2},   /* level  9 */
+	{21, 19, 21, 4, 5, 16, ZSTD_lazy2},   /* level 10 */
+	{22, 20, 22, 4, 5, 16, ZSTD_lazy2},   /* level 11 */
+	{22, 20, 22, 5, 5, 16, ZSTD_lazy2},   /* level 12 */
+	{22, 21, 22, 5, 5, 16, ZSTD_lazy2},   /* level 13 */
+	{22, 21, 22, 6, 5, 16, ZSTD_lazy2},   /* level 14 */
+	{22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
+	{23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
+	{23, 21, 22, 4, 5, 24, ZSTD_btopt},   /* level 17 */
+	{23, 23, 22, 6, 5, 32, ZSTD_btopt},   /* level 18 */
+	{23, 23, 22, 6, 3, 48, ZSTD_btopt},   /* level 19 */
+	{25, 25, 23, 7, 3, 64, ZSTD_btopt2},  /* level 20 */
+	{26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
+	{27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
+    },
+    {
+	/* for srcSize <= 256 KB */
+	/* W,  C,  H,  S,  L,  T, strat */
+	{0, 0, 0, 0, 0, 0, ZSTD_fast},	 /* level  0 - not used */
+	{18, 13, 14, 1, 6, 8, ZSTD_fast},      /* level  1 */
+	{18, 14, 13, 1, 5, 8, ZSTD_dfast},     /* level  2 */
+	{18, 16, 15, 1, 5, 8, ZSTD_dfast},     /* level  3 */
+	{18, 15, 17, 1, 5, 8, ZSTD_greedy},    /* level  4.*/
+	{18, 16, 17, 4, 5, 8, ZSTD_greedy},    /* level  5.*/
+	{18, 16, 17, 3, 5, 8, ZSTD_lazy},      /* level  6.*/
+	{18, 17, 17, 4, 4, 8, ZSTD_lazy},      /* level  7 */
+	{18, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
+	{18, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
+	{18, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
+	{18, 18, 17, 6, 4, 8, ZSTD_lazy2},     /* level 11.*/
+	{18, 18, 17, 7, 4, 8, ZSTD_lazy2},     /* level 12.*/
+	{18, 19, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13 */
+	{18, 18, 18, 4, 4, 16, ZSTD_btopt},    /* level 14.*/
+	{18, 18, 18, 4, 3, 16, ZSTD_btopt},    /* level 15.*/
+	{18, 19, 18, 6, 3, 32, ZSTD_btopt},    /* level 16.*/
+	{18, 19, 18, 8, 3, 64, ZSTD_btopt},    /* level 17.*/
+	{18, 19, 18, 9, 3, 128, ZSTD_btopt},   /* level 18.*/
+	{18, 19, 18, 10, 3, 256, ZSTD_btopt},  /* level 19.*/
+	{18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
+	{18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
+	{18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
+    },
+    {
+	/* for srcSize <= 128 KB */
+	/* W,  C,  H,  S,  L,  T, strat */
+	{17, 12, 12, 1, 7, 8, ZSTD_fast},      /* level  0 - not used */
+	{17, 12, 13, 1, 6, 8, ZSTD_fast},      /* level  1 */
+	{17, 13, 16, 1, 5, 8, ZSTD_fast},      /* level  2 */
+	{17, 16, 16, 2, 5, 8, ZSTD_dfast},     /* level  3 */
+	{17, 13, 15, 3, 4, 8, ZSTD_greedy},    /* level  4 */
+	{17, 15, 17, 4, 4, 8, ZSTD_greedy},    /* level  5 */
+	{17, 16, 17, 3, 4, 8, ZSTD_lazy},      /* level  6 */
+	{17, 15, 17, 4, 4, 8, ZSTD_lazy2},     /* level  7 */
+	{17, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
+	{17, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
+	{17, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
+	{17, 17, 17, 7, 4, 8, ZSTD_lazy2},     /* level 11 */
+	{17, 17, 17, 8, 4, 8, ZSTD_lazy2},     /* level 12 */
+	{17, 18, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13.*/
+	{17, 17, 17, 7, 3, 8, ZSTD_btopt},     /* level 14.*/
+	{17, 17, 17, 7, 3, 16, ZSTD_btopt},    /* level 15.*/
+	{17, 18, 17, 7, 3, 32, ZSTD_btopt},    /* level 16.*/
+	{17, 18, 17, 7, 3, 64, ZSTD_btopt},    /* level 17.*/
+	{17, 18, 17, 7, 3, 256, ZSTD_btopt},   /* level 18.*/
+	{17, 18, 17, 8, 3, 256, ZSTD_btopt},   /* level 19.*/
+	{17, 18, 17, 9, 3, 256, ZSTD_btopt2},  /* level 20.*/
+	{17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
+	{17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
+    },
+    {
+	/* for srcSize <= 16 KB */
+	/* W,  C,  H,  S,  L,  T, strat */
+	{14, 12, 12, 1, 7, 6, ZSTD_fast},      /* level  0 - not used */
+	{14, 14, 14, 1, 6, 6, ZSTD_fast},      /* level  1 */
+	{14, 14, 14, 1, 4, 6, ZSTD_fast},      /* level  2 */
+	{14, 14, 14, 1, 4, 6, ZSTD_dfast},     /* level  3.*/
+	{14, 14, 14, 4, 4, 6, ZSTD_greedy},    /* level  4.*/
+	{14, 14, 14, 3, 4, 6, ZSTD_lazy},      /* level  5.*/
+	{14, 14, 14, 4, 4, 6, ZSTD_lazy2},     /* level  6 */
+	{14, 14, 14, 5, 4, 6, ZSTD_lazy2},     /* level  7 */
+	{14, 14, 14, 6, 4, 6, ZSTD_lazy2},     /* level  8.*/
+	{14, 15, 14, 6, 4, 6, ZSTD_btlazy2},   /* level  9.*/
+	{14, 15, 14, 3, 3, 6, ZSTD_btopt},     /* level 10.*/
+	{14, 15, 14, 6, 3, 8, ZSTD_btopt},     /* level 11.*/
+	{14, 15, 14, 6, 3, 16, ZSTD_btopt},    /* level 12.*/
+	{14, 15, 14, 6, 3, 24, ZSTD_btopt},    /* level 13.*/
+	{14, 15, 15, 6, 3, 48, ZSTD_btopt},    /* level 14.*/
+	{14, 15, 15, 6, 3, 64, ZSTD_btopt},    /* level 15.*/
+	{14, 15, 15, 6, 3, 96, ZSTD_btopt},    /* level 16.*/
+	{14, 15, 15, 6, 3, 128, ZSTD_btopt},   /* level 17.*/
+	{14, 15, 15, 6, 3, 256, ZSTD_btopt},   /* level 18.*/
+	{14, 15, 15, 7, 3, 256, ZSTD_btopt},   /* level 19.*/
+	{14, 15, 15, 8, 3, 256, ZSTD_btopt2},  /* level 20.*/
+	{14, 15, 15, 9, 3, 256, ZSTD_btopt2},  /* level 21.*/
+	{14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
+    },
+};
+
+/*! ZSTD_getCParams() :
+*   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
+*   Size values are optional, provide 0 if not known or unused */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
+{
+	ZSTD_compressionParameters cp;
+	size_t const addedSize = srcSize ? 0 : 500;
+	U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
+	U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
+	if (compressionLevel <= 0)
+		compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
+	if (compressionLevel > ZSTD_MAX_CLEVEL)
+		compressionLevel = ZSTD_MAX_CLEVEL;
+	cp = ZSTD_defaultCParameters[tableID][compressionLevel];
+	if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
+		if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
+			cp.windowLog = ZSTD_WINDOWLOG_MAX;
+		if (cp.chainLog > ZSTD_CHAINLOG_MAX)
+			cp.chainLog = ZSTD_CHAINLOG_MAX;
+		if (cp.hashLog > ZSTD_HASHLOG_MAX)
+			cp.hashLog = ZSTD_HASHLOG_MAX;
+	}
+	cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
+	return cp;
+}
+
+/*! ZSTD_getParams() :
+*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
+*   All fields of `ZSTD_frameParameters` are set to default (0) */
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
+{
+	ZSTD_parameters params;
+	ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
+	memset(&params, 0, sizeof(params));
+	params.cParams = cParams;
+	return params;
+}
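+
+/*
+ * Illustrative usage sketch (not part of the original patch): deriving full
+ * parameters from a level.  The level and size hint below are arbitrary
+ * example values; non-positive levels fall back to ZSTD_DEFAULT_CLEVEL and
+ * levels above ZSTD_maxCLevel() are clamped.
+ */
+static ZSTD_parameters zstd_example_params(void)
+{
+	int const level = 3;				    /* example level */
+	unsigned long long const srcSizeHint = 100 * 1024; /* ~100 KB source */
+	/* selects the "srcSize <= 128 KB" row of ZSTD_defaultCParameters */
+	return ZSTD_getParams(level, srcSizeHint, 0);
+}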
+
+EXPORT_SYMBOL(ZSTD_maxCLevel);
+EXPORT_SYMBOL(ZSTD_compressBound);
+
+EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
+EXPORT_SYMBOL(ZSTD_initCCtx);
+EXPORT_SYMBOL(ZSTD_compressCCtx);
+EXPORT_SYMBOL(ZSTD_compress_usingDict);
+
+EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
+EXPORT_SYMBOL(ZSTD_initCDict);
+EXPORT_SYMBOL(ZSTD_compress_usingCDict);
+
+EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
+EXPORT_SYMBOL(ZSTD_initCStream);
+EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
+EXPORT_SYMBOL(ZSTD_resetCStream);
+EXPORT_SYMBOL(ZSTD_compressStream);
+EXPORT_SYMBOL(ZSTD_flushStream);
+EXPORT_SYMBOL(ZSTD_endStream);
+EXPORT_SYMBOL(ZSTD_CStreamInSize);
+EXPORT_SYMBOL(ZSTD_CStreamOutSize);
+
+EXPORT_SYMBOL(ZSTD_getCParams);
+EXPORT_SYMBOL(ZSTD_getParams);
+EXPORT_SYMBOL(ZSTD_checkCParams);
+EXPORT_SYMBOL(ZSTD_adjustCParams);
+
+EXPORT_SYMBOL(ZSTD_compressBegin);
+EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
+EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
+EXPORT_SYMBOL(ZSTD_copyCCtx);
+EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
+EXPORT_SYMBOL(ZSTD_compressContinue);
+EXPORT_SYMBOL(ZSTD_compressEnd);
+
+EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
+EXPORT_SYMBOL(ZSTD_compressBlock);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Compressor");
diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
new file mode 100644
index 0000000..b178467
--- /dev/null
+++ b/lib/zstd/decompress.c
@@ -0,0 +1,2528 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+*  MAXWINDOWSIZE_DEFAULT :
+*  maximum window size accepted by DStream, by default.
+*  Frames requiring more memory will be rejected.
+*/
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
+#endif
+
+/*-*******************************************************
+*  Dependencies
+*********************************************************/
+#include "fse.h"
+#include "huf.h"
+#include "mem.h" /* low level memory routines */
+#include "zstd_internal.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h> /* memcpy, memmove, memset */
+
+#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
+
+/*-*************************************
+*  Macros
+***************************************/
+#define ZSTD_isError ERR_isError /* for inlining */
+#define FSE_isError ERR_isError
+#define HUF_isError ERR_isError
+
+/*_*******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); }
+
+/*-*************************************************************
+*   Context management
+***************************************************************/
+typedef enum {
+	ZSTDds_getFrameHeaderSize,
+	ZSTDds_decodeFrameHeader,
+	ZSTDds_decodeBlockHeader,
+	ZSTDds_decompressBlock,
+	ZSTDds_decompressLastBlock,
+	ZSTDds_checkChecksum,
+	ZSTDds_decodeSkippableHeader,
+	ZSTDds_skipFrame
+} ZSTD_dStage;
+
+typedef struct {
+	FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+	FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+	FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+	HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+	U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2];
+	U32 rep[ZSTD_REP_NUM];
+} ZSTD_entropyTables_t;
+
+struct ZSTD_DCtx_s {
+	const FSE_DTable *LLTptr;
+	const FSE_DTable *MLTptr;
+	const FSE_DTable *OFTptr;
+	const HUF_DTable *HUFptr;
+	ZSTD_entropyTables_t entropy;
+	const void *previousDstEnd; /* detect continuity */
+	const void *base;	   /* start of curr segment */
+	const void *vBase;	  /* virtual start of previous segment if it was just before curr one */
+	const void *dictEnd;	/* end of previous segment */
+	size_t expected;
+	ZSTD_frameParams fParams;
+	blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+	ZSTD_dStage stage;
+	U32 litEntropy;
+	U32 fseEntropy;
+	struct xxh64_state xxhState;
+	size_t headerSize;
+	U32 dictID;
+	const BYTE *litPtr;
+	ZSTD_customMem customMem;
+	size_t litSize;
+	size_t rleSize;
+	BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
+	BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); }
+
+size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx)
+{
+	dctx->expected = ZSTD_frameHeaderSize_prefix;
+	dctx->stage = ZSTDds_getFrameHeaderSize;
+	dctx->previousDstEnd = NULL;
+	dctx->base = NULL;
+	dctx->vBase = NULL;
+	dctx->dictEnd = NULL;
+	dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+	dctx->litEntropy = dctx->fseEntropy = 0;
+	dctx->dictID = 0;
+	ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+	memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+	dctx->LLTptr = dctx->entropy.LLTable;
+	dctx->MLTptr = dctx->entropy.MLTable;
+	dctx->OFTptr = dctx->entropy.OFTable;
+	dctx->HUFptr = dctx->entropy.hufTable;
+	return 0;
+}
+
+ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+	ZSTD_DCtx *dctx;
+
+	if (!customMem.customAlloc || !customMem.customFree)
+		return NULL;
+
+	dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
+	if (!dctx)
+		return NULL;
+	memcpy(&dctx->customMem, &customMem, sizeof(customMem));
+	ZSTD_decompressBegin(dctx);
+	return dctx;
+}
+
+ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize)
+{
+	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
+	return ZSTD_createDCtx_advanced(stackMem);
+}
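+
+/*
+ * Illustrative usage sketch (not part of the original patch): a caller is
+ * expected to hand ZSTD_initDCtx() a workspace of at least
+ * ZSTD_DCtxWorkspaceBound() bytes; how that buffer is allocated is left to
+ * the caller and is out of scope here.
+ */
+static ZSTD_DCtx *zstd_example_init_dctx(void *workspace, size_t workspaceSize)
+{
+	if (workspaceSize < ZSTD_DCtxWorkspaceBound())
+		return NULL; /* workspace too small for the stack allocator */
+	return ZSTD_initDCtx(workspace, workspaceSize);
+}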
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx)
+{
+	if (dctx == NULL)
+		return 0; /* support free on NULL */
+	ZSTD_free(dctx, dctx->customMem);
+	return 0; /* reserved as a potential error code in the future */
+}
+
+void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx)
+{
+	size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
+	memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
+}
+
+static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict);
+
+/*-*************************************************************
+*   Decompression section
+***************************************************************/
+
+/*! ZSTD_isFrame() :
+ *  Tells if the content of `buffer` starts with a valid Frame Identifier.
+ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ *  Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void *buffer, size_t size)
+{
+	if (size < 4)
+		return 0;
+	{
+		U32 const magic = ZSTD_readLE32(buffer);
+		if (magic == ZSTD_MAGICNUMBER)
+			return 1;
+		if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START)
+			return 1;
+	}
+	return 0;
+}
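+
+/*
+ * Illustrative usage sketch (not part of the original patch): probing a
+ * buffer before attempting decompression.  Returns 0 for non-zstd data,
+ * 1 for a regular frame, 2 for a skippable frame (metadata only).
+ */
+static int zstd_example_probe(const void *buf, size_t len)
+{
+	if (!ZSTD_isFrame(buf, len))
+		return 0; /* neither a zstd frame nor a skippable frame */
+	return ((ZSTD_readLE32(buf) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) ? 2 : 1;
+}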
+
+/** ZSTD_frameHeaderSize() :
+*   srcSize must be >= ZSTD_frameHeaderSize_prefix.
+*   @return : size of the Frame Header */
+static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize)
+{
+	if (srcSize < ZSTD_frameHeaderSize_prefix)
+		return ERROR(srcSize_wrong);
+	{
+		BYTE const fhd = ((const BYTE *)src)[4];
+		U32 const dictID = fhd & 3;
+		U32 const singleSegment = (fhd >> 5) & 1;
+		U32 const fcsId = fhd >> 6;
+		return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId);
+	}
+}
+
+/** ZSTD_getFrameParams() :
+*   decode Frame Header, or require larger `srcSize`.
+*   @return : 0 when `fparamsPtr` is correctly filled,
+*            >0 when `srcSize` is too small; the return value is the expected `srcSize`,
+*             or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize)
+{
+	const BYTE *ip = (const BYTE *)src;
+
+	if (srcSize < ZSTD_frameHeaderSize_prefix)
+		return ZSTD_frameHeaderSize_prefix;
+	if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) {
+		if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+			if (srcSize < ZSTD_skippableHeaderSize)
+				return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
+			memset(fparamsPtr, 0, sizeof(*fparamsPtr));
+			fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4);
+			fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
+			return 0;
+		}
+		return ERROR(prefix_unknown);
+	}
+
+	/* ensure there is enough `srcSize` to fully read/decode frame header */
+	{
+		size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
+		if (srcSize < fhsize)
+			return fhsize;
+	}
+
+	{
+		BYTE const fhdByte = ip[4];
+		size_t pos = 5;
+		U32 const dictIDSizeCode = fhdByte & 3;
+		U32 const checksumFlag = (fhdByte >> 2) & 1;
+		U32 const singleSegment = (fhdByte >> 5) & 1;
+		U32 const fcsID = fhdByte >> 6;
+		U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
+		U32 windowSize = 0;
+		U32 dictID = 0;
+		U64 frameContentSize = 0;
+		if ((fhdByte & 0x08) != 0)
+			return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */
+		if (!singleSegment) {
+			BYTE const wlByte = ip[pos++];
+			U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+			if (windowLog > ZSTD_WINDOWLOG_MAX)
+				return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */
+			windowSize = (1U << windowLog);
+			windowSize += (windowSize >> 3) * (wlByte & 7);
+		}
+
+		switch (dictIDSizeCode) {
+		default: /* impossible */
+		case 0: break;
+		case 1:
+			dictID = ip[pos];
+			pos++;
+			break;
+		case 2:
+			dictID = ZSTD_readLE16(ip + pos);
+			pos += 2;
+			break;
+		case 3:
+			dictID = ZSTD_readLE32(ip + pos);
+			pos += 4;
+			break;
+		}
+		switch (fcsID) {
+		default: /* impossible */
+		case 0:
+			if (singleSegment)
+				frameContentSize = ip[pos];
+			break;
+		case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break;
+		case 2: frameContentSize = ZSTD_readLE32(ip + pos); break;
+		case 3: frameContentSize = ZSTD_readLE64(ip + pos); break;
+		}
+		if (!windowSize)
+			windowSize = (U32)frameContentSize;
+		if (windowSize > windowSizeMax)
+			return ERROR(frameParameter_windowTooLarge);
+		fparamsPtr->frameContentSize = frameContentSize;
+		fparamsPtr->windowSize = windowSize;
+		fparamsPtr->dictID = dictID;
+		fparamsPtr->checksumFlag = checksumFlag;
+	}
+	return 0;
+}
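+
+/*
+ * Illustrative usage sketch (not part of the original patch): the three-way
+ * return contract documented above.  The wrapper and its return values are
+ * assumptions made up for the example.
+ */
+static int zstd_example_read_header(ZSTD_frameParams *fp, const void *buf, size_t bufSize)
+{
+	size_t const ret = ZSTD_getFrameParams(fp, buf, bufSize);
+	if (ZSTD_isError(ret))
+		return -1;	 /* not a zstd frame, or the header is invalid */
+	if (ret > 0)
+		return (int)ret; /* need at least `ret` bytes of input to decode the header */
+	return 0;		 /* fp->windowSize, frameContentSize, dictID, checksumFlag are set */
+}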
+
+/** ZSTD_getFrameContentSize() :
+*   compatible with legacy mode
+*   @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+*             - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+*             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+	{
+		ZSTD_frameParams fParams;
+		if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0)
+			return ZSTD_CONTENTSIZE_ERROR;
+		if (fParams.windowSize == 0) {
+			/* Either skippable or empty frame, size == 0 either way */
+			return 0;
+		} else if (fParams.frameContentSize != 0) {
+			return fParams.frameContentSize;
+		} else {
+			return ZSTD_CONTENTSIZE_UNKNOWN;
+		}
+	}
+}
+
+/** ZSTD_findDecompressedSize() :
+ *  compatible with legacy mode
+ *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ *      skippable frames
+ *  @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize)
+{
+	{
+		unsigned long long totalDstSize = 0;
+		while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+			const U32 magicNumber = ZSTD_readLE32(src);
+
+			if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+				size_t skippableSize;
+				if (srcSize < ZSTD_skippableHeaderSize)
+					return ERROR(srcSize_wrong);
+				skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
+				if (srcSize < skippableSize) {
+					return ZSTD_CONTENTSIZE_ERROR;
+				}
+
+				src = (const BYTE *)src + skippableSize;
+				srcSize -= skippableSize;
+				continue;
+			}
+
+			{
+				unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+				if (ret >= ZSTD_CONTENTSIZE_ERROR)
+					return ret;
+
+				/* check for overflow */
+				if (totalDstSize + ret < totalDstSize)
+					return ZSTD_CONTENTSIZE_ERROR;
+				totalDstSize += ret;
+			}
+			{
+				size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+				if (ZSTD_isError(frameSrcSize)) {
+					return ZSTD_CONTENTSIZE_ERROR;
+				}
+
+				src = (const BYTE *)src + frameSrcSize;
+				srcSize -= frameSrcSize;
+			}
+		}
+
+		if (srcSize) {
+			return ZSTD_CONTENTSIZE_ERROR;
+		}
+
+		return totalDstSize;
+	}
+}
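+
+/*
+ * Illustrative usage sketch (not part of the original patch): sizing a
+ * destination buffer before decompression.  `maxAccepted` is an assumed
+ * caller-provided limit; the sentinels are the ones handled above.
+ */
+static unsigned long long zstd_example_dst_size(const void *src, size_t srcSize, unsigned long long maxAccepted)
+{
+	unsigned long long const dstSize = ZSTD_findDecompressedSize(src, srcSize);
+	if (dstSize == ZSTD_CONTENTSIZE_ERROR)
+		return 0; /* malformed or truncated input */
+	if (dstSize == ZSTD_CONTENTSIZE_UNKNOWN || dstSize > maxAccepted)
+		return 0; /* size not recorded in the frame header(s), or too large */
+	return dstSize;
+}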
+
+/** ZSTD_decodeFrameHeader() :
+*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize)
+{
+	size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
+	if (ZSTD_isError(result))
+		return result; /* invalid header */
+	if (result > 0)
+		return ERROR(srcSize_wrong); /* headerSize too small */
+	if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
+		return ERROR(dictionary_wrong);
+	if (dctx->fParams.checksumFlag)
+		xxh64_reset(&dctx->xxhState, 0);
+	return 0;
+}
+
+typedef struct {
+	blockType_e blockType;
+	U32 lastBlock;
+	U32 origSize;
+} blockProperties_t;
+
+/*! ZSTD_getcBlockSize() :
+*   Provides the size of the compressed block, read from the block header at `src` */
+size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr)
+{
+	if (srcSize < ZSTD_blockHeaderSize)
+		return ERROR(srcSize_wrong);
+	{
+		U32 const cBlockHeader = ZSTD_readLE24(src);
+		U32 const cSize = cBlockHeader >> 3;
+		bpPtr->lastBlock = cBlockHeader & 1;
+		bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+		bpPtr->origSize = cSize; /* only useful for RLE */
+		if (bpPtr->blockType == bt_rle)
+			return 1;
+		if (bpPtr->blockType == bt_reserved)
+			return ERROR(corruption_detected);
+		return cSize;
+	}
+}
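+
+/*
+ * Illustrative usage sketch (not part of the original patch): reading the
+ * first block header that follows a frame header.  `frameHeaderSize` is
+ * assumed to come from ZSTD_frameHeaderSize(); only the offsets and the
+ * blockProperties_t output are being illustrated.
+ */
+static size_t zstd_example_first_block(const void *frame, size_t frameSize, size_t frameHeaderSize, blockProperties_t *bp)
+{
+	const BYTE *const blockHeader = (const BYTE *)frame + frameHeaderSize;
+
+	if (frameSize < frameHeaderSize + ZSTD_blockHeaderSize)
+		return ERROR(srcSize_wrong);
+	/* on success, bp->blockType is bt_raw, bt_rle or bt_compressed and
+	 * bp->lastBlock flags the final block of the frame */
+	return ZSTD_getcBlockSize(blockHeader, frameSize - frameHeaderSize, bp);
+}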
+
+static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	if (srcSize > dstCapacity)
+		return ERROR(dstSize_tooSmall);
+	memcpy(dst, src, srcSize);
+	return srcSize;
+}
+
+static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize)
+{
+	if (srcSize != 1)
+		return ERROR(srcSize_wrong);
+	if (regenSize > dstCapacity)
+		return ERROR(dstSize_tooSmall);
+	memset(dst, *(const BYTE *)src, regenSize);
+	return regenSize;
+}
+
+/*! ZSTD_decodeLiteralsBlock() :
+	@return : number of bytes read from src (< srcSize) */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
+{
+	if (srcSize < MIN_CBLOCK_SIZE)
+		return ERROR(corruption_detected);
+
+	{
+		const BYTE *const istart = (const BYTE *)src;
+		symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+		switch (litEncType) {
+		case set_repeat:
+			if (dctx->litEntropy == 0)
+				return ERROR(dictionary_corrupted);
+		/* fall-through */
+		case set_compressed:
+			if (srcSize < 5)
+				return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+			{
+				size_t lhSize, litSize, litCSize;
+				U32 singleStream = 0;
+				U32 const lhlCode = (istart[0] >> 2) & 3;
+				U32 const lhc = ZSTD_readLE32(istart);
+				switch (lhlCode) {
+				case 0:
+				case 1:
+				default: /* note : default is impossible, since lhlCode is in [0..3] */
+					/* 2 - 2 - 10 - 10 */
+					singleStream = !lhlCode;
+					lhSize = 3;
+					litSize = (lhc >> 4) & 0x3FF;
+					litCSize = (lhc >> 14) & 0x3FF;
+					break;
+				case 2:
+					/* 2 - 2 - 14 - 14 */
+					lhSize = 4;
+					litSize = (lhc >> 4) & 0x3FFF;
+					litCSize = lhc >> 18;
+					break;
+				case 3:
+					/* 2 - 2 - 18 - 18 */
+					lhSize = 5;
+					litSize = (lhc >> 4) & 0x3FFFF;
+					litCSize = (lhc >> 22) + (istart[4] << 10);
+					break;
+				}
+				if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
+					return ERROR(corruption_detected);
+				if (litCSize + lhSize > srcSize)
+					return ERROR(corruption_detected);
+
+				if (HUF_isError(
+					(litEncType == set_repeat)
+					    ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)
+							    : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr))
+					    : (singleStream
+						   ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
+										 dctx->entropy.workspace, sizeof(dctx->entropy.workspace))
+						   : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
+										   dctx->entropy.workspace, sizeof(dctx->entropy.workspace)))))
+					return ERROR(corruption_detected);
+
+				dctx->litPtr = dctx->litBuffer;
+				dctx->litSize = litSize;
+				dctx->litEntropy = 1;
+				if (litEncType == set_compressed)
+					dctx->HUFptr = dctx->entropy.hufTable;
+				memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+				return litCSize + lhSize;
+			}
+
+		case set_basic: {
+			size_t litSize, lhSize;
+			U32 const lhlCode = ((istart[0]) >> 2) & 3;
+			switch (lhlCode) {
+			case 0:
+			case 2:
+			default: /* note : default is impossible, since lhlCode is in [0..3] */
+				lhSize = 1;
+				litSize = istart[0] >> 3;
+				break;
+			case 1:
+				lhSize = 2;
+				litSize = ZSTD_readLE16(istart) >> 4;
+				break;
+			case 3:
+				lhSize = 3;
+				litSize = ZSTD_readLE24(istart) >> 4;
+				break;
+			}
+
+			if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+				if (litSize + lhSize > srcSize)
+					return ERROR(corruption_detected);
+				memcpy(dctx->litBuffer, istart + lhSize, litSize);
+				dctx->litPtr = dctx->litBuffer;
+				dctx->litSize = litSize;
+				memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+				return lhSize + litSize;
+			}
+			/* direct reference into compressed stream */
+			dctx->litPtr = istart + lhSize;
+			dctx->litSize = litSize;
+			return lhSize + litSize;
+		}
+
+		case set_rle: {
+			U32 const lhlCode = ((istart[0]) >> 2) & 3;
+			size_t litSize, lhSize;
+			switch (lhlCode) {
+			case 0:
+			case 2:
+			default: /* note : default is impossible, since lhlCode is in [0..3] */
+				lhSize = 1;
+				litSize = istart[0] >> 3;
+				break;
+			case 1:
+				lhSize = 2;
+				litSize = ZSTD_readLE16(istart) >> 4;
+				break;
+			case 3:
+				lhSize = 3;
+				litSize = ZSTD_readLE24(istart) >> 4;
+				if (srcSize < 4)
+					return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+				break;
+			}
+			if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
+				return ERROR(corruption_detected);
+			memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+			dctx->litPtr = dctx->litBuffer;
+			dctx->litSize = litSize;
+			return lhSize + 1;
+		}
+		default:
+			return ERROR(corruption_detected); /* impossible */
+		}
+	}
+}
+
+typedef union {
+	FSE_decode_t realData;
+	U32 alignedBy4;
+} FSE_decode_t4;
+
+static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = {
+    {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
+    {{0, 0, 4}},		 /* 0 : base, symbol, bits */
+    {{16, 0, 4}},
+    {{32, 1, 5}},
+    {{0, 3, 5}},
+    {{0, 4, 5}},
+    {{0, 6, 5}},
+    {{0, 7, 5}},
+    {{0, 9, 5}},
+    {{0, 10, 5}},
+    {{0, 12, 5}},
+    {{0, 14, 6}},
+    {{0, 16, 5}},
+    {{0, 18, 5}},
+    {{0, 19, 5}},
+    {{0, 21, 5}},
+    {{0, 22, 5}},
+    {{0, 24, 5}},
+    {{32, 25, 5}},
+    {{0, 26, 5}},
+    {{0, 27, 6}},
+    {{0, 29, 6}},
+    {{0, 31, 6}},
+    {{32, 0, 4}},
+    {{0, 1, 4}},
+    {{0, 2, 5}},
+    {{32, 4, 5}},
+    {{0, 5, 5}},
+    {{32, 7, 5}},
+    {{0, 8, 5}},
+    {{32, 10, 5}},
+    {{0, 11, 5}},
+    {{0, 13, 6}},
+    {{32, 16, 5}},
+    {{0, 17, 5}},
+    {{32, 19, 5}},
+    {{0, 20, 5}},
+    {{32, 22, 5}},
+    {{0, 23, 5}},
+    {{0, 25, 4}},
+    {{16, 25, 4}},
+    {{32, 26, 5}},
+    {{0, 28, 6}},
+    {{0, 30, 6}},
+    {{48, 0, 4}},
+    {{16, 1, 4}},
+    {{32, 2, 5}},
+    {{32, 3, 5}},
+    {{32, 5, 5}},
+    {{32, 6, 5}},
+    {{32, 8, 5}},
+    {{32, 9, 5}},
+    {{32, 11, 5}},
+    {{32, 12, 5}},
+    {{0, 15, 6}},
+    {{32, 17, 5}},
+    {{32, 18, 5}},
+    {{32, 20, 5}},
+    {{32, 21, 5}},
+    {{32, 23, 5}},
+    {{32, 24, 5}},
+    {{0, 35, 6}},
+    {{0, 34, 6}},
+    {{0, 33, 6}},
+    {{0, 32, 6}},
+}; /* LL_defaultDTable */
+
+static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = {
+    {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
+    {{0, 0, 6}},		 /* 0 : base, symbol, bits */
+    {{0, 1, 4}},
+    {{32, 2, 5}},
+    {{0, 3, 5}},
+    {{0, 5, 5}},
+    {{0, 6, 5}},
+    {{0, 8, 5}},
+    {{0, 10, 6}},
+    {{0, 13, 6}},
+    {{0, 16, 6}},
+    {{0, 19, 6}},
+    {{0, 22, 6}},
+    {{0, 25, 6}},
+    {{0, 28, 6}},
+    {{0, 31, 6}},
+    {{0, 33, 6}},
+    {{0, 35, 6}},
+    {{0, 37, 6}},
+    {{0, 39, 6}},
+    {{0, 41, 6}},
+    {{0, 43, 6}},
+    {{0, 45, 6}},
+    {{16, 1, 4}},
+    {{0, 2, 4}},
+    {{32, 3, 5}},
+    {{0, 4, 5}},
+    {{32, 6, 5}},
+    {{0, 7, 5}},
+    {{0, 9, 6}},
+    {{0, 12, 6}},
+    {{0, 15, 6}},
+    {{0, 18, 6}},
+    {{0, 21, 6}},
+    {{0, 24, 6}},
+    {{0, 27, 6}},
+    {{0, 30, 6}},
+    {{0, 32, 6}},
+    {{0, 34, 6}},
+    {{0, 36, 6}},
+    {{0, 38, 6}},
+    {{0, 40, 6}},
+    {{0, 42, 6}},
+    {{0, 44, 6}},
+    {{32, 1, 4}},
+    {{48, 1, 4}},
+    {{16, 2, 4}},
+    {{32, 4, 5}},
+    {{32, 5, 5}},
+    {{32, 7, 5}},
+    {{32, 8, 5}},
+    {{0, 11, 6}},
+    {{0, 14, 6}},
+    {{0, 17, 6}},
+    {{0, 20, 6}},
+    {{0, 23, 6}},
+    {{0, 26, 6}},
+    {{0, 29, 6}},
+    {{0, 52, 6}},
+    {{0, 51, 6}},
+    {{0, 50, 6}},
+    {{0, 49, 6}},
+    {{0, 48, 6}},
+    {{0, 47, 6}},
+    {{0, 46, 6}},
+}; /* ML_defaultDTable */
+
+static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = {
+    {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
+    {{0, 0, 5}},		 /* 0 : base, symbol, bits */
+    {{0, 6, 4}},
+    {{0, 9, 5}},
+    {{0, 15, 5}},
+    {{0, 21, 5}},
+    {{0, 3, 5}},
+    {{0, 7, 4}},
+    {{0, 12, 5}},
+    {{0, 18, 5}},
+    {{0, 23, 5}},
+    {{0, 5, 5}},
+    {{0, 8, 4}},
+    {{0, 14, 5}},
+    {{0, 20, 5}},
+    {{0, 2, 5}},
+    {{16, 7, 4}},
+    {{0, 11, 5}},
+    {{0, 17, 5}},
+    {{0, 22, 5}},
+    {{0, 4, 5}},
+    {{16, 8, 4}},
+    {{0, 13, 5}},
+    {{0, 19, 5}},
+    {{0, 1, 5}},
+    {{16, 6, 4}},
+    {{0, 10, 5}},
+    {{0, 16, 5}},
+    {{0, 28, 5}},
+    {{0, 27, 5}},
+    {{0, 26, 5}},
+    {{0, 25, 5}},
+    {{0, 24, 5}},
+}; /* OF_defaultDTable */
+
+/*! ZSTD_buildSeqTable() :
+	@return : number of bytes read from src,
+			  or an error code if it fails, testable with ZSTD_isError()
+*/
+static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src,
+				 size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize)
+{
+	const void *const tmpPtr = defaultTable; /* bypass strict aliasing */
+	switch (type) {
+	case set_rle:
+		if (!srcSize)
+			return ERROR(srcSize_wrong);
+		if ((*(const BYTE *)src) > max)
+			return ERROR(corruption_detected);
+		FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src);
+		*DTablePtr = DTableSpace;
+		return 1;
+	case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0;
+	case set_repeat:
+		if (!flagRepeatTable)
+			return ERROR(corruption_detected);
+		return 0;
+	default: /* impossible */
+	case set_compressed: {
+		U32 tableLog;
+		S16 *norm = (S16 *)workspace;
+		size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
+
+		if ((spaceUsed32 << 2) > workspaceSize)
+			return ERROR(GENERIC);
+		workspace = (U32 *)workspace + spaceUsed32;
+		workspaceSize -= (spaceUsed32 << 2);
+		{
+			size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+			if (FSE_isError(headerSize))
+				return ERROR(corruption_detected);
+			if (tableLog > maxLog)
+				return ERROR(corruption_detected);
+			FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize);
+			*DTablePtr = DTableSpace;
+			return headerSize;
+		}
+	}
+	}
+}
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
+{
+	const BYTE *const istart = (const BYTE *const)src;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *ip = istart;
+
+	/* check */
+	if (srcSize < MIN_SEQUENCES_SIZE)
+		return ERROR(srcSize_wrong);
+
+	/* SeqHead */
+	{
+		int nbSeq = *ip++;
+		if (!nbSeq) {
+			*nbSeqPtr = 0;
+			return 1;
+		}
+		if (nbSeq > 0x7F) {
+			if (nbSeq == 0xFF) {
+				if (ip + 2 > iend)
+					return ERROR(srcSize_wrong);
+				nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2;
+			} else {
+				if (ip >= iend)
+					return ERROR(srcSize_wrong);
+				nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
+			}
+		}
+		*nbSeqPtr = nbSeq;
+	}
+
+	/* FSE table descriptors */
+	if (ip + 4 > iend)
+		return ERROR(srcSize_wrong); /* minimum possible size */
+	{
+		symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+		symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+		symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+		ip++;
+
+		/* Build DTables */
+		{
+			size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip,
+								  LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
+			if (ZSTD_isError(llhSize))
+				return ERROR(corruption_detected);
+			ip += llhSize;
+		}
+		{
+			size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip,
+								  OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
+			if (ZSTD_isError(ofhSize))
+				return ERROR(corruption_detected);
+			ip += ofhSize;
+		}
+		{
+			size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip,
+								  ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
+			if (ZSTD_isError(mlhSize))
+				return ERROR(corruption_detected);
+			ip += mlhSize;
+		}
+	}
+
+	return ip - istart;
+}
+
+typedef struct {
+	size_t litLength;
+	size_t matchLength;
+	size_t offset;
+	const BYTE *match;
+} seq_t;
+
+typedef struct {
+	BIT_DStream_t DStream;
+	FSE_DState_t stateLL;
+	FSE_DState_t stateOffb;
+	FSE_DState_t stateML;
+	size_t prevOffset[ZSTD_REP_NUM];
+	const BYTE *base;
+	size_t pos;
+	uPtrDiff gotoDict;
+} seqState_t;
+
+FORCE_NOINLINE
+size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
+			      const BYTE *const vBase, const BYTE *const dictEnd)
+{
+	BYTE *const oLitEnd = op + sequence.litLength;
+	size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+	BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+	BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
+	const BYTE *const iLitEnd = *litPtr + sequence.litLength;
+	const BYTE *match = oLitEnd - sequence.offset;
+
+	/* check */
+	if (oMatchEnd > oend)
+		return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+	if (iLitEnd > litLimit)
+		return ERROR(corruption_detected); /* over-read beyond lit buffer */
+	if (oLitEnd <= oend_w)
+		return ERROR(GENERIC); /* Precondition */
+
+	/* copy literals */
+	if (op < oend_w) {
+		ZSTD_wildcopy(op, *litPtr, oend_w - op);
+		*litPtr += oend_w - op;
+		op = oend_w;
+	}
+	while (op < oLitEnd)
+		*op++ = *(*litPtr)++;
+
+	/* copy Match */
+	if (sequence.offset > (size_t)(oLitEnd - base)) {
+		/* offset beyond prefix */
+		if (sequence.offset > (size_t)(oLitEnd - vBase))
+			return ERROR(corruption_detected);
+		match = dictEnd - (base - match);
+		if (match + sequence.matchLength <= dictEnd) {
+			memmove(oLitEnd, match, sequence.matchLength);
+			return sequenceLength;
+		}
+		/* span extDict & currPrefixSegment */
+		{
+			size_t const length1 = dictEnd - match;
+			memmove(oLitEnd, match, length1);
+			op = oLitEnd + length1;
+			sequence.matchLength -= length1;
+			match = base;
+		}
+	}
+	while (op < oMatchEnd)
+		*op++ = *match++;
+	return sequenceLength;
+}
+
+static seq_t ZSTD_decodeSequence(seqState_t *seqState)
+{
+	seq_t seq;
+
+	U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
+	U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
+	U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
+
+	U32 const llBits = LL_bits[llCode];
+	U32 const mlBits = ML_bits[mlCode];
+	U32 const ofBits = ofCode;
+	U32 const totalBits = llBits + mlBits + ofBits;
+
+	static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
+					       20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
+
+	static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
+					       21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
+					       43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
+
+	static const U32 OF_base[MaxOff + 1] = {0,       1,	1,	5,	0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
+						0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
+						0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
+
+	/* sequence */
+	{
+		size_t offset;
+		if (!ofCode)
+			offset = 0;
+		else {
+			offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+			if (ZSTD_32bits())
+				BIT_reloadDStream(&seqState->DStream);
+		}
+
+		if (ofCode <= 1) {
+			offset += (llCode == 0);
+			if (offset) {
+				size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+				temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+				if (offset != 1)
+					seqState->prevOffset[2] = seqState->prevOffset[1];
+				seqState->prevOffset[1] = seqState->prevOffset[0];
+				seqState->prevOffset[0] = offset = temp;
+			} else {
+				offset = seqState->prevOffset[0];
+			}
+		} else {
+			seqState->prevOffset[2] = seqState->prevOffset[1];
+			seqState->prevOffset[1] = seqState->prevOffset[0];
+			seqState->prevOffset[0] = offset;
+		}
+		seq.offset = offset;
+	}
+
+	seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
+	if (ZSTD_32bits() && (mlBits + llBits > 24))
+		BIT_reloadDStream(&seqState->DStream);
+
+	seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
+	if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
+		BIT_reloadDStream(&seqState->DStream);
+
+	/* ANS state update */
+	FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
+	FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
+	if (ZSTD_32bits())
+		BIT_reloadDStream(&seqState->DStream);		   /* <= 18 bits */
+	FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
+
+	seq.match = NULL;
+
+	return seq;
+}
+
+FORCE_INLINE
+size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
+			 const BYTE *const vBase, const BYTE *const dictEnd)
+{
+	BYTE *const oLitEnd = op + sequence.litLength;
+	size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+	BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+	BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
+	const BYTE *const iLitEnd = *litPtr + sequence.litLength;
+	const BYTE *match = oLitEnd - sequence.offset;
+
+	/* check */
+	if (oMatchEnd > oend)
+		return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+	if (iLitEnd > litLimit)
+		return ERROR(corruption_detected); /* over-read beyond lit buffer */
+	if (oLitEnd > oend_w)
+		return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
+
+	/* copy Literals */
+	ZSTD_copy8(op, *litPtr);
+	if (sequence.litLength > 8)
+		ZSTD_wildcopy(op + 8, (*litPtr) + 8,
+			      sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+	op = oLitEnd;
+	*litPtr = iLitEnd; /* update for next sequence */
+
+	/* copy Match */
+	if (sequence.offset > (size_t)(oLitEnd - base)) {
+		/* offset beyond prefix */
+		if (sequence.offset > (size_t)(oLitEnd - vBase))
+			return ERROR(corruption_detected);
+		match = dictEnd + (match - base);
+		if (match + sequence.matchLength <= dictEnd) {
+			memmove(oLitEnd, match, sequence.matchLength);
+			return sequenceLength;
+		}
+		/* span extDict & currPrefixSegment */
+		{
+			size_t const length1 = dictEnd - match;
+			memmove(oLitEnd, match, length1);
+			op = oLitEnd + length1;
+			sequence.matchLength -= length1;
+			match = base;
+			if (op > oend_w || sequence.matchLength < MINMATCH) {
+				U32 i;
+				for (i = 0; i < sequence.matchLength; ++i)
+					op[i] = match[i];
+				return sequenceLength;
+			}
+		}
+	}
+	/* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+
+	/* match within prefix */
+	if (sequence.offset < 8) {
+		/* close range match, overlap */
+		static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
+		static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
+		int const sub2 = dec64table[sequence.offset];
+		op[0] = match[0];
+		op[1] = match[1];
+		op[2] = match[2];
+		op[3] = match[3];
+		match += dec32table[sequence.offset];
+		ZSTD_copy4(op + 4, match);
+		match -= sub2;
+	} else {
+		ZSTD_copy8(op, match);
+	}
+	op += 8;
+	match += 8;
+
+	if (oMatchEnd > oend - (16 - MINMATCH)) {
+		if (op < oend_w) {
+			ZSTD_wildcopy(op, match, oend_w - op);
+			match += oend_w - op;
+			op = oend_w;
+		}
+		while (op < oMatchEnd)
+			*op++ = *match++;
+	} else {
+		ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
+	}
+	return sequenceLength;
+}
+
+static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
+{
+	const BYTE *ip = (const BYTE *)seqStart;
+	const BYTE *const iend = ip + seqSize;
+	BYTE *const ostart = (BYTE * const)dst;
+	BYTE *const oend = ostart + maxDstSize;
+	BYTE *op = ostart;
+	const BYTE *litPtr = dctx->litPtr;
+	const BYTE *const litEnd = litPtr + dctx->litSize;
+	const BYTE *const base = (const BYTE *)(dctx->base);
+	const BYTE *const vBase = (const BYTE *)(dctx->vBase);
+	const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
+	int nbSeq;
+
+	/* Build Decoding Tables */
+	{
+		size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
+		if (ZSTD_isError(seqHSize))
+			return seqHSize;
+		ip += seqHSize;
+	}
+
+	/* Regen sequences */
+	if (nbSeq) {
+		seqState_t seqState;
+		dctx->fseEntropy = 1;
+		{
+			U32 i;
+			for (i = 0; i < ZSTD_REP_NUM; i++)
+				seqState.prevOffset[i] = dctx->entropy.rep[i];
+		}
+		CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
+		FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+		FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+		FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+		for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) {
+			nbSeq--;
+			{
+				seq_t const sequence = ZSTD_decodeSequence(&seqState);
+				size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+				if (ZSTD_isError(oneSeqSize))
+					return oneSeqSize;
+				op += oneSeqSize;
+			}
+		}
+
+		/* check if reached exact end */
+		if (nbSeq)
+			return ERROR(corruption_detected);
+		/* save reps for next block */
+		{
+			U32 i;
+			for (i = 0; i < ZSTD_REP_NUM; i++)
+				dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
+		}
+	}
+
+	/* last literal segment */
+	{
+		size_t const lastLLSize = litEnd - litPtr;
+		if (lastLLSize > (size_t)(oend - op))
+			return ERROR(dstSize_tooSmall);
+		memcpy(op, litPtr, lastLLSize);
+		op += lastLLSize;
+	}
+
+	return op - ostart;
+}
+
+FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets)
+{
+	seq_t seq;
+
+	U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
+	U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
+	U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
+
+	U32 const llBits = LL_bits[llCode];
+	U32 const mlBits = ML_bits[mlCode];
+	U32 const ofBits = ofCode;
+	U32 const totalBits = llBits + mlBits + ofBits;
+
+	static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
+					       20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
+
+	static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
+					       21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
+					       43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
+
+	static const U32 OF_base[MaxOff + 1] = {0,       1,	1,	5,	0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
+						0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
+						0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
+
+	/* sequence */
+	{
+		size_t offset;
+		if (!ofCode)
+			offset = 0;
+		else {
+			if (longOffsets) {
+				int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
+				offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+				if (ZSTD_32bits() || extraBits)
+					BIT_reloadDStream(&seqState->DStream);
+				if (extraBits)
+					offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+			} else {
+				offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+				if (ZSTD_32bits())
+					BIT_reloadDStream(&seqState->DStream);
+			}
+		}
+
+		if (ofCode <= 1) {
+			offset += (llCode == 0);
+			if (offset) {
+				size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+				temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+				if (offset != 1)
+					seqState->prevOffset[2] = seqState->prevOffset[1];
+				seqState->prevOffset[1] = seqState->prevOffset[0];
+				seqState->prevOffset[0] = offset = temp;
+			} else {
+				offset = seqState->prevOffset[0];
+			}
+		} else {
+			seqState->prevOffset[2] = seqState->prevOffset[1];
+			seqState->prevOffset[1] = seqState->prevOffset[0];
+			seqState->prevOffset[0] = offset;
+		}
+		seq.offset = offset;
+	}
+
+	seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
+	if (ZSTD_32bits() && (mlBits + llBits > 24))
+		BIT_reloadDStream(&seqState->DStream);
+
+	seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
+	if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
+		BIT_reloadDStream(&seqState->DStream);
+
+	{
+		size_t const pos = seqState->pos + seq.litLength;
+		seq.match = seqState->base + pos - seq.offset; /* single memory segment */
+		if (seq.offset > pos)
+			seq.match += seqState->gotoDict; /* separate memory segment */
+		seqState->pos = pos + seq.matchLength;
+	}
+
+	/* ANS state update */
+	FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
+	FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
+	if (ZSTD_32bits())
+		BIT_reloadDStream(&seqState->DStream);		   /* <= 18 bits */
+	FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
+
+	return seq;
+}
+
+static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize)
+{
+	if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
+		return ZSTD_decodeSequenceLong_generic(seqState, 1);
+	} else {
+		return ZSTD_decodeSequenceLong_generic(seqState, 0);
+	}
+}
+
+FORCE_INLINE
+size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
+			     const BYTE *const vBase, const BYTE *const dictEnd)
+{
+	BYTE *const oLitEnd = op + sequence.litLength;
+	size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+	BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+	BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
+	const BYTE *const iLitEnd = *litPtr + sequence.litLength;
+	const BYTE *match = sequence.match;
+
+	/* check */
+	if (oMatchEnd > oend)
+		return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+	if (iLitEnd > litLimit)
+		return ERROR(corruption_detected); /* over-read beyond lit buffer */
+	if (oLitEnd > oend_w)
+		return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
+
+	/* copy Literals */
+	ZSTD_copy8(op, *litPtr);
+	if (sequence.litLength > 8)
+		ZSTD_wildcopy(op + 8, (*litPtr) + 8,
+			      sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+	op = oLitEnd;
+	*litPtr = iLitEnd; /* update for next sequence */
+
+	/* copy Match */
+	if (sequence.offset > (size_t)(oLitEnd - base)) {
+		/* offset beyond prefix */
+		if (sequence.offset > (size_t)(oLitEnd - vBase))
+			return ERROR(corruption_detected);
+		if (match + sequence.matchLength <= dictEnd) {
+			memmove(oLitEnd, match, sequence.matchLength);
+			return sequenceLength;
+		}
+		/* span extDict & currPrefixSegment */
+		{
+			size_t const length1 = dictEnd - match;
+			memmove(oLitEnd, match, length1);
+			op = oLitEnd + length1;
+			sequence.matchLength -= length1;
+			match = base;
+			if (op > oend_w || sequence.matchLength < MINMATCH) {
+				U32 i;
+				for (i = 0; i < sequence.matchLength; ++i)
+					op[i] = match[i];
+				return sequenceLength;
+			}
+		}
+	}
+	/* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+
+	/* match within prefix */
+	if (sequence.offset < 8) {
+		/* close range match, overlap */
+		static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
+		static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
+		int const sub2 = dec64table[sequence.offset];
+		op[0] = match[0];
+		op[1] = match[1];
+		op[2] = match[2];
+		op[3] = match[3];
+		match += dec32table[sequence.offset];
+		ZSTD_copy4(op + 4, match);
+		match -= sub2;
+	} else {
+		ZSTD_copy8(op, match);
+	}
+	op += 8;
+	match += 8;
+
+	if (oMatchEnd > oend - (16 - MINMATCH)) {
+		if (op < oend_w) {
+			ZSTD_wildcopy(op, match, oend_w - op);
+			match += oend_w - op;
+			op = oend_w;
+		}
+		while (op < oMatchEnd)
+			*op++ = *match++;
+	} else {
+		ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
+	}
+	return sequenceLength;
+}
+
+static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
+{
+	const BYTE *ip = (const BYTE *)seqStart;
+	const BYTE *const iend = ip + seqSize;
+	BYTE *const ostart = (BYTE * const)dst;
+	BYTE *const oend = ostart + maxDstSize;
+	BYTE *op = ostart;
+	const BYTE *litPtr = dctx->litPtr;
+	const BYTE *const litEnd = litPtr + dctx->litSize;
+	const BYTE *const base = (const BYTE *)(dctx->base);
+	const BYTE *const vBase = (const BYTE *)(dctx->vBase);
+	const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
+	unsigned const windowSize = dctx->fParams.windowSize;
+	int nbSeq;
+
+	/* Build Decoding Tables */
+	{
+		size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
+		if (ZSTD_isError(seqHSize))
+			return seqHSize;
+		ip += seqHSize;
+	}
+
+	/* Regen sequences */
+	if (nbSeq) {
+#define STORED_SEQS 4
+#define STOSEQ_MASK (STORED_SEQS - 1)
+#define ADVANCED_SEQS 4
+		seq_t *sequences = (seq_t *)dctx->entropy.workspace;
+		int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+		seqState_t seqState;
+		int seqNb;
+		ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS);
+		dctx->fseEntropy = 1;
+		{
+			U32 i;
+			for (i = 0; i < ZSTD_REP_NUM; i++)
+				seqState.prevOffset[i] = dctx->entropy.rep[i];
+		}
+		seqState.base = base;
+		seqState.pos = (size_t)(op - base);
+		seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
+		CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
+		FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+		FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+		FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+		/* prepare in advance */
+		for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) {
+			sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
+		}
+		if (seqNb < seqAdvance)
+			return ERROR(corruption_detected);
+
+		/* decode and decompress */
+		for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) {
+			seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
+			size_t const oneSeqSize =
+			    ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
+			if (ZSTD_isError(oneSeqSize))
+				return oneSeqSize;
+			ZSTD_PREFETCH(sequence.match);
+			sequences[seqNb & STOSEQ_MASK] = sequence;
+			op += oneSeqSize;
+		}
+		if (seqNb < nbSeq)
+			return ERROR(corruption_detected);
+
+		/* finish queue */
+		seqNb -= seqAdvance;
+		for (; seqNb < nbSeq; seqNb++) {
+			size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
+			if (ZSTD_isError(oneSeqSize))
+				return oneSeqSize;
+			op += oneSeqSize;
+		}
+
+		/* save reps for next block */
+		{
+			U32 i;
+			for (i = 0; i < ZSTD_REP_NUM; i++)
+				dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
+		}
+	}
+
+	/* last literal segment */
+	{
+		size_t const lastLLSize = litEnd - litPtr;
+		if (lastLLSize > (size_t)(oend - op))
+			return ERROR(dstSize_tooSmall);
+		memcpy(op, litPtr, lastLLSize);
+		op += lastLLSize;
+	}
+
+	return op - ostart;
+}
+
+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{ /* blockType == blockCompressed */
+	const BYTE *ip = (const BYTE *)src;
+
+	if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX)
+		return ERROR(srcSize_wrong);
+
+	/* Decode literals section */
+	{
+		size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+		if (ZSTD_isError(litCSize))
+			return litCSize;
+		ip += litCSize;
+		srcSize -= litCSize;
+	}
+	if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bit x86, as it is detrimental to performance, */
+				/* most likely because of register pressure; */
+				/* if that is the correct cause, 32-bit ARM should be affected differently, */
+				/* and it would be worth testing on real ARM hardware to see whether the prefetch version improves speed */
+		if (dctx->fParams.windowSize > (1 << 23))
+			return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
+	return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
+
+static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst)
+{
+	if (dst != dctx->previousDstEnd) { /* not contiguous */
+		dctx->dictEnd = dctx->previousDstEnd;
+		dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
+		dctx->base = dst;
+		dctx->previousDstEnd = dst;
+	}
+}
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	size_t dSize;
+	ZSTD_checkContinuity(dctx, dst);
+	dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+	dctx->previousDstEnd = (char *)dst + dSize;
+	return dSize;
+}
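+
+/*
+ * Illustrative usage sketch (not part of the original patch): the block-level
+ * API uses a dctx initialized exactly like the frame-level one; the workspace
+ * handling mirrors ZSTD_initDCtx() above.  ERROR(GENERIC) is only a
+ * placeholder failure code for the sketch.
+ */
+static size_t zstd_example_decompress_block(void *wksp, size_t wkspSize, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	ZSTD_DCtx *const dctx = ZSTD_initDCtx(wksp, wkspSize); /* wkspSize >= ZSTD_DCtxWorkspaceBound() */
+
+	if (!dctx)
+		return ERROR(GENERIC);
+	return ZSTD_decompressBlock(dctx, dst, dstCapacity, src, srcSize);
+}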
+
+/** ZSTD_insertBlock() :
+	insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
+{
+	ZSTD_checkContinuity(dctx, blockStart);
+	dctx->previousDstEnd = (const char *)blockStart + blockSize;
+	return blockSize;
+}
+
+size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length)
+{
+	if (length > dstCapacity)
+		return ERROR(dstSize_tooSmall);
+	memset(dst, byte, length);
+	return length;
+}
+
+/** ZSTD_findFrameCompressedSize() :
+ *  compatible with legacy mode
+ *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ *  `srcSize` must be at least as large as the frame contained
+ *  @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+	if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+		return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4);
+	} else {
+		const BYTE *ip = (const BYTE *)src;
+		const BYTE *const ipstart = ip;
+		size_t remainingSize = srcSize;
+		ZSTD_frameParams fParams;
+
+		size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
+		if (ZSTD_isError(headerSize))
+			return headerSize;
+
+		/* Frame Header */
+		{
+			size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
+			if (ZSTD_isError(ret))
+				return ret;
+			if (ret > 0)
+				return ERROR(srcSize_wrong);
+		}
+
+		ip += headerSize;
+		remainingSize -= headerSize;
+
+		/* Loop on each block */
+		while (1) {
+			blockProperties_t blockProperties;
+			size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+			if (ZSTD_isError(cBlockSize))
+				return cBlockSize;
+
+			if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+				return ERROR(srcSize_wrong);
+
+			ip += ZSTD_blockHeaderSize + cBlockSize;
+			remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+
+			if (blockProperties.lastBlock)
+				break;
+		}
+
+		if (fParams.checksumFlag) { /* Frame content checksum */
+			if (remainingSize < 4)
+				return ERROR(srcSize_wrong);
+			ip += 4;
+			remainingSize -= 4;
+		}
+
+		return ip - ipstart;
+	}
+}
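+
+/* Example (illustrative sketch, not part of the original zstd sources) :
+ * walking a buffer that may contain several concatenated frames, using
+ * ZSTD_findFrameCompressedSize() to locate each frame boundary. `src` and
+ * `srcSize` are caller-provided.
+ *
+ *	static size_t count_frames(const void *src, size_t srcSize)
+ *	{
+ *		const BYTE *ip = (const BYTE *)src;
+ *		size_t nbFrames = 0;
+ *		while (srcSize > 0) {
+ *			size_t const frameSize = ZSTD_findFrameCompressedSize(ip, srcSize);
+ *			if (ZSTD_isError(frameSize))
+ *				return frameSize;
+ *			ip += frameSize;
+ *			srcSize -= frameSize;
+ *			nbFrames++;
+ *		}
+ *		return nbFrames;
+ *	}
+ */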
+
+/*! ZSTD_decompressFrame() :
+*   @dctx must be properly initialized */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr)
+{
+	const BYTE *ip = (const BYTE *)(*srcPtr);
+	BYTE *const ostart = (BYTE * const)dst;
+	BYTE *const oend = ostart + dstCapacity;
+	BYTE *op = ostart;
+	size_t remainingSize = *srcSizePtr;
+
+	/* check */
+	if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize)
+		return ERROR(srcSize_wrong);
+
+	/* Frame Header */
+	{
+		size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
+		if (ZSTD_isError(frameHeaderSize))
+			return frameHeaderSize;
+		if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize)
+			return ERROR(srcSize_wrong);
+		CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
+		ip += frameHeaderSize;
+		remainingSize -= frameHeaderSize;
+	}
+
+	/* Loop on each block */
+	while (1) {
+		size_t decodedSize;
+		blockProperties_t blockProperties;
+		size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+		if (ZSTD_isError(cBlockSize))
+			return cBlockSize;
+
+		ip += ZSTD_blockHeaderSize;
+		remainingSize -= ZSTD_blockHeaderSize;
+		if (cBlockSize > remainingSize)
+			return ERROR(srcSize_wrong);
+
+		switch (blockProperties.blockType) {
+		case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break;
+		case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break;
+		case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break;
+		case bt_reserved:
+		default: return ERROR(corruption_detected);
+		}
+
+		if (ZSTD_isError(decodedSize))
+			return decodedSize;
+		if (dctx->fParams.checksumFlag)
+			xxh64_update(&dctx->xxhState, op, decodedSize);
+		op += decodedSize;
+		ip += cBlockSize;
+		remainingSize -= cBlockSize;
+		if (blockProperties.lastBlock)
+			break;
+	}
+
+	if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+		U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
+		U32 checkRead;
+		if (remainingSize < 4)
+			return ERROR(checksum_wrong);
+		checkRead = ZSTD_readLE32(ip);
+		if (checkRead != checkCalc)
+			return ERROR(checksum_wrong);
+		ip += 4;
+		remainingSize -= 4;
+	}
+
+	/* Allow caller to get size read */
+	*srcPtr = ip;
+	*srcSizePtr = remainingSize;
+	return op - ostart;
+}
+
+static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict);
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict);
+
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
+					const ZSTD_DDict *ddict)
+{
+	void *const dststart = dst;
+
+	if (ddict) {
+		if (dict) {
+			/* programmer error, these two cases should be mutually exclusive */
+			return ERROR(GENERIC);
+		}
+
+		dict = ZSTD_DDictDictContent(ddict);
+		dictSize = ZSTD_DDictDictSize(ddict);
+	}
+
+	while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+		U32 magicNumber;
+
+		magicNumber = ZSTD_readLE32(src);
+		if (magicNumber != ZSTD_MAGICNUMBER) {
+			if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+				size_t skippableSize;
+				if (srcSize < ZSTD_skippableHeaderSize)
+					return ERROR(srcSize_wrong);
+				skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
+				if (srcSize < skippableSize) {
+					return ERROR(srcSize_wrong);
+				}
+
+				src = (const BYTE *)src + skippableSize;
+				srcSize -= skippableSize;
+				continue;
+			} else {
+				return ERROR(prefix_unknown);
+			}
+		}
+
+		if (ddict) {
+			/* we were called from ZSTD_decompress_usingDDict */
+			ZSTD_refDDict(dctx, ddict);
+		} else {
+			/* this will initialize correctly with no dict if dict == NULL, so
+			 * use this in all cases but ddict */
+			CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
+		}
+		ZSTD_checkContinuity(dctx, dst);
+
+		{
+			const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize);
+			if (ZSTD_isError(res))
+				return res;
+			/* don't need to bounds check this, ZSTD_decompressFrame will have
+			 * already */
+			dst = (BYTE *)dst + res;
+			dstCapacity -= res;
+		}
+	}
+
+	if (srcSize)
+		return ERROR(srcSize_wrong); /* input not entirely consumed */
+
+	return (BYTE *)dst - (BYTE *)dststart;
+}
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
+{
+	return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+}
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
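+
+/* Example (illustrative sketch, not part of the original zstd sources) :
+ * one-shot decompression with a caller-provided workspace, assuming the
+ * workspace-based helpers ZSTD_DCtxWorkspaceBound() and ZSTD_initDCtx()
+ * defined earlier in this file. `dst`, `dstCapacity`, `src` and `srcSize`
+ * are caller-provided.
+ *
+ *	size_t const wkspSize = ZSTD_DCtxWorkspaceBound();
+ *	void *const wksp = kmalloc(wkspSize, GFP_KERNEL);
+ *	ZSTD_DCtx *const dctx = wksp ? ZSTD_initDCtx(wksp, wkspSize) : NULL;
+ *	size_t dSize = ERROR(memory_allocation);
+ *
+ *	if (dctx)
+ *		dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ *	kfree(wksp);
+ *	return dSize;
+ */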
+
+/*-**************************************
+*   Advanced Streaming Decompression API
+*   Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; }
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx)
+{
+	switch (dctx->stage) {
+	default: /* should not happen */
+	case ZSTDds_getFrameHeaderSize:
+	case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader;
+	case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader;
+	case ZSTDds_decompressBlock: return ZSTDnit_block;
+	case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock;
+	case ZSTDds_checkChecksum: return ZSTDnit_checksum;
+	case ZSTDds_decodeSkippableHeader:
+	case ZSTDds_skipFrame: return ZSTDnit_skippableFrame;
+	}
+}
+
+int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */
+
+/** ZSTD_decompressContinue() :
+*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+*             or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	/* Sanity check */
+	if (srcSize != dctx->expected)
+		return ERROR(srcSize_wrong);
+	if (dstCapacity)
+		ZSTD_checkContinuity(dctx, dst);
+
+	switch (dctx->stage) {
+	case ZSTDds_getFrameHeaderSize:
+		if (srcSize != ZSTD_frameHeaderSize_prefix)
+			return ERROR(srcSize_wrong);					/* impossible */
+		if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+			memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
+			dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
+			dctx->stage = ZSTDds_decodeSkippableHeader;
+			return 0;
+		}
+		dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
+		if (ZSTD_isError(dctx->headerSize))
+			return dctx->headerSize;
+		memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
+		if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
+			dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
+			dctx->stage = ZSTDds_decodeFrameHeader;
+			return 0;
+		}
+		dctx->expected = 0; /* not necessary to copy more */
+
+	case ZSTDds_decodeFrameHeader:
+		memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
+		CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+		dctx->expected = ZSTD_blockHeaderSize;
+		dctx->stage = ZSTDds_decodeBlockHeader;
+		return 0;
+
+	case ZSTDds_decodeBlockHeader: {
+		blockProperties_t bp;
+		size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+		if (ZSTD_isError(cBlockSize))
+			return cBlockSize;
+		dctx->expected = cBlockSize;
+		dctx->bType = bp.blockType;
+		dctx->rleSize = bp.origSize;
+		if (cBlockSize) {
+			dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+			return 0;
+		}
+		/* empty block */
+		if (bp.lastBlock) {
+			if (dctx->fParams.checksumFlag) {
+				dctx->expected = 4;
+				dctx->stage = ZSTDds_checkChecksum;
+			} else {
+				dctx->expected = 0; /* end of frame */
+				dctx->stage = ZSTDds_getFrameHeaderSize;
+			}
+		} else {
+			dctx->expected = 3; /* go directly to next header */
+			dctx->stage = ZSTDds_decodeBlockHeader;
+		}
+		return 0;
+	}
+	case ZSTDds_decompressLastBlock:
+	case ZSTDds_decompressBlock: {
+		size_t rSize;
+		switch (dctx->bType) {
+		case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break;
+		case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break;
+		case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break;
+		case bt_reserved: /* should never happen */
+		default: return ERROR(corruption_detected);
+		}
+		if (ZSTD_isError(rSize))
+			return rSize;
+		if (dctx->fParams.checksumFlag)
+			xxh64_update(&dctx->xxhState, dst, rSize);
+
+		if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
+			if (dctx->fParams.checksumFlag) {	/* another round for frame checksum */
+				dctx->expected = 4;
+				dctx->stage = ZSTDds_checkChecksum;
+			} else {
+				dctx->expected = 0; /* ends here */
+				dctx->stage = ZSTDds_getFrameHeaderSize;
+			}
+		} else {
+			dctx->stage = ZSTDds_decodeBlockHeader;
+			dctx->expected = ZSTD_blockHeaderSize;
+			dctx->previousDstEnd = (char *)dst + rSize;
+		}
+		return rSize;
+	}
+	case ZSTDds_checkChecksum: {
+		U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
+		U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
+		if (check32 != h32)
+			return ERROR(checksum_wrong);
+		dctx->expected = 0;
+		dctx->stage = ZSTDds_getFrameHeaderSize;
+		return 0;
+	}
+	case ZSTDds_decodeSkippableHeader: {
+		memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
+		dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4);
+		dctx->stage = ZSTDds_skipFrame;
+		return 0;
+	}
+	case ZSTDds_skipFrame: {
+		dctx->expected = 0;
+		dctx->stage = ZSTDds_getFrameHeaderSize;
+		return 0;
+	}
+	default:
+		return ERROR(GENERIC); /* impossible */
+	}
+}
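+
+/* Example (illustrative sketch, not part of the original zstd sources) :
+ * a minimal bufferless loop around ZSTD_decompressContinue(). It assumes the
+ * whole frame is already available in `src`, `dst` is large enough, and
+ * `dctx` has been prepared with ZSTD_decompressBegin() or
+ * ZSTD_decompressBegin_usingDict().
+ *
+ *	const BYTE *ip = (const BYTE *)src;
+ *	BYTE *op = (BYTE *)dst;
+ *
+ *	while (1) {
+ *		size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *		size_t generated;
+ *		if (toRead == 0)
+ *			break;
+ *		generated = ZSTD_decompressContinue(dctx, op, (BYTE *)dst + dstCapacity - op, ip, toRead);
+ *		if (ZSTD_isError(generated))
+ *			return generated;
+ *		ip += toRead;
+ *		op += generated;
+ *	}
+ *	return (size_t)(op - (BYTE *)dst);
+ */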
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
+{
+	dctx->dictEnd = dctx->previousDstEnd;
+	dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
+	dctx->base = dict;
+	dctx->previousDstEnd = (const char *)dict + dictSize;
+	return 0;
+}
+
+/* ZSTD_loadEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary
+ * @return : size of entropy tables read */
+static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize)
+{
+	const BYTE *dictPtr = (const BYTE *)dict;
+	const BYTE *const dictEnd = dictPtr + dictSize;
+
+	if (dictSize <= 8)
+		return ERROR(dictionary_corrupted);
+	dictPtr += 8; /* skip header = magic + dictID */
+
+	{
+		size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace));
+		if (HUF_isError(hSize))
+			return ERROR(dictionary_corrupted);
+		dictPtr += hSize;
+	}
+
+	{
+		short offcodeNCount[MaxOff + 1];
+		U32 offcodeMaxValue = MaxOff, offcodeLog;
+		size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
+		if (FSE_isError(offcodeHeaderSize))
+			return ERROR(dictionary_corrupted);
+		if (offcodeLog > OffFSELog)
+			return ERROR(dictionary_corrupted);
+		CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
+		dictPtr += offcodeHeaderSize;
+	}
+
+	{
+		short matchlengthNCount[MaxML + 1];
+		unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+		size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
+		if (FSE_isError(matchlengthHeaderSize))
+			return ERROR(dictionary_corrupted);
+		if (matchlengthLog > MLFSELog)
+			return ERROR(dictionary_corrupted);
+		CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
+		dictPtr += matchlengthHeaderSize;
+	}
+
+	{
+		short litlengthNCount[MaxLL + 1];
+		unsigned litlengthMaxValue = MaxLL, litlengthLog;
+		size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
+		if (FSE_isError(litlengthHeaderSize))
+			return ERROR(dictionary_corrupted);
+		if (litlengthLog > LLFSELog)
+			return ERROR(dictionary_corrupted);
+		CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
+		dictPtr += litlengthHeaderSize;
+	}
+
+	if (dictPtr + 12 > dictEnd)
+		return ERROR(dictionary_corrupted);
+	{
+		int i;
+		size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12));
+		for (i = 0; i < 3; i++) {
+			U32 const rep = ZSTD_readLE32(dictPtr);
+			dictPtr += 4;
+			if (rep == 0 || rep >= dictContentSize)
+				return ERROR(dictionary_corrupted);
+			entropy->rep[i] = rep;
+		}
+	}
+
+	return dictPtr - (const BYTE *)dict;
+}
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
+{
+	if (dictSize < 8)
+		return ZSTD_refDictContent(dctx, dict, dictSize);
+	{
+		U32 const magic = ZSTD_readLE32(dict);
+		if (magic != ZSTD_DICT_MAGIC) {
+			return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
+		}
+	}
+	dctx->dictID = ZSTD_readLE32((const char *)dict + 4);
+
+	/* load entropy tables */
+	{
+		size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
+		if (ZSTD_isError(eSize))
+			return ERROR(dictionary_corrupted);
+		dict = (const char *)dict + eSize;
+		dictSize -= eSize;
+	}
+	dctx->litEntropy = dctx->fseEntropy = 1;
+
+	/* reference dictionary content */
+	return ZSTD_refDictContent(dctx, dict, dictSize);
+}
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
+{
+	CHECK_F(ZSTD_decompressBegin(dctx));
+	if (dict && dictSize)
+		CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
+	return 0;
+}
+
+/* ======   ZSTD_DDict   ====== */
+
+struct ZSTD_DDict_s {
+	void *dictBuffer;
+	const void *dictContent;
+	size_t dictSize;
+	ZSTD_entropyTables_t entropy;
+	U32 dictID;
+	U32 entropyPresent;
+	ZSTD_customMem cMem;
+}; /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); }
+
+static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; }
+
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; }
+
+static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict)
+{
+	ZSTD_decompressBegin(dstDCtx); /* init */
+	if (ddict) {		       /* support refDDict on NULL */
+		dstDCtx->dictID = ddict->dictID;
+		dstDCtx->base = ddict->dictContent;
+		dstDCtx->vBase = ddict->dictContent;
+		dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize;
+		dstDCtx->previousDstEnd = dstDCtx->dictEnd;
+		if (ddict->entropyPresent) {
+			dstDCtx->litEntropy = 1;
+			dstDCtx->fseEntropy = 1;
+			dstDCtx->LLTptr = ddict->entropy.LLTable;
+			dstDCtx->MLTptr = ddict->entropy.MLTable;
+			dstDCtx->OFTptr = ddict->entropy.OFTable;
+			dstDCtx->HUFptr = ddict->entropy.hufTable;
+			dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
+			dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
+			dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
+		} else {
+			dstDCtx->litEntropy = 0;
+			dstDCtx->fseEntropy = 0;
+		}
+	}
+}
+
+static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict)
+{
+	ddict->dictID = 0;
+	ddict->entropyPresent = 0;
+	if (ddict->dictSize < 8)
+		return 0;
+	{
+		U32 const magic = ZSTD_readLE32(ddict->dictContent);
+		if (magic != ZSTD_DICT_MAGIC)
+			return 0; /* pure content mode */
+	}
+	ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4);
+
+	/* load entropy tables */
+	CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted);
+	ddict->entropyPresent = 1;
+	return 0;
+}
+
+static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
+{
+	if (!customMem.customAlloc || !customMem.customFree)
+		return NULL;
+
+	{
+		ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+		if (!ddict)
+			return NULL;
+		ddict->cMem = customMem;
+
+		if ((byReference) || (!dict) || (!dictSize)) {
+			ddict->dictBuffer = NULL;
+			ddict->dictContent = dict;
+		} else {
+			void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
+			if (!internalBuffer) {
+				ZSTD_freeDDict(ddict);
+				return NULL;
+			}
+			memcpy(internalBuffer, dict, dictSize);
+			ddict->dictBuffer = internalBuffer;
+			ddict->dictContent = internalBuffer;
+		}
+		ddict->dictSize = dictSize;
+		ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+		/* parse dictionary content */
+		{
+			size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
+			if (ZSTD_isError(errorCode)) {
+				ZSTD_freeDDict(ddict);
+				return NULL;
+			}
+		}
+
+		return ddict;
+	}
+}
+
+/*! ZSTD_initDDict() :
+*   Create a digested dictionary, to start decompression without startup delay.
+*   In this workspace-based implementation, `dict` content is referenced rather than copied,
+*   so the `dict` buffer must remain valid for the lifetime of the `ZSTD_DDict` */
+ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize)
+{
+	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
+	return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem);
+}
+
+size_t ZSTD_freeDDict(ZSTD_DDict *ddict)
+{
+	if (ddict == NULL)
+		return 0; /* support free on NULL */
+	{
+		ZSTD_customMem const cMem = ddict->cMem;
+		ZSTD_free(ddict->dictBuffer, cMem);
+		ZSTD_free(ddict, cMem);
+		return 0;
+	}
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within dictionary.
+ *  if @return == 0, the dictionary is not conformant with Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize)
+{
+	if (dictSize < 8)
+		return 0;
+	if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC)
+		return 0;
+	return ZSTD_readLE32((const char *)dict + 4);
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict)
+{
+	if (ddict == NULL)
+		return 0;
+	return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ *  - This is not a Zstandard frame.
+ *  To identify the exact failure cause, use ZSTD_getFrameParams(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize)
+{
+	ZSTD_frameParams zfp = {0, 0, 0, 0};
+	size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
+	if (ZSTD_isError(hError))
+		return 0;
+	return zfp.dictID;
+}
+
+/*! ZSTD_decompress_usingDDict() :
+*   Decompression using a pre-digested Dictionary
+*   Use dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
+{
+	/* pass content and size in case legacy frames are encountered */
+	return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict);
+}
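+
+/* Example (illustrative sketch, not part of the original zstd sources) :
+ * digesting a dictionary once and reusing it for several frames. `dctx` is an
+ * already initialized ZSTD_DCtx and `dictBuf`/`dictLen` are caller-provided;
+ * since ZSTD_initDDict() references the dictionary content, `dictBuf` must
+ * stay allocated for as long as the DDict is used.
+ *
+ *	size_t const ddictWkspSize = ZSTD_DDictWorkspaceBound();
+ *	void *const ddictWksp = kmalloc(ddictWkspSize, GFP_KERNEL);
+ *	ZSTD_DDict *const ddict = ddictWksp ? ZSTD_initDDict(dictBuf, dictLen, ddictWksp, ddictWkspSize) : NULL;
+ *	size_t ret = ERROR(memory_allocation);
+ *
+ *	if (ddict)
+ *		ret = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
+ *	kfree(ddictWksp);
+ *	return ret;
+ */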
+
+/*=====================================
+*   Streaming decompression
+*====================================*/
+
+typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+/* *** Resource management *** */
+struct ZSTD_DStream_s {
+	ZSTD_DCtx *dctx;
+	ZSTD_DDict *ddictLocal;
+	const ZSTD_DDict *ddict;
+	ZSTD_frameParams fParams;
+	ZSTD_dStreamStage stage;
+	char *inBuff;
+	size_t inBuffSize;
+	size_t inPos;
+	size_t maxWindowSize;
+	char *outBuff;
+	size_t outBuffSize;
+	size_t outStart;
+	size_t outEnd;
+	size_t blockSize;
+	BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */
+	size_t lhSize;
+	ZSTD_customMem customMem;
+	void *legacyContext;
+	U32 previousLegacyVersion;
+	U32 legacyVersion;
+	U32 hostageByte;
+}; /* typedef'd to ZSTD_DStream within "zstd.h" */
+
+size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize)
+{
+	size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
+	size_t const inBuffSize = blockSize;
+	size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
+	return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
+}
+
+static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+	ZSTD_DStream *zds;
+
+	if (!customMem.customAlloc || !customMem.customFree)
+		return NULL;
+
+	zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
+	if (zds == NULL)
+		return NULL;
+	memset(zds, 0, sizeof(ZSTD_DStream));
+	memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
+	zds->dctx = ZSTD_createDCtx_advanced(customMem);
+	if (zds->dctx == NULL) {
+		ZSTD_freeDStream(zds);
+		return NULL;
+	}
+	zds->stage = zdss_init;
+	zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+	return zds;
+}
+
+ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize)
+{
+	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
+	ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem);
+	if (!zds) {
+		return NULL;
+	}
+
+	zds->maxWindowSize = maxWindowSize;
+	zds->stage = zdss_loadHeader;
+	zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+	ZSTD_freeDDict(zds->ddictLocal);
+	zds->ddictLocal = NULL;
+	zds->ddict = zds->ddictLocal;
+	zds->legacyVersion = 0;
+	zds->hostageByte = 0;
+
+	{
+		size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
+		size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
+
+		zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem);
+		zds->inBuffSize = blockSize;
+		zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem);
+		zds->outBuffSize = neededOutSize;
+		if (zds->inBuff == NULL || zds->outBuff == NULL) {
+			ZSTD_freeDStream(zds);
+			return NULL;
+		}
+	}
+	return zds;
+}
+
+ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize)
+{
+	ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
+	if (zds) {
+		zds->ddict = ddict;
+	}
+	return zds;
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream *zds)
+{
+	if (zds == NULL)
+		return 0; /* support free on null */
+	{
+		ZSTD_customMem const cMem = zds->customMem;
+		ZSTD_freeDCtx(zds->dctx);
+		zds->dctx = NULL;
+		ZSTD_freeDDict(zds->ddictLocal);
+		zds->ddictLocal = NULL;
+		ZSTD_free(zds->inBuff, cMem);
+		zds->inBuff = NULL;
+		ZSTD_free(zds->outBuff, cMem);
+		zds->outBuff = NULL;
+		ZSTD_free(zds, cMem);
+		return 0;
+	}
+}
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
+
+size_t ZSTD_resetDStream(ZSTD_DStream *zds)
+{
+	zds->stage = zdss_loadHeader;
+	zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+	zds->legacyVersion = 0;
+	zds->hostageByte = 0;
+	return ZSTD_frameHeaderSize_prefix;
+}
+
+/* *****   Decompression   ***** */
+
+ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
+{
+	size_t const length = MIN(dstCapacity, srcSize);
+	memcpy(dst, src, length);
+	return length;
+}
+
+size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
+{
+	const char *const istart = (const char *)(input->src) + input->pos;
+	const char *const iend = (const char *)(input->src) + input->size;
+	const char *ip = istart;
+	char *const ostart = (char *)(output->dst) + output->pos;
+	char *const oend = (char *)(output->dst) + output->size;
+	char *op = ostart;
+	U32 someMoreWork = 1;
+
+	while (someMoreWork) {
+		switch (zds->stage) {
+		case zdss_init:
+			ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
+						/* fall-through */
+
+		case zdss_loadHeader: {
+			size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
+			if (ZSTD_isError(hSize))
+				return hSize;
+			if (hSize != 0) {				   /* need more input */
+				size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
+				if (toLoad > (size_t)(iend - ip)) {	/* not enough input to load full header */
+					memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip);
+					zds->lhSize += iend - ip;
+					input->pos = input->size;
+					return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) +
+					       ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
+				}
+				memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad);
+				zds->lhSize = hSize;
+				ip += toLoad;
+				break;
+			}
+
+			/* check for single-pass mode opportunity */
+			if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
+			    && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) {
+				size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart);
+				if (cSize <= (size_t)(iend - istart)) {
+					size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict);
+					if (ZSTD_isError(decompressedSize))
+						return decompressedSize;
+					ip = istart + cSize;
+					op += decompressedSize;
+					zds->dctx->expected = 0;
+					zds->stage = zdss_init;
+					someMoreWork = 0;
+					break;
+				}
+			}
+
+			/* Consume header */
+			ZSTD_refDDict(zds->dctx, zds->ddict);
+			{
+				size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
+				CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
+				{
+					size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+					CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size));
+				}
+			}
+
+			zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+			if (zds->fParams.windowSize > zds->maxWindowSize)
+				return ERROR(frameParameter_windowTooLarge);
+
+			/* Buffers are preallocated, but double check */
+			{
+				size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
+				size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
+				if (zds->inBuffSize < blockSize) {
+					return ERROR(GENERIC);
+				}
+				if (zds->outBuffSize < neededOutSize) {
+					return ERROR(GENERIC);
+				}
+				zds->blockSize = blockSize;
+			}
+			zds->stage = zdss_read;
+		}
+		/* pass-through */
+
+		case zdss_read: {
+			size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+			if (neededInSize == 0) { /* end of frame */
+				zds->stage = zdss_init;
+				someMoreWork = 0;
+				break;
+			}
+			if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */
+				const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
+				size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart,
+										   (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize);
+				if (ZSTD_isError(decodedSize))
+					return decodedSize;
+				ip += neededInSize;
+				if (!decodedSize && !isSkipFrame)
+					break; /* this was just a header */
+				zds->outEnd = zds->outStart + decodedSize;
+				zds->stage = zdss_flush;
+				break;
+			}
+			if (ip == iend) {
+				someMoreWork = 0;
+				break;
+			} /* no more input */
+			zds->stage = zdss_load;
+			/* pass-through */
+		}
+
+		case zdss_load: {
+			size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+			size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
+			size_t loadedSize;
+			if (toLoad > zds->inBuffSize - zds->inPos)
+				return ERROR(corruption_detected); /* should never happen */
+			loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip);
+			ip += loadedSize;
+			zds->inPos += loadedSize;
+			if (loadedSize < toLoad) {
+				someMoreWork = 0;
+				break;
+			} /* not enough input, wait for more */
+
+			/* decode loaded input */
+			{
+				const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
+				size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
+										   zds->inBuff, neededInSize);
+				if (ZSTD_isError(decodedSize))
+					return decodedSize;
+				zds->inPos = 0; /* input is consumed */
+				if (!decodedSize && !isSkipFrame) {
+					zds->stage = zdss_read;
+					break;
+				} /* this was just a header */
+				zds->outEnd = zds->outStart + decodedSize;
+				zds->stage = zdss_flush;
+				/* pass-through */
+			}
+		}
+
+		case zdss_flush: {
+			size_t const toFlushSize = zds->outEnd - zds->outStart;
+			size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize);
+			op += flushedSize;
+			zds->outStart += flushedSize;
+			if (flushedSize == toFlushSize) { /* flush completed */
+				zds->stage = zdss_read;
+				if (zds->outStart + zds->blockSize > zds->outBuffSize)
+					zds->outStart = zds->outEnd = 0;
+				break;
+			}
+			/* cannot complete flush */
+			someMoreWork = 0;
+			break;
+		}
+		default:
+			return ERROR(GENERIC); /* impossible */
+		}
+	}
+
+	/* result */
+	input->pos += (size_t)(ip - istart);
+	output->pos += (size_t)(op - ostart);
+	{
+		size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
+		if (!nextSrcSizeHint) {			    /* frame fully decoded */
+			if (zds->outEnd == zds->outStart) { /* output fully flushed */
+				if (zds->hostageByte) {
+					if (input->pos >= input->size) {
+						zds->stage = zdss_read;
+						return 1;
+					}	     /* can't release hostage (not present) */
+					input->pos++; /* release hostage */
+				}
+				return 0;
+			}
+			if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+				input->pos--;    /* note : pos > 0, otherwise, impossible to finish reading last block */
+				zds->hostageByte = 1;
+			}
+			return 1;
+		}
+		nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */
+		if (zds->inPos > nextSrcSizeHint)
+			return ERROR(GENERIC); /* should never happen */
+		nextSrcSizeHint -= zds->inPos; /* already loaded*/
+		return nextSrcSizeHint;
+	}
+}
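+
+/* Example (illustrative sketch, not part of the original zstd sources) :
+ * decompressing a single frame through the streaming API. `windowSize`,
+ * `inBuf`/`inLen` and `outBuf`/`outLen` are caller-provided; the frame is
+ * assumed to fit into `outBuf`, and error handling is shortened.
+ *
+ *	size_t const wkspSize = ZSTD_DStreamWorkspaceBound(windowSize);
+ *	void *const wksp = vmalloc(wkspSize);
+ *	ZSTD_DStream *const zds = wksp ? ZSTD_initDStream(windowSize, wksp, wkspSize) : NULL;
+ *	ZSTD_inBuffer input = { .src = inBuf, .size = inLen, .pos = 0 };
+ *	ZSTD_outBuffer output = { .dst = outBuf, .size = outLen, .pos = 0 };
+ *
+ *	while (zds && input.pos < input.size && output.pos < output.size) {
+ *		size_t const ret = ZSTD_decompressStream(zds, &output, &input);
+ *		if (ZSTD_isError(ret) || ret == 0)
+ *			break;
+ *	}
+ *	vfree(wksp);
+ */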
+
+EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound);
+EXPORT_SYMBOL(ZSTD_initDCtx);
+EXPORT_SYMBOL(ZSTD_decompressDCtx);
+EXPORT_SYMBOL(ZSTD_decompress_usingDict);
+
+EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound);
+EXPORT_SYMBOL(ZSTD_initDDict);
+EXPORT_SYMBOL(ZSTD_decompress_usingDDict);
+
+EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound);
+EXPORT_SYMBOL(ZSTD_initDStream);
+EXPORT_SYMBOL(ZSTD_initDStream_usingDDict);
+EXPORT_SYMBOL(ZSTD_resetDStream);
+EXPORT_SYMBOL(ZSTD_decompressStream);
+EXPORT_SYMBOL(ZSTD_DStreamInSize);
+EXPORT_SYMBOL(ZSTD_DStreamOutSize);
+
+EXPORT_SYMBOL(ZSTD_findFrameCompressedSize);
+EXPORT_SYMBOL(ZSTD_getFrameContentSize);
+EXPORT_SYMBOL(ZSTD_findDecompressedSize);
+
+EXPORT_SYMBOL(ZSTD_isFrame);
+EXPORT_SYMBOL(ZSTD_getDictID_fromDict);
+EXPORT_SYMBOL(ZSTD_getDictID_fromDDict);
+EXPORT_SYMBOL(ZSTD_getDictID_fromFrame);
+
+EXPORT_SYMBOL(ZSTD_getFrameParams);
+EXPORT_SYMBOL(ZSTD_decompressBegin);
+EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict);
+EXPORT_SYMBOL(ZSTD_copyDCtx);
+EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress);
+EXPORT_SYMBOL(ZSTD_decompressContinue);
+EXPORT_SYMBOL(ZSTD_nextInputType);
+
+EXPORT_SYMBOL(ZSTD_decompressBlock);
+EXPORT_SYMBOL(ZSTD_insertBlock);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Decompressor");
diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
new file mode 100644
index 0000000..2b0a643
--- /dev/null
+++ b/lib/zstd/entropy_common.c
@@ -0,0 +1,243 @@
+/*
+ * Common functions of New Generation Entropy library
+ * Copyright (C) 2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+
+/* *************************************
+*  Dependencies
+***************************************/
+#include "error_private.h" /* ERR_*, ERROR */
+#include "fse.h"
+#include "huf.h"
+#include "mem.h"
+
+/*===   Version   ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+/*===   Error Management   ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
+{
+	const BYTE *const istart = (const BYTE *)headerBuffer;
+	const BYTE *const iend = istart + hbSize;
+	const BYTE *ip = istart;
+	int nbBits;
+	int remaining;
+	int threshold;
+	U32 bitStream;
+	int bitCount;
+	unsigned charnum = 0;
+	int previous0 = 0;
+
+	if (hbSize < 4)
+		return ERROR(srcSize_wrong);
+	bitStream = ZSTD_readLE32(ip);
+	nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+	if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX)
+		return ERROR(tableLog_tooLarge);
+	bitStream >>= 4;
+	bitCount = 4;
+	*tableLogPtr = nbBits;
+	remaining = (1 << nbBits) + 1;
+	threshold = 1 << nbBits;
+	nbBits++;
+
+	while ((remaining > 1) & (charnum <= *maxSVPtr)) {
+		if (previous0) {
+			unsigned n0 = charnum;
+			while ((bitStream & 0xFFFF) == 0xFFFF) {
+				n0 += 24;
+				if (ip < iend - 5) {
+					ip += 2;
+					bitStream = ZSTD_readLE32(ip) >> bitCount;
+				} else {
+					bitStream >>= 16;
+					bitCount += 16;
+				}
+			}
+			while ((bitStream & 3) == 3) {
+				n0 += 3;
+				bitStream >>= 2;
+				bitCount += 2;
+			}
+			n0 += bitStream & 3;
+			bitCount += 2;
+			if (n0 > *maxSVPtr)
+				return ERROR(maxSymbolValue_tooSmall);
+			while (charnum < n0)
+				normalizedCounter[charnum++] = 0;
+			if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
+				ip += bitCount >> 3;
+				bitCount &= 7;
+				bitStream = ZSTD_readLE32(ip) >> bitCount;
+			} else {
+				bitStream >>= 2;
+			}
+		}
+		{
+			int const max = (2 * threshold - 1) - remaining;
+			int count;
+
+			if ((bitStream & (threshold - 1)) < (U32)max) {
+				count = bitStream & (threshold - 1);
+				bitCount += nbBits - 1;
+			} else {
+				count = bitStream & (2 * threshold - 1);
+				if (count >= threshold)
+					count -= max;
+				bitCount += nbBits;
+			}
+
+			count--;				 /* extra accuracy */
+			remaining -= count < 0 ? -count : count; /* -1 means +1 */
+			normalizedCounter[charnum++] = (short)count;
+			previous0 = !count;
+			while (remaining < threshold) {
+				nbBits--;
+				threshold >>= 1;
+			}
+
+			if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
+				ip += bitCount >> 3;
+				bitCount &= 7;
+			} else {
+				bitCount -= (int)(8 * (iend - 4 - ip));
+				ip = iend - 4;
+			}
+			bitStream = ZSTD_readLE32(ip) >> (bitCount & 31);
+		}
+	} /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
+	if (remaining != 1)
+		return ERROR(corruption_detected);
+	if (bitCount > 32)
+		return ERROR(corruption_detected);
+	*maxSVPtr = charnum - 1;
+
+	ip += (bitCount + 7) >> 3;
+	return ip - istart;
+}
+
+/*! HUF_readStats() :
+	Read compact Huffman tree, saved by HUF_writeCTable().
+	`huffWeight` is destination buffer.
+	`rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+	@return : size read from `src`, or an error code.
+	Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
+*/
+size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
+{
+	U32 weightTotal;
+	const BYTE *ip = (const BYTE *)src;
+	size_t iSize;
+	size_t oSize;
+
+	if (!srcSize)
+		return ERROR(srcSize_wrong);
+	iSize = ip[0];
+	/* memset(huffWeight, 0, hwSize);   */ /* is not necessary, even though some analyzers complain ... */
+
+	if (iSize >= 128) { /* special header */
+		oSize = iSize - 127;
+		iSize = ((oSize + 1) / 2);
+		if (iSize + 1 > srcSize)
+			return ERROR(srcSize_wrong);
+		if (oSize >= hwSize)
+			return ERROR(corruption_detected);
+		ip += 1;
+		{
+			U32 n;
+			for (n = 0; n < oSize; n += 2) {
+				huffWeight[n] = ip[n / 2] >> 4;
+				huffWeight[n + 1] = ip[n / 2] & 15;
+			}
+		}
+	} else {						 /* header compressed with FSE (normal case) */
+		if (iSize + 1 > srcSize)
+			return ERROR(srcSize_wrong);
+		oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */
+		if (FSE_isError(oSize))
+			return oSize;
+	}
+
+	/* collect weight stats */
+	memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+	weightTotal = 0;
+	{
+		U32 n;
+		for (n = 0; n < oSize; n++) {
+			if (huffWeight[n] >= HUF_TABLELOG_MAX)
+				return ERROR(corruption_detected);
+			rankStats[huffWeight[n]]++;
+			weightTotal += (1 << huffWeight[n]) >> 1;
+		}
+	}
+	if (weightTotal == 0)
+		return ERROR(corruption_detected);
+
+	/* get last non-null symbol weight (implied, total must be 2^n) */
+	{
+		U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+		if (tableLog > HUF_TABLELOG_MAX)
+			return ERROR(corruption_detected);
+		*tableLogPtr = tableLog;
+		/* determine last weight */
+		{
+			U32 const total = 1 << tableLog;
+			U32 const rest = total - weightTotal;
+			U32 const verif = 1 << BIT_highbit32(rest);
+			U32 const lastWeight = BIT_highbit32(rest) + 1;
+			if (verif != rest)
+				return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+			huffWeight[oSize] = (BYTE)lastWeight;
+			rankStats[lastWeight]++;
+		}
+	}
+
+	/* check tree construction validity */
+	if ((rankStats[1] < 2) || (rankStats[1] & 1))
+		return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+	/* results */
+	*nbSymbolsPtr = (U32)(oSize + 1);
+	return iSize + 1;
+}
diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h
new file mode 100644
index 0000000..1a60b31
--- /dev/null
+++ b/lib/zstd/error_private.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+/* ****************************************
+*  Dependencies
+******************************************/
+#include <linux/types.h> /* size_t */
+#include <linux/zstd.h>  /* enum list */
+
+/* ****************************************
+*  Compiler-specific
+******************************************/
+#define ERR_STATIC static __attribute__((unused))
+
+/*-****************************************
+*  Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+/*-****************************************
+*  Error codes handling
+******************************************/
+#define ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
+{
+	if (!ERR_isError(code))
+		return (ERR_enum)0;
+	return (ERR_enum)(0 - code);
+}
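+
+/* Example (illustrative, not part of the original sources) : error codes are
+ * stored as the top-most size_t values (above ERROR(maxCode)), so they never
+ * collide with valid sizes returned on success.
+ *
+ *	size_t const err = ERROR(srcSize_wrong);
+ *	if (ERR_isError(err))
+ *		pr_debug("zstd error code %d\n", (int)ERR_getErrorCode(err));
+ */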
+
+#endif /* ERROR_H_MODULE */
diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
new file mode 100644
index 0000000..7460ab0
--- /dev/null
+++ b/lib/zstd/fse.h
@@ -0,0 +1,575 @@
+/*
+ * FSE : Finite State Entropy codec
+ * Public Prototypes declaration
+ * Copyright (C) 2013-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+#ifndef FSE_H
+#define FSE_H
+
+/*-*****************************************
+*  Dependencies
+******************************************/
+#include <linux/types.h> /* size_t, ptrdiff_t */
+
+/*-*****************************************
+*  FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#define FSE_PUBLIC_API
+
+/*------   Version   ------*/
+#define FSE_VERSION_MAJOR 0
+#define FSE_VERSION_MINOR 9
+#define FSE_VERSION_RELEASE 0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
+
+/*-*****************************************
+*  Tool functions
+******************************************/
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
+
+/* Error Management */
+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+
+/*-*****************************************
+*  FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrences from source[] into table count[]
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+/* *** COMPRESSION *** */
+/*! FSE_optimalTableLog():
+	dynamically downsize 'tableLog' when conditions are met.
+	It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+	@return : recommended tableLog (necessarily <= 'maxTableLog') */
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+	normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+	'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+	@return : tableLog,
+			  or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_NCountWriteBound():
+	Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+	Typically useful for allocation purpose. */
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+	Compactly save 'normalizedCounter' into 'buffer'.
+	@return : size of the compressed table,
+			  or an errorCode, which can be tested using FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! Constructor and Destructor of FSE_CTable.
+	Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
+
+/*! FSE_compress_usingCTable():
+	Compress `src` using `ct` into `dst` which must be already allocated.
+	@return : size of compressed data (<= `dstCapacity`),
+			  or 0 if compressed data could not fit into `dst`,
+			  or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. FSE_count() does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
+FSE_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_headerBound().
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+	Read compactly saved 'normalizedCounter' from 'rBuffer'.
+	@return : size read from 'rBuffer',
+			  or an errorCode, which can be tested using FSE_isError().
+			  maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSE_DTable.
+	Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+
+/*! FSE_buildDTable():
+	Builds 'dt', which must be already allocated, using FSE_createDTable().
+	return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize);
+
+/*! FSE_decompress_usingDTable():
+	Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+	into `dst` which must be already allocated.
+	@return : size of regenerated data (necessarily <= `dstCapacity`),
+			  or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
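+
+/* Illustrative sketch of the steps above (not part of the original sources) :
+ * error checks are shortened, the table log is assumed to be <= 12, and the
+ * buffers are shown with automatic storage for brevity (real kernel code
+ * should allocate them). `cSrc`/`cSrcSize` and `dst`/`dstCapacity` are
+ * caller-provided.
+ *
+ *	short norm[256];
+ *	FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];
+ *	U32 wksp[FSE_DTABLE_SIZE_U32(12)];
+ *	unsigned maxSym = 255, tableLog;
+ *	size_t regenSize = 0;
+ *	size_t const hdrSize = FSE_readNCount(norm, &maxSym, &tableLog, cSrc, cSrcSize);
+ *
+ *	if (!FSE_isError(hdrSize) && tableLog <= 12
+ *	    && !FSE_isError(FSE_buildDTable_wksp(dt, norm, maxSym, tableLog, wksp, sizeof(wksp))))
+ *		regenSize = FSE_decompress_usingDTable(dst, dstCapacity,
+ *						       (const BYTE *)cSrc + hdrSize, cSrcSize - hdrSize, dt);
+ */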
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+/* *****************************************
+*  Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size >> 7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog))
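+
+/* Illustrative example (not part of the library) : static allocation of a DTable able to
+   hold any table up to tableLog 12, using the macro above :
+	FSE_DTable dTable[FSE_DTABLE_SIZE_U32(12)];
+   This reserves 1 + (1<<12) = 4097 U32, i.e. roughly 16 KB. */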
+
+/* *****************************************
+*  FSE advanced API
+*******************************************/
+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned
+ */
+size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace);
+
+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned
+ */
+size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace);
+
+/*! FSE_count_simple
+ * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
+ * This function is unsafe : it will segfault if any value within `src` is `> *maxSymbolValuePtr`, since `count` is presumed to have only `*maxSymbolValuePtr + 1` cells.
+*/
+size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize);
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits);
+/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue);
+/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `(1<<tableLog)`.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize);
+
+size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits);
+/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue);
+/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize);
+/**< same as FSE_decompress(), using an externally allocated `workSpace` sized with `FSE_DTABLE_SIZE_U32(maxLog)` */
+
+/* *****************************************
+*  FSE symbol compression API
+*******************************************/
+/*!
+   This API consists of small unitary functions, which highly benefit from being inlined.
+   Hence their bodies are included in the next section.
+*/
+typedef struct {
+	ptrdiff_t value;
+	const void *stateTable;
+	const void *symbolTT;
+	unsigned stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr);
+
+/**<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable    ct;         // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream;  // bitStream tracking structure
+FSE_CState_t  state;      // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+	size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+	FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+	FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
+	BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory : they just store it into a local register, for speed.
+The local register is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+	BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+	FSE_flushCState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return 0 (== not compressible).
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+	size_t size = BIT_closeCStream(&bitStream);
+*/
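+
+/*! Illustrative sketch (not part of the library) :
+	the tutorial steps above assembled into one encoding loop, using a single state.
+	`ct` is assumed to come from FSE_buildCTable_wksp(); `src`/`srcSize` and
+	`dstBuffer`/`maxDstSize` are hypothetical caller-provided buffers.
+	Input is walked backwards, so symbols regenerate in forward order on decode.
+
+	BIT_CStream_t bitStream;
+	FSE_CState_t  state;
+	const BYTE *ip = (const BYTE *)src + srcSize;
+	size_t const initError = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+	if (FSE_isError(initError)) return 0;    // dstBuffer too small
+	FSE_initCState(&state, ct);
+	while (ip > (const BYTE *)src) {
+		FSE_encodeSymbol(&bitStream, &state, *--ip);
+		BIT_flushBits(&bitStream);
+	}
+	FSE_flushCState(&bitStream, &state);
+	return BIT_closeCStream(&bitStream);     // 0 means data did not fit (not compressible)
+*/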
+
+/* *****************************************
+*  FSE symbol decompression API
+*******************************************/
+typedef struct {
+	size_t state;
+	const void *table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr);
+
+/**<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream;    // Stream context
+FSE_DState_t  DState;     // State context. Multiple ones are possible
+FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+	errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several) :
+	errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+	unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you previously stored into the bitStream (in reverse order).
+Note : maximum allowed nbBits is 25, for 32-bits compatibility
+	size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All above operations only read from the local register (whose size depends on size_t).
+Refilling the register from memory is manually performed by the reload method.
+	endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream() result tells if there is still some more data to read from DStream.
+BIT_DStream_unfinished : there is still some data left in the DStream.
+BIT_DStream_endOfBuffer : DStream reached end of buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : DStream reached its exact end, corresponding in general to decompression completed.
+BIT_DStream_tooFar : DStream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+	BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+	BIT_endOfDStream(&DStream);
+Also check the states : there might be some symbols left there, if some high-probability ones (>50%) are possible.
+	FSE_endOfDState(&DState);
+*/
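+
+/*! Illustrative sketch (not part of the library) :
+	a decoding loop built from the calls above, assuming the number of symbols to
+	regenerate (`nbSymbols`) is known from context, as is typical when this unitary API
+	is used. `srcBuffer`/`srcSize`, `dt` and `dst` are hypothetical inputs.
+	Symbols come out in the reverse of their encoding order.
+
+	BIT_DStream_t DStream;
+	FSE_DState_t  DState;
+	size_t n;
+	size_t const initError = BIT_initDStream(&DStream, srcBuffer, srcSize);
+	if (FSE_isError(initError)) return initError;
+	FSE_initDState(&DState, &DStream, dt);
+	for (n = 0; n < nbSymbols; n++) {
+		dst[n] = FSE_decodeSymbol(&DState, &DStream);
+		BIT_reloadDStream(&DStream);
+	}
+	if (!BIT_endOfDStream(&DStream))
+		return ERROR(GENERIC);    // stream and symbol count do not match
+*/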
+
+/* *****************************************
+*  FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+/* *****************************************
+*  Implementation of inlined functions
+*******************************************/
+typedef struct {
+	int deltaFindState;
+	U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct)
+{
+	const void *ptr = ct;
+	const U16 *u16ptr = (const U16 *)ptr;
+	const U32 tableLog = ZSTD_read16(ptr);
+	statePtr->value = (ptrdiff_t)1 << tableLog;
+	statePtr->stateTable = u16ptr + 2;
+	statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1));
+	statePtr->stateLog = tableLog;
+}
+
+/*! FSE_initCState2() :
+*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+*   uses the smallest state value possible, saving the cost of this symbol */
+ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
+{
+	FSE_initCState(statePtr, ct);
+	{
+		const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
+		const U16 *stateTable = (const U16 *)(statePtr->stateTable);
+		U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16);
+		statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+		statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+	}
+}
+
+ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol)
+{
+	const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
+	const U16 *const stateTable = (const U16 *)(statePtr->stateTable);
+	U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+	BIT_addBits(bitC, statePtr->value, nbBitsOut);
+	statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr)
+{
+	BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+	BIT_flushBits(bitC);
+}
+
+/* ======    Decompression    ====== */
+
+typedef struct {
+	U16 tableLog;
+	U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct {
+	unsigned short newState;
+	unsigned char symbol;
+	unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt)
+{
+	const void *ptr = dt;
+	const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr;
+	DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+	BIT_reloadDStream(bitD);
+	DStatePtr->table = dt + 1;
+}
+
+ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr)
+{
+	FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
+	return DInfo.symbol;
+}
+
+ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
+{
+	FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
+	U32 const nbBits = DInfo.nbBits;
+	size_t const lowBits = BIT_readBits(bitD, nbBits);
+	DStatePtr->state = DInfo.newState + lowBits;
+}
+
+ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
+{
+	FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
+	U32 const nbBits = DInfo.nbBits;
+	BYTE const symbol = DInfo.symbol;
+	size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+	DStatePtr->state = DInfo.newState + lowBits;
+	return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+	unsafe, only works if no symbol has a probability > 50% */
+ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
+{
+	FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
+	U32 const nbBits = DInfo.nbBits;
+	BYTE const symbol = DInfo.symbol;
+	size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+	DStatePtr->state = DInfo.newState + lowBits;
+	return symbol;
+}
+
+ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; }
+
+/* **************************************************************
+*  Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+#define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+#define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+#define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+/* ***************************************************************
+*  Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2)
+#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3)
+
+#endif /* FSE_H */
diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
new file mode 100644
index 0000000..ef3d174
--- /dev/null
+++ b/lib/zstd/fse_compress.c
@@ -0,0 +1,795 @@
+/*
+ * FSE : Finite State Entropy encoder
+ * Copyright (C) 2013-2015, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#define FORCE_INLINE static __always_inline
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include "bitstream.h"
+#include "fse.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/string.h> /* memcpy, memset */
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c)                                   \
+	{                                                      \
+		enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
+	} /* use only *after* variable declarations */
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included
+  for type-specific functions (template emulation in C)
+  Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X, Y) X##Y
+#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
+#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
+
+/* Function templates */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * wkspSize should be sized to handle the worst case situation, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
+ * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
+{
+	U32 const tableSize = 1 << tableLog;
+	U32 const tableMask = tableSize - 1;
+	void *const ptr = ct;
+	U16 *const tableU16 = ((U16 *)ptr) + 2;
+	void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1);
+	FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
+	U32 const step = FSE_TABLESTEP(tableSize);
+	U32 highThreshold = tableSize - 1;
+
+	U32 *cumul;
+	FSE_FUNCTION_TYPE *tableSymbol;
+	size_t spaceUsed32 = 0;
+
+	cumul = (U32 *)workspace + spaceUsed32;
+	spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2;
+	tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2;
+
+	if ((spaceUsed32 << 2) > workspaceSize)
+		return ERROR(tableLog_tooLarge);
+	workspace = (U32 *)workspace + spaceUsed32;
+	workspaceSize -= (spaceUsed32 << 2);
+
+	/* CTable header */
+	tableU16[-2] = (U16)tableLog;
+	tableU16[-1] = (U16)maxSymbolValue;
+
+	/* For explanations on how to distribute symbol values over the table :
+	*  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+	/* symbol start positions */
+	{
+		U32 u;
+		cumul[0] = 0;
+		for (u = 1; u <= maxSymbolValue + 1; u++) {
+			if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */
+				cumul[u] = cumul[u - 1] + 1;
+				tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1);
+			} else {
+				cumul[u] = cumul[u - 1] + normalizedCounter[u - 1];
+			}
+		}
+		cumul[maxSymbolValue + 1] = tableSize + 1;
+	}
+
+	/* Spread symbols */
+	{
+		U32 position = 0;
+		U32 symbol;
+		for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
+			int nbOccurences;
+			for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
+				tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+				position = (position + step) & tableMask;
+				while (position > highThreshold)
+					position = (position + step) & tableMask; /* Low proba area */
+			}
+		}
+
+		if (position != 0)
+			return ERROR(GENERIC); /* Must have gone through all positions */
+	}
+
+	/* Build table */
+	{
+		U32 u;
+		for (u = 0; u < tableSize; u++) {
+			FSE_FUNCTION_TYPE s = tableSymbol[u];	/* note : static analyzer may not understand tableSymbol is properly initialized */
+			tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */
+		}
+	}
+
+	/* Build Symbol Transformation Table */
+	{
+		unsigned total = 0;
+		unsigned s;
+		for (s = 0; s <= maxSymbolValue; s++) {
+			switch (normalizedCounter[s]) {
+			case 0: break;
+
+			case -1:
+			case 1:
+				symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog);
+				symbolTT[s].deltaFindState = total - 1;
+				total++;
+				break;
+			default: {
+				U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1);
+				U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+				symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+				symbolTT[s].deltaFindState = total - normalizedCounter[s];
+				total += normalizedCounter[s];
+			}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
+	size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
+	return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
+}
+
+static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+				      unsigned writeIsSafe)
+{
+	BYTE *const ostart = (BYTE *)header;
+	BYTE *out = ostart;
+	BYTE *const oend = ostart + headerBufferSize;
+	int nbBits;
+	const int tableSize = 1 << tableLog;
+	int remaining;
+	int threshold;
+	U32 bitStream;
+	int bitCount;
+	unsigned charnum = 0;
+	int previous0 = 0;
+
+	bitStream = 0;
+	bitCount = 0;
+	/* Table Size */
+	bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount;
+	bitCount += 4;
+
+	/* Init */
+	remaining = tableSize + 1; /* +1 for extra accuracy */
+	threshold = tableSize;
+	nbBits = tableLog + 1;
+
+	while (remaining > 1) { /* stops at 1 */
+		if (previous0) {
+			unsigned start = charnum;
+			while (!normalizedCounter[charnum])
+				charnum++;
+			while (charnum >= start + 24) {
+				start += 24;
+				bitStream += 0xFFFFU << bitCount;
+				if ((!writeIsSafe) && (out > oend - 2))
+					return ERROR(dstSize_tooSmall); /* Buffer overflow */
+				out[0] = (BYTE)bitStream;
+				out[1] = (BYTE)(bitStream >> 8);
+				out += 2;
+				bitStream >>= 16;
+			}
+			while (charnum >= start + 3) {
+				start += 3;
+				bitStream += 3 << bitCount;
+				bitCount += 2;
+			}
+			bitStream += (charnum - start) << bitCount;
+			bitCount += 2;
+			if (bitCount > 16) {
+				if ((!writeIsSafe) && (out > oend - 2))
+					return ERROR(dstSize_tooSmall); /* Buffer overflow */
+				out[0] = (BYTE)bitStream;
+				out[1] = (BYTE)(bitStream >> 8);
+				out += 2;
+				bitStream >>= 16;
+				bitCount -= 16;
+			}
+		}
+		{
+			int count = normalizedCounter[charnum++];
+			int const max = (2 * threshold - 1) - remaining;
+			remaining -= count < 0 ? -count : count;
+			count++; /* +1 for extra accuracy */
+			if (count >= threshold)
+				count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+			bitStream += count << bitCount;
+			bitCount += nbBits;
+			bitCount -= (count < max);
+			previous0 = (count == 1);
+			if (remaining < 1)
+				return ERROR(GENERIC);
+			while (remaining < threshold)
+				nbBits--, threshold >>= 1;
+		}
+		if (bitCount > 16) {
+			if ((!writeIsSafe) && (out > oend - 2))
+				return ERROR(dstSize_tooSmall); /* Buffer overflow */
+			out[0] = (BYTE)bitStream;
+			out[1] = (BYTE)(bitStream >> 8);
+			out += 2;
+			bitStream >>= 16;
+			bitCount -= 16;
+		}
+	}
+
+	/* flush remaining bitStream */
+	if ((!writeIsSafe) && (out > oend - 2))
+		return ERROR(dstSize_tooSmall); /* Buffer overflow */
+	out[0] = (BYTE)bitStream;
+	out[1] = (BYTE)(bitStream >> 8);
+	out += (bitCount + 7) / 8;
+
+	if (charnum > maxSymbolValue + 1)
+		return ERROR(GENERIC);
+
+	return (out - ostart);
+}
+
+size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+	if (tableLog > FSE_MAX_TABLELOG)
+		return ERROR(tableLog_tooLarge); /* Unsupported */
+	if (tableLog < FSE_MIN_TABLELOG)
+		return ERROR(GENERIC); /* Unsupported */
+
+	if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+		return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
+	return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
+}
+
+/*-**************************************************************
+*  Counting histogram
+****************************************************************/
+/*! FSE_count_simple
+	This function counts byte values within `src`, and stores the histogram into table `count`.
+	It doesn't use any additional memory.
+	But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
+	For this reason, prefer using a table `count` with 256 elements.
+	@return : count of most numerous element
+*/
+size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
+{
+	const BYTE *ip = (const BYTE *)src;
+	const BYTE *const end = ip + srcSize;
+	unsigned maxSymbolValue = *maxSymbolValuePtr;
+	unsigned max = 0;
+
+	memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));
+	if (srcSize == 0) {
+		*maxSymbolValuePtr = 0;
+		return 0;
+	}
+
+	while (ip < end)
+		count[*ip++]++;
+
+	while (!count[maxSymbolValue])
+		maxSymbolValue--;
+	*maxSymbolValuePtr = maxSymbolValue;
+
+	{
+		U32 s;
+		for (s = 0; s <= maxSymbolValue; s++)
+			if (count[s] > max)
+				max = count[s];
+	}
+
+	return (size_t)max;
+}
+
+/* FSE_count_parallel_wksp() :
+ * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
+ * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)` */
+static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax,
+				      unsigned *const workSpace)
+{
+	const BYTE *ip = (const BYTE *)source;
+	const BYTE *const iend = ip + sourceSize;
+	unsigned maxSymbolValue = *maxSymbolValuePtr;
+	unsigned max = 0;
+	U32 *const Counting1 = workSpace;
+	U32 *const Counting2 = Counting1 + 256;
+	U32 *const Counting3 = Counting2 + 256;
+	U32 *const Counting4 = Counting3 + 256;
+
+	memset(Counting1, 0, 4 * 256 * sizeof(unsigned));
+
+	/* safety checks */
+	if (!sourceSize) {
+		memset(count, 0, maxSymbolValue + 1);
+		*maxSymbolValuePtr = 0;
+		return 0;
+	}
+	if (!maxSymbolValue)
+		maxSymbolValue = 255; /* 0 == default */
+
+	/* by stripes of 16 bytes */
+	{
+		U32 cached = ZSTD_read32(ip);
+		ip += 4;
+		while (ip < iend - 15) {
+			U32 c = cached;
+			cached = ZSTD_read32(ip);
+			ip += 4;
+			Counting1[(BYTE)c]++;
+			Counting2[(BYTE)(c >> 8)]++;
+			Counting3[(BYTE)(c >> 16)]++;
+			Counting4[c >> 24]++;
+			c = cached;
+			cached = ZSTD_read32(ip);
+			ip += 4;
+			Counting1[(BYTE)c]++;
+			Counting2[(BYTE)(c >> 8)]++;
+			Counting3[(BYTE)(c >> 16)]++;
+			Counting4[c >> 24]++;
+			c = cached;
+			cached = ZSTD_read32(ip);
+			ip += 4;
+			Counting1[(BYTE)c]++;
+			Counting2[(BYTE)(c >> 8)]++;
+			Counting3[(BYTE)(c >> 16)]++;
+			Counting4[c >> 24]++;
+			c = cached;
+			cached = ZSTD_read32(ip);
+			ip += 4;
+			Counting1[(BYTE)c]++;
+			Counting2[(BYTE)(c >> 8)]++;
+			Counting3[(BYTE)(c >> 16)]++;
+			Counting4[c >> 24]++;
+		}
+		ip -= 4;
+	}
+
+	/* finish last symbols */
+	while (ip < iend)
+		Counting1[*ip++]++;
+
+	if (checkMax) { /* verify stats will fit into destination table */
+		U32 s;
+		for (s = 255; s > maxSymbolValue; s--) {
+			Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+			if (Counting1[s])
+				return ERROR(maxSymbolValue_tooSmall);
+		}
+	}
+
+	{
+		U32 s;
+		for (s = 0; s <= maxSymbolValue; s++) {
+			count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+			if (count[s] > max)
+				max = count[s];
+		}
+	}
+
+	while (!count[maxSymbolValue])
+		maxSymbolValue--;
+	*maxSymbolValuePtr = maxSymbolValue;
+	return (size_t)max;
+}
+
+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned */
+size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
+{
+	if (sourceSize < 1500)
+		return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+	return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
+}
+
+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned */
+size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
+{
+	if (*maxSymbolValuePtr < 255)
+		return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
+	*maxSymbolValuePtr = 255;
+	return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
+}
+
+/*-**************************************************************
+*  FSE Compression Code
+****************************************************************/
+/*! FSE_sizeof_CTable() :
+	FSE_CTable is a variable size structure which contains :
+	`U16 tableLog;`
+	`U16 maxSymbolValue;`
+	`U16 nextStateNumber[1 << tableLog];`                         // This size is variable
+	`FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
+Allocation is manual (C standard does not support variable-size structures).
+*/
+size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog)
+{
+	if (tableLog > FSE_MAX_TABLELOG)
+		return ERROR(tableLog_tooLarge);
+	return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32);
+}
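+
+/* Worked example (illustrative) : with tableLog = 12 and maxSymbolValue = 255,
+ * FSE_CTABLE_SIZE_U32(12, 255) = 1 + (1 << 11) + (256 * 2) = 2561 U32, i.e. 10244 bytes. */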
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
+	U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
+	U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+	U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+	return minBits;
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
+	U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+	U32 tableLog = maxTableLog;
+	U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+	if (tableLog == 0)
+		tableLog = FSE_DEFAULT_TABLELOG;
+	if (maxBitsSrc < tableLog)
+		tableLog = maxBitsSrc; /* Accuracy can be reduced */
+	if (minBits > tableLog)
+		tableLog = minBits; /* Need a minimum to safely represent all symbol values */
+	if (tableLog < FSE_MIN_TABLELOG)
+		tableLog = FSE_MIN_TABLELOG;
+	if (tableLog > FSE_MAX_TABLELOG)
+		tableLog = FSE_MAX_TABLELOG;
+	return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+	return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
+
+/* Secondary normalization method.
+   To be used when primary method fails. */
+
+static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue)
+{
+	short const NOT_YET_ASSIGNED = -2;
+	U32 s;
+	U32 distributed = 0;
+	U32 ToDistribute;
+
+	/* Init */
+	U32 const lowThreshold = (U32)(total >> tableLog);
+	U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+	for (s = 0; s <= maxSymbolValue; s++) {
+		if (count[s] == 0) {
+			norm[s] = 0;
+			continue;
+		}
+		if (count[s] <= lowThreshold) {
+			norm[s] = -1;
+			distributed++;
+			total -= count[s];
+			continue;
+		}
+		if (count[s] <= lowOne) {
+			norm[s] = 1;
+			distributed++;
+			total -= count[s];
+			continue;
+		}
+
+		norm[s] = NOT_YET_ASSIGNED;
+	}
+	ToDistribute = (1 << tableLog) - distributed;
+
+	if ((total / ToDistribute) > lowOne) {
+		/* risk of rounding to zero */
+		lowOne = (U32)((total * 3) / (ToDistribute * 2));
+		for (s = 0; s <= maxSymbolValue; s++) {
+			if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
+				norm[s] = 1;
+				distributed++;
+				total -= count[s];
+				continue;
+			}
+		}
+		ToDistribute = (1 << tableLog) - distributed;
+	}
+
+	if (distributed == maxSymbolValue + 1) {
+		/* all values are pretty poor;
+		   probably incompressible data (should have already been detected);
+		   find max, then give all remaining points to max */
+		U32 maxV = 0, maxC = 0;
+		for (s = 0; s <= maxSymbolValue; s++)
+			if (count[s] > maxC)
+				maxV = s, maxC = count[s];
+		norm[maxV] += (short)ToDistribute;
+		return 0;
+	}
+
+	if (total == 0) {
+		/* all of the symbols were low enough for the lowOne or lowThreshold */
+		for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
+			if (norm[s] > 0)
+				ToDistribute--, norm[s]++;
+		return 0;
+	}
+
+	{
+		U64 const vStepLog = 62 - tableLog;
+		U64 const mid = (1ULL << (vStepLog - 1)) - 1;
+		U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
+		U64 tmpTotal = mid;
+		for (s = 0; s <= maxSymbolValue; s++) {
+			if (norm[s] == NOT_YET_ASSIGNED) {
+				U64 const end = tmpTotal + (count[s] * rStep);
+				U32 const sStart = (U32)(tmpTotal >> vStepLog);
+				U32 const sEnd = (U32)(end >> vStepLog);
+				U32 const weight = sEnd - sStart;
+				if (weight < 1)
+					return ERROR(GENERIC);
+				norm[s] = (short)weight;
+				tmpTotal = end;
+			}
+		}
+	}
+
+	return 0;
+}
+
+size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue)
+{
+	/* Sanity checks */
+	if (tableLog == 0)
+		tableLog = FSE_DEFAULT_TABLELOG;
+	if (tableLog < FSE_MIN_TABLELOG)
+		return ERROR(GENERIC); /* Unsupported size */
+	if (tableLog > FSE_MAX_TABLELOG)
+		return ERROR(tableLog_tooLarge); /* Unsupported size */
+	if (tableLog < FSE_minTableLog(total, maxSymbolValue))
+		return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
+
+	{
+		U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000};
+		U64 const scale = 62 - tableLog;
+		U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! */
+		U64 const vStep = 1ULL << (scale - 20);
+		int stillToDistribute = 1 << tableLog;
+		unsigned s;
+		unsigned largest = 0;
+		short largestP = 0;
+		U32 lowThreshold = (U32)(total >> tableLog);
+
+		for (s = 0; s <= maxSymbolValue; s++) {
+			if (count[s] == total)
+				return 0; /* rle special case */
+			if (count[s] == 0) {
+				normalizedCounter[s] = 0;
+				continue;
+			}
+			if (count[s] <= lowThreshold) {
+				normalizedCounter[s] = -1;
+				stillToDistribute--;
+			} else {
+				short proba = (short)((count[s] * step) >> scale);
+				if (proba < 8) {
+					U64 restToBeat = vStep * rtbTable[proba];
+					proba += (count[s] * step) - ((U64)proba << scale) > restToBeat;
+				}
+				if (proba > largestP)
+					largestP = proba, largest = s;
+				normalizedCounter[s] = proba;
+				stillToDistribute -= proba;
+			}
+		}
+		if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+			/* corner case, need another normalization method */
+			size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+			if (FSE_isError(errorCode))
+				return errorCode;
+		} else
+			normalizedCounter[largest] += (short)stillToDistribute;
+	}
+
+	return tableLog;
+}
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits)
+{
+	const unsigned tableSize = 1 << nbBits;
+	const unsigned tableMask = tableSize - 1;
+	const unsigned maxSymbolValue = tableMask;
+	void *const ptr = ct;
+	U16 *const tableU16 = ((U16 *)ptr) + 2;
+	void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */
+	FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
+	unsigned s;
+
+	/* Sanity checks */
+	if (nbBits < 1)
+		return ERROR(GENERIC); /* min size */
+
+	/* header */
+	tableU16[-2] = (U16)nbBits;
+	tableU16[-1] = (U16)maxSymbolValue;
+
+	/* Build table */
+	for (s = 0; s < tableSize; s++)
+		tableU16[s] = (U16)(tableSize + s);
+
+	/* Build Symbol Transformation Table */
+	{
+		const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+		for (s = 0; s <= maxSymbolValue; s++) {
+			symbolTT[s].deltaNbBits = deltaNbBits;
+			symbolTT[s].deltaFindState = s - 1;
+		}
+	}
+
+	return 0;
+}
+
+/* fake FSE_CTable, for rle input (always same symbol) */
+size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue)
+{
+	void *ptr = ct;
+	U16 *tableU16 = ((U16 *)ptr) + 2;
+	void *FSCTptr = (U32 *)ptr + 2;
+	FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr;
+
+	/* header */
+	tableU16[-2] = (U16)0;
+	tableU16[-1] = (U16)symbolValue;
+
+	/* Build table */
+	tableU16[0] = 0;
+	tableU16[1] = 0; /* just in case */
+
+	/* Build Symbol Transformation Table */
+	symbolTT[symbolValue].deltaNbBits = 0;
+	symbolTT[symbolValue].deltaFindState = 0;
+
+	return 0;
+}
+
+static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast)
+{
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *ip = iend;
+
+	BIT_CStream_t bitC;
+	FSE_CState_t CState1, CState2;
+
+	/* init */
+	if (srcSize <= 2)
+		return 0;
+	{
+		size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+		if (FSE_isError(initError))
+			return 0; /* not enough space available to write a bitstream */
+	}
+
+#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+	if (srcSize & 1) {
+		FSE_initCState2(&CState1, ct, *--ip);
+		FSE_initCState2(&CState2, ct, *--ip);
+		FSE_encodeSymbol(&bitC, &CState1, *--ip);
+		FSE_FLUSHBITS(&bitC);
+	} else {
+		FSE_initCState2(&CState2, ct, *--ip);
+		FSE_initCState2(&CState1, ct, *--ip);
+	}
+
+	/* join to mod 4 */
+	srcSize -= 2;
+	if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */
+		FSE_encodeSymbol(&bitC, &CState2, *--ip);
+		FSE_encodeSymbol(&bitC, &CState1, *--ip);
+		FSE_FLUSHBITS(&bitC);
+	}
+
+	/* 2 or 4 encoding per loop */
+	while (ip > istart) {
+
+		FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+		if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */
+			FSE_FLUSHBITS(&bitC);
+
+		FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+		if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */
+			FSE_encodeSymbol(&bitC, &CState2, *--ip);
+			FSE_encodeSymbol(&bitC, &CState1, *--ip);
+		}
+
+		FSE_FLUSHBITS(&bitC);
+	}
+
+	FSE_flushCState(&bitC, &CState2);
+	FSE_flushCState(&bitC, &CState1);
+	return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct)
+{
+	unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+
+	if (fast)
+		return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+	else
+		return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
new file mode 100644
index 0000000..a84300e
--- /dev/null
+++ b/lib/zstd/fse_decompress.c
@@ -0,0 +1,332 @@
+/*
+ * FSE : Finite State Entropy decoder
+ * Copyright (C) 2013-2015, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#define FORCE_INLINE static __always_inline
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include "bitstream.h"
+#include "fse.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/string.h> /* memcpy, memset */
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c)                                   \
+	{                                                      \
+		enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
+	} /* use only *after* variable declarations */
+
+/* check and forward error code */
+#define CHECK_F(f)                  \
+	{                           \
+		size_t const e = f; \
+		if (FSE_isError(e)) \
+			return e;   \
+	}
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included
+  for type-specific functions (template emulation in C)
+  Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X, Y) X##Y
+#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
+#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
+
+/* Function templates */
+
+size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
+{
+	void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
+	FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
+	U16 *symbolNext = (U16 *)workspace;
+
+	U32 const maxSV1 = maxSymbolValue + 1;
+	U32 const tableSize = 1 << tableLog;
+	U32 highThreshold = tableSize - 1;
+
+	/* Sanity Checks */
+	if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
+		return ERROR(tableLog_tooLarge);
+	if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
+		return ERROR(maxSymbolValue_tooLarge);
+	if (tableLog > FSE_MAX_TABLELOG)
+		return ERROR(tableLog_tooLarge);
+
+	/* Init, lay down lowprob symbols */
+	{
+		FSE_DTableHeader DTableH;
+		DTableH.tableLog = (U16)tableLog;
+		DTableH.fastMode = 1;
+		{
+			S16 const largeLimit = (S16)(1 << (tableLog - 1));
+			U32 s;
+			for (s = 0; s < maxSV1; s++) {
+				if (normalizedCounter[s] == -1) {
+					tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+					symbolNext[s] = 1;
+				} else {
+					if (normalizedCounter[s] >= largeLimit)
+						DTableH.fastMode = 0;
+					symbolNext[s] = normalizedCounter[s];
+				}
+			}
+		}
+		memcpy(dt, &DTableH, sizeof(DTableH));
+	}
+
+	/* Spread symbols */
+	{
+		U32 const tableMask = tableSize - 1;
+		U32 const step = FSE_TABLESTEP(tableSize);
+		U32 s, position = 0;
+		for (s = 0; s < maxSV1; s++) {
+			int i;
+			for (i = 0; i < normalizedCounter[s]; i++) {
+				tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+				position = (position + step) & tableMask;
+				while (position > highThreshold)
+					position = (position + step) & tableMask; /* lowprob area */
+			}
+		}
+		if (position != 0)
+			return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+	}
+
+	/* Build Decoding table */
+	{
+		U32 u;
+		for (u = 0; u < tableSize; u++) {
+			FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+			U16 nextState = symbolNext[symbol]++;
+			tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState));
+			tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize);
+		}
+	}
+
+	return 0;
+}
+
+/*-*******************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue)
+{
+	void *ptr = dt;
+	FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
+	void *dPtr = dt + 1;
+	FSE_decode_t *const cell = (FSE_decode_t *)dPtr;
+
+	DTableH->tableLog = 0;
+	DTableH->fastMode = 0;
+
+	cell->newState = 0;
+	cell->symbol = symbolValue;
+	cell->nbBits = 0;
+
+	return 0;
+}
+
+size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits)
+{
+	void *ptr = dt;
+	FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
+	void *dPtr = dt + 1;
+	FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr;
+	const unsigned tableSize = 1 << nbBits;
+	const unsigned tableMask = tableSize - 1;
+	const unsigned maxSV1 = tableMask + 1;
+	unsigned s;
+
+	/* Sanity checks */
+	if (nbBits < 1)
+		return ERROR(GENERIC); /* min size */
+
+	/* Build Decoding Table */
+	DTableH->tableLog = (U16)nbBits;
+	DTableH->fastMode = 1;
+	for (s = 0; s < maxSV1; s++) {
+		dinfo[s].newState = 0;
+		dinfo[s].symbol = (BYTE)s;
+		dinfo[s].nbBits = (BYTE)nbBits;
+	}
+
+	return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt,
+						       const unsigned fast)
+{
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *op = ostart;
+	BYTE *const omax = op + maxDstSize;
+	BYTE *const olimit = omax - 3;
+
+	BIT_DStream_t bitD;
+	FSE_DState_t state1;
+	FSE_DState_t state2;
+
+	/* Init */
+	CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+	FSE_initDState(&state1, &bitD, dt);
+	FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+	/* 4 symbols per loop */
+	for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) {
+		op[0] = FSE_GETSYMBOL(&state1);
+
+		if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
+			BIT_reloadDStream(&bitD);
+
+		op[1] = FSE_GETSYMBOL(&state2);
+
+		if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
+		{
+			if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) {
+				op += 2;
+				break;
+			}
+		}
+
+		op[2] = FSE_GETSYMBOL(&state1);
+
+		if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
+			BIT_reloadDStream(&bitD);
+
+		op[3] = FSE_GETSYMBOL(&state2);
+	}
+
+	/* tail */
+	/* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; Ends at exactly BIT_DStream_completed */
+	while (1) {
+		if (op > (omax - 2))
+			return ERROR(dstSize_tooSmall);
+		*op++ = FSE_GETSYMBOL(&state1);
+		if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
+			*op++ = FSE_GETSYMBOL(&state2);
+			break;
+		}
+
+		if (op > (omax - 2))
+			return ERROR(dstSize_tooSmall);
+		*op++ = FSE_GETSYMBOL(&state2);
+		if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
+			*op++ = FSE_GETSYMBOL(&state1);
+			break;
+		}
+	}
+
+	return op - ostart;
+}
+
+size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
+{
+	const void *ptr = dt;
+	const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr;
+	const U32 fastMode = DTableH->fastMode;
+
+	/* select fast mode (static) */
+	if (fastMode)
+		return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+	return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize)
+{
+	const BYTE *const istart = (const BYTE *)cSrc;
+	const BYTE *ip = istart;
+	unsigned tableLog;
+	unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+	size_t NCountLength;
+
+	FSE_DTable *dt;
+	short *counting;
+	size_t spaceUsed32 = 0;
+
+	FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32));
+
+	dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog);
+	counting = (short *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2;
+
+	if ((spaceUsed32 << 2) > workspaceSize)
+		return ERROR(tableLog_tooLarge);
+	workspace = (U32 *)workspace + spaceUsed32;
+	workspaceSize -= (spaceUsed32 << 2);
+
+	/* normal FSE decoding mode */
+	NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+	if (FSE_isError(NCountLength))
+		return NCountLength;
+	// if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining
+	// case : NCountLength==cSrcSize */
+	if (tableLog > maxLog)
+		return ERROR(tableLog_tooLarge);
+	ip += NCountLength;
+	cSrcSize -= NCountLength;
+
+	CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize));
+
+	return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */
+}
diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
new file mode 100644
index 0000000..2143da2
--- /dev/null
+++ b/lib/zstd/huf.h
@@ -0,0 +1,212 @@
+/*
+ * Huffman coder, part of New Generation Entropy library
+ * header file
+ * Copyright (C) 2013-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+/* *** Dependencies *** */
+#include <linux/types.h> /* size_t */
+
+/* ***   Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
+size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
+
+/* Error Management */
+unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
+
+/* ***   Advanced function   *** */
+
+/** HUF_compress4X_wksp() :
+*   Same as HUF_compress2(), but uses an externally allocated `workSpace`, which must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
+			   size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
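+
+/* Illustrative sketch (not part of the library) : driving HUF_compress4X_wksp() with a
+   statically allocated workspace. `src`, `srcSize`, `dst` and `dstCapacity` are
+   hypothetical caller-provided buffers.
+
+	U32 workSpace[HUF_COMPRESS_WORKSPACE_SIZE_U32];
+	size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
+						 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
+						 workSpace, sizeof(workSpace));
+	if (HUF_isError(cSize)) return cSize;
+	if (cSize == 0) return 0;    // not compressible : caller stores the block uncompressed
+*/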
+
+/* *** Dependencies *** */
+#include "mem.h" /* U32 */
+
+/* *** Constants *** */
+#define HUF_TABLELOG_MAX 12     /* max configured tableLog (for static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
+#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
+#define HUF_SYMBOLVALUE_MAX 255
+
+#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+#error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+/* ****************************************
+*  Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8)			 /* only true if incompressible pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+	U32 name##hb[maxSymbolValue + 1];              \
+	void *name##hv = &(name##hb);                  \
+	HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)}
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)}
+
+/* The workspace must have alignment at least 4 and be at least this large */
+#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10)
+#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+/* The workspace must have alignment at least 4 and be at least this large */
+#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10)
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
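+
+/* Usage sketch (illustrative only, not part of the imported sources): compressing a
+ * buffer with an externally supplied workspace, which is the only mode exposed by
+ * this port. `dst`, `dstCapacity`, `src` and `srcSize` are placeholder names.
+ *
+ *	U32 wksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];
+ *	size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
+ *						 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
+ *						 wksp, sizeof(wksp));
+ *	if (HUF_isError(cSize))
+ *		return cSize;
+ *
+ * A return value of 0 means the input was not compressible enough (or did not fit in
+ * dst), and 1 means the block is a single repeated byte whose value has already been
+ * written to dst; the caller must handle both special cases.
+ */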
+
+/* ****************************************
+*  Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
+				size_t workspaceSize);							       /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
+				   size_t workspaceSize); /**< single-symbol decoder */
+size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
+				   size_t workspaceSize); /**< double-symbols decoder */
+
+/* ****************************************
+*  HUF detailed API
+******************************************/
+/*!
+HUF_compress() does the following:
+1. count symbol occurrence from source[] into table count[] using FSE_count()
+2. (optional) refine tableLog using HUF_optimalTableLog()
+3. build Huffman table from count using HUF_buildCTable()
+4. save Huffman table to memory buffer using HUF_writeCTable_wksp()
+5. encode the data stream using HUF_compress4X_usingCTable()
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and regenerate 'CTable' using external methods.
+*/
+/* FSE_count() : find it within "fse.h" */
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
+size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize);
+size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
+
+typedef enum {
+	HUF_repeat_none,  /**< Cannot use the previous table */
+	HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
+			     4}X_repeat */
+	HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
+} HUF_repeat;
+/** HUF_compress4X_repeat() :
+*   Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+*   If it uses hufTable it does not modify hufTable or repeat.
+*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+*   If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
+			     size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
+			     int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
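+
+/* Illustrative sketch of the repeat mechanism (placeholder names, not part of the
+ * imported sources): a caller compressing a sequence of blocks keeps one CTable and
+ * one HUF_repeat flag alive between calls and lets HUF_compress4X_repeat() decide
+ * whether the previous table is still worth using:
+ *
+ *	HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
+ *	HUF_repeat repeat = HUF_repeat_none;
+ *	U32 wksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];
+ *
+ *	size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
+ *						   HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
+ *						   wksp, sizeof(wksp),
+ *						   hufTable, &repeat, 0);
+ *
+ * For the first block `repeat` starts at HUF_repeat_none. Before later blocks the
+ * caller may set it to HUF_repeat_check (or HUF_repeat_valid if it trusts the table)
+ * so that hufTable is considered for reuse. The final argument is preferRepeat;
+ * per the comment above, a non-zero value makes a valid previous table always win.
+ */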
+
+/** HUF_buildCTable_wksp() :
+ *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ *  `workSpace` must be aligned on 4-byte boundaries, and be at least as large as a table of 1024 unsigned.
+ */
+size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize);
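+
+/* Illustrative sketch of the detailed path listed above (placeholder names, not part
+ * of the imported sources). The count[] histogram is assumed to have been produced
+ * beforehand, e.g. by FSE_count() from fse.h, and every returned size_t should be
+ * checked with HUF_isError() before it is used (checks omitted here for brevity):
+ *
+ *	HUF_CREATE_STATIC_CTABLE(ct, HUF_SYMBOLVALUE_MAX);
+ *	U32 wksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];
+ *	unsigned huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
+ *	huffLog = (unsigned)HUF_buildCTable_wksp(ct, count, maxSymbolValue, huffLog, wksp, sizeof(wksp));
+ *	hSize = HUF_writeCTable_wksp(dst, dstCapacity, ct, maxSymbolValue, huffLog, wksp, sizeof(wksp));
+ *	cSize = HUF_compress4X_usingCTable((BYTE *)dst + hSize, dstCapacity - hSize, src, srcSize, ct);
+ *
+ * The same `ct` can then be reused to compress further blocks with
+ * HUF_compress4X_usingCTable() without rebuilding or rewriting the table.
+ */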
+
+/*! HUF_readStats() :
+	Read compact Huffman tree, saved by HUF_writeCTable().
+	`huffWeight` is destination buffer.
+	@return : size read from `src`, or an error code.
+	Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
+size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize,
+			  void *workspace, size_t workspaceSize);
+
+/** HUF_readCTable() :
+*   Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
+
+/*
+HUF_decompress() does the following:
+1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
+2. build the Huffman table from its saved, compact representation, using HUF_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUF_decompress{1,4}Xn_usingDTable()
+*/
+
+/** HUF_selectDecoder() :
+*   Tells which decoder is likely to decode faster,
+*   based on a set of pre-determined metrics.
+*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
+
+size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
+size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
+
+size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
+size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
+size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
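+
+/* Illustrative sketch of the split decompression path described above (placeholder
+ * names, not part of the imported sources). The X4 table layout is the larger of the
+ * two, so the same static table can hold either decoder's contents:
+ *
+ *	HUF_CREATE_STATIC_DTABLEX4(dt, HUF_TABLELOG_MAX);
+ *	U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ *	U32 const algo = HUF_selectDecoder(dstSize, cSrcSize);
+ *	size_t const hSize = algo ? HUF_readDTableX4_wksp(dt, cSrc, cSrcSize, wksp, sizeof(wksp))
+ *				  : HUF_readDTableX2_wksp(dt, cSrc, cSrcSize, wksp, sizeof(wksp));
+ *	size_t const dSize = algo ? HUF_decompress4X4_usingDTable(dst, dstSize, (const BYTE *)cSrc + hSize, cSrcSize - hSize, dt)
+ *				  : HUF_decompress4X2_usingDTable(dst, dstSize, (const BYTE *)cSrc + hSize, cSrcSize - hSize, dt);
+ *
+ * Each return value must be checked with HUF_isError(), and hSize must be smaller
+ * than cSrcSize; this mirrors what the HUF_decompress4X*_DCtx_wksp() wrappers do
+ * internally. Splitting the steps is only useful when a DTable is reused across
+ * several blocks.
+ */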
+
+/* single stream variants */
+
+size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
+			   size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
+/** HUF_compress1X_repeat() :
+*   Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+*   If it uses hufTable it does not modify hufTable or repeat.
+*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+*   If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
+			     size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
+			     int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
+				   size_t workspaceSize); /**< single-symbol decoder */
+size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
+				   size_t workspaceSize); /**< double-symbols decoder */
+
+size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
+				    const HUF_DTable *DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
+size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
+size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
+
+#endif /* HUF_H_298734234 */
diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
new file mode 100644
index 0000000..40055a7
--- /dev/null
+++ b/lib/zstd/huf_compress.c
@@ -0,0 +1,770 @@
+/*
+ * Huffman encoder, part of New Generation Entropy library
+ * Copyright (C) 2013-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include "bitstream.h"
+#include "fse.h" /* header compression */
+#include "huf.h"
+#include <linux/kernel.h>
+#include <linux/string.h> /* memcpy, memset */
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c)                                   \
+	{                                                      \
+		enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
+	} /* use only *after* variable declarations */
+#define CHECK_V_F(e, f)     \
+	size_t const e = f; \
+	if (ERR_isError(e)) \
+	return f
+#define CHECK_F(f)                        \
+	{                                 \
+		CHECK_V_F(_var_err__, f); \
+	}
+
+/* **************************************************************
+*  Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+	return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+/* *******************************************************
+*  HUF : Huffman block compression
+*********************************************************/
+/* HUF_compressWeights() :
+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
+ * The use case needs much less stack memory.
+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
+ */
+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
+size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize)
+{
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *op = ostart;
+	BYTE *const oend = ostart + dstSize;
+
+	U32 maxSymbolValue = HUF_TABLELOG_MAX;
+	U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+
+	FSE_CTable *CTable;
+	U32 *count;
+	S16 *norm;
+	size_t spaceUsed32 = 0;
+
+	HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32));
+
+	CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX);
+	count = (U32 *)workspace + spaceUsed32;
+	spaceUsed32 += HUF_TABLELOG_MAX + 1;
+	norm = (S16 *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2;
+
+	if ((spaceUsed32 << 2) > workspaceSize)
+		return ERROR(tableLog_tooLarge);
+	workspace = (U32 *)workspace + spaceUsed32;
+	workspaceSize -= (spaceUsed32 << 2);
+
+	/* init conditions */
+	if (wtSize <= 1)
+		return 0; /* Not compressible */
+
+	/* Scan input and build symbol stats */
+	{
+		CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize));
+		if (maxCount == wtSize)
+			return 1; /* only a single symbol in src : rle */
+		if (maxCount == 1)
+			return 0; /* each symbol present maximum once => not compressible */
+	}
+
+	tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
+	CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue));
+
+	/* Write table description header */
+	{
+		CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog));
+		op += hSize;
+	}
+
+	/* Compress */
+	CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize));
+	{
+		CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable));
+		if (cSize == 0)
+			return 0; /* not enough space for compressed data */
+		op += cSize;
+	}
+
+	return op - ostart;
+}
+
+struct HUF_CElt_s {
+	U16 val;
+	BYTE nbBits;
+}; /* typedef'd to HUF_CElt within "huf.h" */
+
+/*! HUF_writeCTable_wksp() :
+	`CTable` : Huffman tree to save, using huf representation.
+	@return : size of saved CTable */
+size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize)
+{
+	BYTE *op = (BYTE *)dst;
+	U32 n;
+
+	BYTE *bitsToWeight;
+	BYTE *huffWeight;
+	size_t spaceUsed32 = 0;
+
+	bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2;
+	huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2;
+
+	if ((spaceUsed32 << 2) > workspaceSize)
+		return ERROR(tableLog_tooLarge);
+	workspace = (U32 *)workspace + spaceUsed32;
+	workspaceSize -= (spaceUsed32 << 2);
+
+	/* check conditions */
+	if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
+		return ERROR(maxSymbolValue_tooLarge);
+
+	/* convert to weight */
+	bitsToWeight[0] = 0;
+	for (n = 1; n < huffLog + 1; n++)
+		bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+	for (n = 0; n < maxSymbolValue; n++)
+		huffWeight[n] = bitsToWeight[CTable[n].nbBits];
+
+	/* attempt weights compression by FSE */
+	{
+		CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize));
+		if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */
+			op[0] = (BYTE)hSize;
+			return hSize + 1;
+		}
+	}
+
+	/* write raw values as 4-bits (max : 15) */
+	if (maxSymbolValue > (256 - 128))
+		return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
+	if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
+		return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
+	op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1));
+	huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
+	for (n = 0; n < maxSymbolValue; n += 2)
+		op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]);
+	return ((maxSymbolValue + 1) / 2) + 1;
+}
+
+size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
+{
+	U32 *rankVal;
+	BYTE *huffWeight;
+	U32 tableLog = 0;
+	U32 nbSymbols = 0;
+	size_t readSize;
+	size_t spaceUsed32 = 0;
+
+	rankVal = (U32 *)workspace + spaceUsed32;
+	spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
+	huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+	if ((spaceUsed32 << 2) > workspaceSize)
+		return ERROR(tableLog_tooLarge);
+	workspace = (U32 *)workspace + spaceUsed32;
+	workspaceSize -= (spaceUsed32 << 2);
+
+	/* get symbol weights */
+	readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
+	if (ERR_isError(readSize))
+		return readSize;
+
+	/* check result */
+	if (tableLog > HUF_TABLELOG_MAX)
+		return ERROR(tableLog_tooLarge);
+	if (nbSymbols > maxSymbolValue + 1)
+		return ERROR(maxSymbolValue_tooSmall);
+
+	/* Prepare base value per rank */
+	{
+		U32 n, nextRankStart = 0;
+		for (n = 1; n <= tableLog; n++) {
+			U32 curr = nextRankStart;
+			nextRankStart += (rankVal[n] << (n - 1));
+			rankVal[n] = curr;
+		}
+	}
+
+	/* fill nbBits */
+	{
+		U32 n;
+		for (n = 0; n < nbSymbols; n++) {
+			const U32 w = huffWeight[n];
+			CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
+		}
+	}
+
+	/* fill val */
+	{
+		U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */
+		U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0};
+		{
+			U32 n;
+			for (n = 0; n < nbSymbols; n++)
+				nbPerRank[CTable[n].nbBits]++;
+		}
+		/* determine starting value per rank */
+		valPerRank[tableLog + 1] = 0; /* for w==0 */
+		{
+			U16 min = 0;
+			U32 n;
+			for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
+				valPerRank[n] = min;     /* get starting value within each rank */
+				min += nbPerRank[n];
+				min >>= 1;
+			}
+		}
+		/* assign value within rank, symbol order */
+		{
+			U32 n;
+			for (n = 0; n <= maxSymbolValue; n++)
+				CTable[n].val = valPerRank[CTable[n].nbBits]++;
+		}
+	}
+
+	return readSize;
+}
+
+typedef struct nodeElt_s {
+	U32 count;
+	U16 parent;
+	BYTE byte;
+	BYTE nbBits;
+} nodeElt;
+
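+/* HUF_setMaxHeight() :
+ * Enforces a maximum code length of maxNbBits on the leaf array built by
+ * HUF_buildCTable_wksp() (leaves sorted by decreasing count, so code lengths grow
+ * with the index). Codes longer than maxNbBits are truncated to maxNbBits, and the
+ * accumulated cost is then repaid by lengthening some of the shorter codes, tracked
+ * per rank in rankLast[]. Returns the new largest code length : largestBits when it
+ * was already <= maxNbBits, maxNbBits otherwise. */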
+static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+	const U32 largestBits = huffNode[lastNonNull].nbBits;
+	if (largestBits <= maxNbBits)
+		return largestBits; /* early exit : no elt > maxNbBits */
+
+	/* there are several elements that are too large (at least 2) */
+	{
+		int totalCost = 0;
+		const U32 baseCost = 1 << (largestBits - maxNbBits);
+		U32 n = lastNonNull;
+
+		while (huffNode[n].nbBits > maxNbBits) {
+			totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+			huffNode[n].nbBits = (BYTE)maxNbBits;
+			n--;
+		} /* n stops at huffNode[n].nbBits <= maxNbBits */
+		while (huffNode[n].nbBits == maxNbBits)
+			n--; /* n ends at index of smallest symbol using < maxNbBits */
+
+		/* renorm totalCost */
+		totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
+
+		/* repay normalized cost */
+		{
+			U32 const noSymbol = 0xF0F0F0F0;
+			U32 rankLast[HUF_TABLELOG_MAX + 2];
+			int pos;
+
+			/* Get pos of last (smallest) symbol per rank */
+			memset(rankLast, 0xF0, sizeof(rankLast));
+			{
+				U32 currNbBits = maxNbBits;
+				for (pos = n; pos >= 0; pos--) {
+					if (huffNode[pos].nbBits >= currNbBits)
+						continue;
+					currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
+					rankLast[maxNbBits - currNbBits] = pos;
+				}
+			}
+
+			while (totalCost > 0) {
+				U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
+				for (; nBitsToDecrease > 1; nBitsToDecrease--) {
+					U32 highPos = rankLast[nBitsToDecrease];
+					U32 lowPos = rankLast[nBitsToDecrease - 1];
+					if (highPos == noSymbol)
+						continue;
+					if (lowPos == noSymbol)
+						break;
+					{
+						U32 const highTotal = huffNode[highPos].count;
+						U32 const lowTotal = 2 * huffNode[lowPos].count;
+						if (highTotal <= lowTotal)
+							break;
+					}
+				}
+				/* only triggered when no rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
+				/* HUF_TABLELOG_MAX test just to please gcc 5+; but it should not be necessary */
+				while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
+					nBitsToDecrease++;
+				totalCost -= 1 << (nBitsToDecrease - 1);
+				if (rankLast[nBitsToDecrease - 1] == noSymbol)
+					rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
+				huffNode[rankLast[nBitsToDecrease]].nbBits++;
+				if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
+					rankLast[nBitsToDecrease] = noSymbol;
+				else {
+					rankLast[nBitsToDecrease]--;
+					if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
+						rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
+				}
+			} /* while (totalCost > 0) */
+
+			while (totalCost < 0) {		       /* Sometimes, cost correction overshoots */
+				if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0
+								  (using maxNbBits) */
+					while (huffNode[n].nbBits == maxNbBits)
+						n--;
+					huffNode[n + 1].nbBits--;
+					rankLast[1] = n + 1;
+					totalCost++;
+					continue;
+				}
+				huffNode[rankLast[1] + 1].nbBits--;
+				rankLast[1]++;
+				totalCost++;
+			}
+		}
+	} /* there are several elements that are too large (at least 2) */
+
+	return maxNbBits;
+}
+
+typedef struct {
+	U32 base;
+	U32 curr;
+} rankPos;
+
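+/* HUF_sort() :
+ * Sorts the symbols into huffNode[] by decreasing count : a bucketing pass first
+ * groups symbols by the rank BIT_highbit32(count + 1), then each symbol is
+ * insertion-sorted within its bucket. huffNode[i].byte keeps the original symbol
+ * value so the tree can later be mapped back to symbols. */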
+static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
+{
+	rankPos rank[32];
+	U32 n;
+
+	memset(rank, 0, sizeof(rank));
+	for (n = 0; n <= maxSymbolValue; n++) {
+		U32 r = BIT_highbit32(count[n] + 1);
+		rank[r].base++;
+	}
+	for (n = 30; n > 0; n--)
+		rank[n - 1].base += rank[n].base;
+	for (n = 0; n < 32; n++)
+		rank[n].curr = rank[n].base;
+	for (n = 0; n <= maxSymbolValue; n++) {
+		U32 const c = count[n];
+		U32 const r = BIT_highbit32(c + 1) + 1;
+		U32 pos = rank[r].curr++;
+		while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
+			huffNode[pos] = huffNode[pos - 1], pos--;
+		huffNode[pos].count = c;
+		huffNode[pos].byte = (BYTE)n;
+	}
+}
+
+/** HUF_buildCTable_wksp() :
+ *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ *  `workSpace` must be aligned on 4-byte boundaries, and be at least as large as a table of 1024 unsigned.
+ */
+#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
+typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
+size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
+{
+	nodeElt *const huffNode0 = (nodeElt *)workSpace;
+	nodeElt *const huffNode = huffNode0 + 1;
+	U32 n, nonNullRank;
+	int lowS, lowN;
+	U16 nodeNb = STARTNODE;
+	U32 nodeRoot;
+
+	/* safety checks */
+	if (wkspSize < sizeof(huffNodeTable))
+		return ERROR(GENERIC); /* workSpace is not large enough */
+	if (maxNbBits == 0)
+		maxNbBits = HUF_TABLELOG_DEFAULT;
+	if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
+		return ERROR(GENERIC);
+	memset(huffNode0, 0, sizeof(huffNodeTable));
+
+	/* sort, decreasing order */
+	HUF_sort(huffNode, count, maxSymbolValue);
+
+	/* init for parents */
+	nonNullRank = maxSymbolValue;
+	while (huffNode[nonNullRank].count == 0)
+		nonNullRank--;
+	lowS = nonNullRank;
+	nodeRoot = nodeNb + lowS - 1;
+	lowN = nodeNb;
+	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
+	huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
+	nodeNb++;
+	lowS -= 2;
+	for (n = nodeNb; n <= nodeRoot; n++)
+		huffNode[n].count = (U32)(1U << 30);
+	huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
+
+	/* create parents */
+	while (nodeNb <= nodeRoot) {
+		U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+		U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+		huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+		huffNode[n1].parent = huffNode[n2].parent = nodeNb;
+		nodeNb++;
+	}
+
+	/* distribute weights (unlimited tree height) */
+	huffNode[nodeRoot].nbBits = 0;
+	for (n = nodeRoot - 1; n >= STARTNODE; n--)
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
+	for (n = 0; n <= nonNullRank; n++)
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
+
+	/* enforce maxTableLog */
+	maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
+
+	/* fill result into tree (val, nbBits) */
+	{
+		U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
+		U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
+		if (maxNbBits > HUF_TABLELOG_MAX)
+			return ERROR(GENERIC); /* check fit into table */
+		for (n = 0; n <= nonNullRank; n++)
+			nbPerRank[huffNode[n].nbBits]++;
+		/* determine starting value per rank */
+		{
+			U16 min = 0;
+			for (n = maxNbBits; n > 0; n--) {
+				valPerRank[n] = min; /* get starting value within each rank */
+				min += nbPerRank[n];
+				min >>= 1;
+			}
+		}
+		for (n = 0; n <= maxSymbolValue; n++)
+			tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
+		for (n = 0; n <= maxSymbolValue; n++)
+			tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
+	}
+
+	return maxNbBits;
+}
+
+static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
+{
+	size_t nbBits = 0;
+	int s;
+	for (s = 0; s <= (int)maxSymbolValue; ++s) {
+		nbBits += CTable[s].nbBits * count[s];
+	}
+	return nbBits >> 3;
+}
+
+static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
+{
+	int bad = 0;
+	int s;
+	for (s = 0; s <= (int)maxSymbolValue; ++s) {
+		bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
+	}
+	return !bad;
+}
+
+static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
+{
+	BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
+}
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
+
+#define HUF_FLUSHBITS_1(stream)                                            \
+	if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
+	HUF_FLUSHBITS(stream)
+
+#define HUF_FLUSHBITS_2(stream)                                            \
+	if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \
+	HUF_FLUSHBITS(stream)
+
+size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
+{
+	const BYTE *ip = (const BYTE *)src;
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *const oend = ostart + dstSize;
+	BYTE *op = ostart;
+	size_t n;
+	BIT_CStream_t bitC;
+
+	/* init */
+	if (dstSize < 8)
+		return 0; /* not enough space to compress */
+	{
+		size_t const initErr = BIT_initCStream(&bitC, op, oend - op);
+		if (HUF_isError(initErr))
+			return 0;
+	}
+
+	n = srcSize & ~3; /* round srcSize down to a multiple of 4; the tail is encoded first */
+	switch (srcSize & 3) {
+	case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
+	case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
+	case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
+	case 0:
+	default:;
+	}
+
+	for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */
+		HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
+		HUF_FLUSHBITS_1(&bitC);
+		HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
+		HUF_FLUSHBITS_2(&bitC);
+		HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
+		HUF_FLUSHBITS_1(&bitC);
+		HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
+		HUF_FLUSHBITS(&bitC);
+	}
+
+	return BIT_closeCStream(&bitC);
+}
+
+size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
+{
+	size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */
+	const BYTE *ip = (const BYTE *)src;
+	const BYTE *const iend = ip + srcSize;
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *const oend = ostart + dstSize;
+	BYTE *op = ostart;
+
+	if (dstSize < 6 + 1 + 1 + 1 + 8)
+		return 0; /* minimum space to compress successfully */
+	if (srcSize < 12)
+		return 0; /* no saving possible : too small input */
+	op += 6;	  /* jumpTable */
+
+	{
+		CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
+		if (cSize == 0)
+			return 0;
+		ZSTD_writeLE16(ostart, (U16)cSize);
+		op += cSize;
+	}
+
+	ip += segmentSize;
+	{
+		CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
+		if (cSize == 0)
+			return 0;
+		ZSTD_writeLE16(ostart + 2, (U16)cSize);
+		op += cSize;
+	}
+
+	ip += segmentSize;
+	{
+		CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
+		if (cSize == 0)
+			return 0;
+		ZSTD_writeLE16(ostart + 4, (U16)cSize);
+		op += cSize;
+	}
+
+	ip += segmentSize;
+	{
+		CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable));
+		if (cSize == 0)
+			return 0;
+		op += cSize;
+	}
+
+	return op - ostart;
+}
+
+static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream,
+					  const HUF_CElt *CTable)
+{
+	size_t const cSize =
+	    singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
+	if (HUF_isError(cSize)) {
+		return cSize;
+	}
+	if (cSize == 0) {
+		return 0;
+	} /* uncompressible */
+	op += cSize;
+	/* check compressibility */
+	if ((size_t)(op - ostart) >= srcSize - 1) {
+		return 0;
+	}
+	return op - ostart;
+}
+
+/* `workSpace` must be a table of at least 1024 unsigned */
+static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
+				    unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
+{
+	BYTE *const ostart = (BYTE *)dst;
+	BYTE *const oend = ostart + dstSize;
+	BYTE *op = ostart;
+
+	U32 *count;
+	size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
+	HUF_CElt *CTable;
+	size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
+
+	/* checks & inits */
+	if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
+		return ERROR(GENERIC);
+	if (!srcSize)
+		return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
+	if (!dstSize)
+		return 0; /* cannot fit within dst budget */
+	if (srcSize > HUF_BLOCKSIZE_MAX)
+		return ERROR(srcSize_wrong); /* curr block size limit */
+	if (huffLog > HUF_TABLELOG_MAX)
+		return ERROR(tableLog_tooLarge);
+	if (!maxSymbolValue)
+		maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+	if (!huffLog)
+		huffLog = HUF_TABLELOG_DEFAULT;
+
+	count = (U32 *)workSpace;
+	workSpace = (BYTE *)workSpace + countSize;
+	wkspSize -= countSize;
+	CTable = (HUF_CElt *)workSpace;
+	workSpace = (BYTE *)workSpace + CTableSize;
+	wkspSize -= CTableSize;
+
+	/* Heuristic : If we don't need to check the validity of the old table, use the old table for small inputs */
+	if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+		return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
+	}
+
+	/* Scan input and build symbol stats */
+	{
+		CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
+		if (largest == srcSize) {
+			*ostart = ((const BYTE *)src)[0];
+			return 1;
+		} /* single symbol, rle */
+		if (largest <= (srcSize >> 7) + 1)
+			return 0; /* Fast heuristic : not compressible enough */
+	}
+
+	/* Check validity of previous table */
+	if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
+		*repeat = HUF_repeat_none;
+	}
+	/* Heuristic : use existing table for small inputs */
+	if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+		return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
+	}
+
+	/* Build Huffman Tree */
+	huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+	{
+		CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
+		huffLog = (U32)maxBits;
+		/* Zero the unused symbols so we can check it for validity */
+		memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
+	}
+
+	/* Write table description header */
+	{
+		CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
+		/* Check if using the previous table will be beneficial */
+		if (repeat && *repeat != HUF_repeat_none) {
+			size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
+			size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
+			if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
+				return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
+			}
+		}
+		/* Use the new table */
+		if (hSize + 12ul >= srcSize) {
+			return 0;
+		}
+		op += hSize;
+		if (repeat) {
+			*repeat = HUF_repeat_none;
+		}
+		if (oldHufTable) {
+			memcpy(oldHufTable, CTable, CTableSize);
+		} /* Save the new table */
+	}
+	return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
+}
+
+size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
+			   size_t wkspSize)
+{
+	return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
+}
+
+size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
+			     size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
+{
+	return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat,
+				     preferRepeat);
+}
+
+size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
+			   size_t wkspSize)
+{
+	return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
+}
+
+size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
+			     size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
+{
+	return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat,
+				     preferRepeat);
+}
diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
new file mode 100644
index 0000000..6526482
--- /dev/null
+++ b/lib/zstd/huf_decompress.c
@@ -0,0 +1,960 @@
+/*
+ * Huffman decoder, part of New Generation Entropy library
+ * Copyright (C) 2013-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#define FORCE_INLINE static __always_inline
+
+/* **************************************************************
+*  Dependencies
+****************************************************************/
+#include "bitstream.h" /* BIT_* */
+#include "fse.h"       /* header compression */
+#include "huf.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/string.h> /* memcpy, memset */
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c)                                   \
+	{                                                      \
+		enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
+	} /* use only *after* variable declarations */
+
+/*-***************************/
+/*  generic DTableDesc       */
+/*-***************************/
+
+typedef struct {
+	BYTE maxTableLog;
+	BYTE tableType;
+	BYTE tableLog;
+	BYTE reserved;
+} DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
+{
+	DTableDesc dtd;
+	memcpy(&dtd, table, sizeof(dtd));
+	return dtd;
+}
+
+/*-***************************/
+/*  single-symbol decoding   */
+/*-***************************/
+
+typedef struct {
+	BYTE byte;
+	BYTE nbBits;
+} HUF_DEltX2; /* single-symbol decoding */
+
+size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
+{
+	U32 tableLog = 0;
+	U32 nbSymbols = 0;
+	size_t iSize;
+	void *const dtPtr = DTable + 1;
+	HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
+
+	U32 *rankVal;
+	BYTE *huffWeight;
+	size_t spaceUsed32 = 0;
+
+	rankVal = (U32 *)workspace + spaceUsed32;
+	spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
+	huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+	if ((spaceUsed32 << 2) > workspaceSize)
+		return ERROR(tableLog_tooLarge);
+	workspace = (U32 *)workspace + spaceUsed32;
+	workspaceSize -= (spaceUsed32 << 2);
+
+	HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+	/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
+
+	iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
+	if (HUF_isError(iSize))
+		return iSize;
+
+	/* Table header */
+	{
+		DTableDesc dtd = HUF_getDTableDesc(DTable);
+		if (tableLog > (U32)(dtd.maxTableLog + 1))
+			return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+		dtd.tableType = 0;
+		dtd.tableLog = (BYTE)tableLog;
+		memcpy(DTable, &dtd, sizeof(dtd));
+	}
+
+	/* Calculate starting value for each rank */
+	{
+		U32 n, nextRankStart = 0;
+		for (n = 1; n < tableLog + 1; n++) {
+			U32 const curr = nextRankStart;
+			nextRankStart += (rankVal[n] << (n - 1));
+			rankVal[n] = curr;
+		}
+	}
+
+	/* fill DTable */
+	{
+		U32 n;
+		for (n = 0; n < nbSymbols; n++) {
+			U32 const w = huffWeight[n];
+			U32 const length = (1 << w) >> 1;
+			U32 u;
+			HUF_DEltX2 D;
+			D.byte = (BYTE)n;
+			D.nbBits = (BYTE)(tableLog + 1 - w);
+			for (u = rankVal[w]; u < rankVal[w] + length; u++)
+				dt[u] = D;
+			rankVal[w] += length;
+		}
+	}
+
+	return iSize;
+}
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog)
+{
+	size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+	BYTE const c = dt[val].byte;
+	BIT_skipBits(Dstream, dt[val].nbBits);
+	return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)         \
+	if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
+	HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+	if (ZSTD_64bits())                     \
+	HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
+{
+	BYTE *const pStart = p;
+
+	/* up to 4 symbols at a time */
+	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
+		HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+		HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+		HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+		HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+	}
+
+	/* closer to the end */
+	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+		HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+	/* no more data to retrieve from bitstream, hence no need to reload */
+	while (p < pEnd)
+		HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+	return pEnd - pStart;
+}
+
+static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	BYTE *op = (BYTE *)dst;
+	BYTE *const oend = op + dstSize;
+	const void *dtPtr = DTable + 1;
+	const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
+	BIT_DStream_t bitD;
+	DTableDesc const dtd = HUF_getDTableDesc(DTable);
+	U32 const dtLog = dtd.tableLog;
+
+	{
+		size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
+		if (HUF_isError(errorCode))
+			return errorCode;
+	}
+
+	HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+	/* check */
+	if (!BIT_endOfDStream(&bitD))
+		return ERROR(corruption_detected);
+
+	return dstSize;
+}
+
+size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	DTableDesc dtd = HUF_getDTableDesc(DTable);
+	if (dtd.tableType != 0)
+		return ERROR(GENERIC);
+	return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
+{
+	const BYTE *ip = (const BYTE *)cSrc;
+
+	size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
+	if (HUF_isError(hSize))
+		return hSize;
+	if (hSize >= cSrcSize)
+		return ERROR(srcSize_wrong);
+	ip += hSize;
+	cSrcSize -= hSize;
+
+	return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	/* Check */
+	if (cSrcSize < 10)
+		return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+	{
+		const BYTE *const istart = (const BYTE *)cSrc;
+		BYTE *const ostart = (BYTE *)dst;
+		BYTE *const oend = ostart + dstSize;
+		const void *const dtPtr = DTable + 1;
+		const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
+
+		/* Init */
+		BIT_DStream_t bitD1;
+		BIT_DStream_t bitD2;
+		BIT_DStream_t bitD3;
+		BIT_DStream_t bitD4;
+		size_t const length1 = ZSTD_readLE16(istart);
+		size_t const length2 = ZSTD_readLE16(istart + 2);
+		size_t const length3 = ZSTD_readLE16(istart + 4);
+		size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+		const BYTE *const istart1 = istart + 6; /* jumpTable */
+		const BYTE *const istart2 = istart1 + length1;
+		const BYTE *const istart3 = istart2 + length2;
+		const BYTE *const istart4 = istart3 + length3;
+		const size_t segmentSize = (dstSize + 3) / 4;
+		BYTE *const opStart2 = ostart + segmentSize;
+		BYTE *const opStart3 = opStart2 + segmentSize;
+		BYTE *const opStart4 = opStart3 + segmentSize;
+		BYTE *op1 = ostart;
+		BYTE *op2 = opStart2;
+		BYTE *op3 = opStart3;
+		BYTE *op4 = opStart4;
+		U32 endSignal;
+		DTableDesc const dtd = HUF_getDTableDesc(DTable);
+		U32 const dtLog = dtd.tableLog;
+
+		if (length4 > cSrcSize)
+			return ERROR(corruption_detected); /* overflow */
+		{
+			size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+		{
+			size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+		{
+			size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+		{
+			size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+
+		/* 16-32 symbols per loop (4-8 symbols per stream) */
+		endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+		for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) {
+			HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+			HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+			HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+			HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+			HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+			HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+			HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+			HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+			HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+			HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+			HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+			HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+			HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+			HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+			HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+			HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+			endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+		}
+
+		/* check corruption */
+		if (op1 > opStart2)
+			return ERROR(corruption_detected);
+		if (op2 > opStart3)
+			return ERROR(corruption_detected);
+		if (op3 > opStart4)
+			return ERROR(corruption_detected);
+		/* note : op4 supposed already verified within main loop */
+
+		/* finish bitStreams one by one */
+		HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+		HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+		HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+		HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+		/* check */
+		endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+		if (!endSignal)
+			return ERROR(corruption_detected);
+
+		/* decoded size */
+		return dstSize;
+	}
+}
+
+size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	DTableDesc dtd = HUF_getDTableDesc(DTable);
+	if (dtd.tableType != 0)
+		return ERROR(GENERIC);
+	return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
+{
+	const BYTE *ip = (const BYTE *)cSrc;
+
+	size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
+	if (HUF_isError(hSize))
+		return hSize;
+	if (hSize >= cSrcSize)
+		return ERROR(srcSize_wrong);
+	ip += hSize;
+	cSrcSize -= hSize;
+
+	return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
+}
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+typedef struct {
+	U16 sequence;
+	BYTE nbBits;
+	BYTE length;
+} HUF_DEltX4; /* double-symbols decoding */
+
+typedef struct {
+	BYTE symbol;
+	BYTE weight;
+} sortedSymbol_t;
+
+/* HUF_fillDTableX4Level2() :
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight,
+				   const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq)
+{
+	HUF_DEltX4 DElt;
+	U32 rankVal[HUF_TABLELOG_MAX + 1];
+
+	/* get pre-calculated rankVal */
+	memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+	/* fill skipped values */
+	if (minWeight > 1) {
+		U32 i, skipSize = rankVal[minWeight];
+		ZSTD_writeLE16(&(DElt.sequence), baseSeq);
+		DElt.nbBits = (BYTE)(consumed);
+		DElt.length = 1;
+		for (i = 0; i < skipSize; i++)
+			DTable[i] = DElt;
+	}
+
+	/* fill DTable */
+	{
+		U32 s;
+		for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */
+			const U32 symbol = sortedSymbols[s].symbol;
+			const U32 weight = sortedSymbols[s].weight;
+			const U32 nbBits = nbBitsBaseline - weight;
+			const U32 length = 1 << (sizeLog - nbBits);
+			const U32 start = rankVal[weight];
+			U32 i = start;
+			const U32 end = start + length;
+
+			ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+			DElt.nbBits = (BYTE)(nbBits + consumed);
+			DElt.length = 2;
+			do {
+				DTable[i++] = DElt;
+			} while (i < end); /* since length >= 1 */
+
+			rankVal[weight] += length;
+		}
+	}
+}
+
+typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+
+static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart,
+			     rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline)
+{
+	U32 rankVal[HUF_TABLELOG_MAX + 1];
+	const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+	const U32 minBits = nbBitsBaseline - maxWeight;
+	U32 s;
+
+	memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+	/* fill DTable */
+	for (s = 0; s < sortedListSize; s++) {
+		const U16 symbol = sortedList[s].symbol;
+		const U32 weight = sortedList[s].weight;
+		const U32 nbBits = nbBitsBaseline - weight;
+		const U32 start = rankVal[weight];
+		const U32 length = 1 << (targetLog - nbBits);
+
+		if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */
+			U32 sortedRank;
+			int minWeight = nbBits + scaleLog;
+			if (minWeight < 1)
+				minWeight = 1;
+			sortedRank = rankStart[minWeight];
+			HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank,
+					       sortedListSize - sortedRank, nbBitsBaseline, symbol);
+		} else {
+			HUF_DEltX4 DElt;
+			ZSTD_writeLE16(&(DElt.sequence), symbol);
+			DElt.nbBits = (BYTE)(nbBits);
+			DElt.length = 1;
+			{
+				U32 const end = start + length;
+				U32 u;
+				for (u = start; u < end; u++)
+					DTable[u] = DElt;
+			}
+		}
+		rankVal[weight] += length;
+	}
+}
+
+size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
+{
+	U32 tableLog, maxW, sizeOfSort, nbSymbols;
+	DTableDesc dtd = HUF_getDTableDesc(DTable);
+	U32 const maxTableLog = dtd.maxTableLog;
+	size_t iSize;
+	void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */
+	HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr;
+	U32 *rankStart;
+
+	rankValCol_t *rankVal;
+	U32 *rankStats;
+	U32 *rankStart0;
+	sortedSymbol_t *sortedSymbol;
+	BYTE *weightList;
+	size_t spaceUsed32 = 0;
+
+	HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0);
+
+	rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
+	rankStats = (U32 *)workspace + spaceUsed32;
+	spaceUsed32 += HUF_TABLELOG_MAX + 1;
+	rankStart0 = (U32 *)workspace + spaceUsed32;
+	spaceUsed32 += HUF_TABLELOG_MAX + 2;
+	sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
+	weightList = (BYTE *)((U32 *)workspace + spaceUsed32);
+	spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+	if ((spaceUsed32 << 2) > workspaceSize)
+		return ERROR(tableLog_tooLarge);
+	workspace = (U32 *)workspace + spaceUsed32;
+	workspaceSize -= (spaceUsed32 << 2);
+
+	rankStart = rankStart0 + 1;
+	memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
+
+	HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
+	if (maxTableLog > HUF_TABLELOG_MAX)
+		return ERROR(tableLog_tooLarge);
+	/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
+
+	iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
+	if (HUF_isError(iSize))
+		return iSize;
+
+	/* check result */
+	if (tableLog > maxTableLog)
+		return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+
+	/* find maxWeight */
+	for (maxW = tableLog; rankStats[maxW] == 0; maxW--) {
+	} /* necessarily finds a solution before 0 */
+
+	/* Get start index of each weight */
+	{
+		U32 w, nextRankStart = 0;
+		for (w = 1; w < maxW + 1; w++) {
+			U32 curr = nextRankStart;
+			nextRankStart += rankStats[w];
+			rankStart[w] = curr;
+		}
+		rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+		sizeOfSort = nextRankStart;
+	}
+
+	/* sort symbols by weight */
+	{
+		U32 s;
+		for (s = 0; s < nbSymbols; s++) {
+			U32 const w = weightList[s];
+			U32 const r = rankStart[w]++;
+			sortedSymbol[r].symbol = (BYTE)s;
+			sortedSymbol[r].weight = (BYTE)w;
+		}
+		rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+	}
+
+	/* Build rankVal */
+	{
+		U32 *const rankVal0 = rankVal[0];
+		{
+			int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */
+			U32 nextRankVal = 0;
+			U32 w;
+			for (w = 1; w < maxW + 1; w++) {
+				U32 curr = nextRankVal;
+				nextRankVal += rankStats[w] << (w + rescale);
+				rankVal0[w] = curr;
+			}
+		}
+		{
+			U32 const minBits = tableLog + 1 - maxW;
+			U32 consumed;
+			for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+				U32 *const rankValPtr = rankVal[consumed];
+				U32 w;
+				for (w = 1; w < maxW + 1; w++) {
+					rankValPtr[w] = rankVal0[w] >> consumed;
+				}
+			}
+		}
+	}
+
+	HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1);
+
+	dtd.tableLog = (BYTE)maxTableLog;
+	dtd.tableType = 1;
+	memcpy(DTable, &dtd, sizeof(dtd));
+	return iSize;
+}
+
+static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
+{
+	size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+	memcpy(op, dt + val, 2);
+	BIT_skipBits(DStream, dt[val].nbBits);
+	return dt[val].length;
+}
+
+static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
+{
+	size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+	memcpy(op, dt + val, 1);
+	if (dt[val].length == 1)
+		BIT_skipBits(DStream, dt[val].nbBits);
+	else {
+		if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) {
+			BIT_skipBits(DStream, dt[val].nbBits);
+			if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8))
+				/* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+				DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8);
+		}
+	}
+	return 1;
+}
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr)         \
+	if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
+	ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+	if (ZSTD_64bits())                     \
+	ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
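+
+/* The _1 and _2 variants decode an extra symbol per reload only when enough
+ * bits are guaranteed to remain : _2 requires a 64-bit bit container, _1 also
+ * runs in 32-bit mode when HUF_TABLELOG_MAX <= 12, and _0 always decodes. */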
+
+FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
+{
+	BYTE *const pStart = p;
+
+	/* up to 8 symbols at a time */
+	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
+		HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+		HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+		HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+		HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+	}
+
+	/* closer to end : up to 2 symbols at a time */
+	while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
+		HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+	while (p <= pEnd - 2)
+		HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+
+	if (p < pEnd)
+		p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+	return p - pStart;
+}
+
+static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	BIT_DStream_t bitD;
+
+	/* Init */
+	{
+		size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
+		if (HUF_isError(errorCode))
+			return errorCode;
+	}
+
+	/* decode */
+	{
+		BYTE *const ostart = (BYTE *)dst;
+		BYTE *const oend = ostart + dstSize;
+		const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */
+		const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
+		DTableDesc const dtd = HUF_getDTableDesc(DTable);
+		HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
+	}
+
+	/* check */
+	if (!BIT_endOfDStream(&bitD))
+		return ERROR(corruption_detected);
+
+	/* decoded size */
+	return dstSize;
+}
+
+size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	DTableDesc dtd = HUF_getDTableDesc(DTable);
+	if (dtd.tableType != 1)
+		return ERROR(GENERIC);
+	return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
+{
+	const BYTE *ip = (const BYTE *)cSrc;
+
+	size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
+	if (HUF_isError(hSize))
+		return hSize;
+	if (hSize >= cSrcSize)
+		return ERROR(srcSize_wrong);
+	ip += hSize;
+	cSrcSize -= hSize;
+
+	return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	if (cSrcSize < 10)
+		return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+	{
+		const BYTE *const istart = (const BYTE *)cSrc;
+		BYTE *const ostart = (BYTE *)dst;
+		BYTE *const oend = ostart + dstSize;
+		const void *const dtPtr = DTable + 1;
+		const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
+
+		/* Init */
+		BIT_DStream_t bitD1;
+		BIT_DStream_t bitD2;
+		BIT_DStream_t bitD3;
+		BIT_DStream_t bitD4;
+		size_t const length1 = ZSTD_readLE16(istart);
+		size_t const length2 = ZSTD_readLE16(istart + 2);
+		size_t const length3 = ZSTD_readLE16(istart + 4);
+		size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+		const BYTE *const istart1 = istart + 6; /* jumpTable */
+		const BYTE *const istart2 = istart1 + length1;
+		const BYTE *const istart3 = istart2 + length2;
+		const BYTE *const istart4 = istart3 + length3;
+		size_t const segmentSize = (dstSize + 3) / 4;
+		BYTE *const opStart2 = ostart + segmentSize;
+		BYTE *const opStart3 = opStart2 + segmentSize;
+		BYTE *const opStart4 = opStart3 + segmentSize;
+		BYTE *op1 = ostart;
+		BYTE *op2 = opStart2;
+		BYTE *op3 = opStart3;
+		BYTE *op4 = opStart4;
+		U32 endSignal;
+		DTableDesc const dtd = HUF_getDTableDesc(DTable);
+		U32 const dtLog = dtd.tableLog;
+
+		if (length4 > cSrcSize)
+			return ERROR(corruption_detected); /* overflow */
+		{
+			size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+		{
+			size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+		{
+			size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+		{
+			size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
+			if (HUF_isError(errorCode))
+				return errorCode;
+		}
+
+		/* 16-32 symbols per loop (4-8 symbols per stream) */
+		endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+		for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) {
+			HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+			HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+			HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+			HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+			HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+			HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+			HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+			HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+			HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+			HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+			HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+			HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+			HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+			HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+			HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+			HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+			endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+		}
+
+		/* check corruption */
+		if (op1 > opStart2)
+			return ERROR(corruption_detected);
+		if (op2 > opStart3)
+			return ERROR(corruption_detected);
+		if (op3 > opStart4)
+			return ERROR(corruption_detected);
+		/* note : op4 already verified within main loop */
+
+		/* finish bitStreams one by one */
+		HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+		HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+		HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+		HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
+
+		/* check */
+		{
+			U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+			if (!endCheck)
+				return ERROR(corruption_detected);
+		}
+
+		/* decoded size */
+		return dstSize;
+	}
+}
+
+size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	DTableDesc dtd = HUF_getDTableDesc(DTable);
+	if (dtd.tableType != 1)
+		return ERROR(GENERIC);
+	return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
+{
+	const BYTE *ip = (const BYTE *)cSrc;
+
+	size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
+	if (HUF_isError(hSize))
+		return hSize;
+	if (hSize >= cSrcSize)
+		return ERROR(srcSize_wrong);
+	ip += hSize;
+	cSrcSize -= hSize;
+
+	return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
+}
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	DTableDesc const dtd = HUF_getDTableDesc(DTable);
+	return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
+			     : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
+{
+	DTableDesc const dtd = HUF_getDTableDesc(DTable);
+	return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
+			     : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+typedef struct {
+	U32 tableTime;
+	U32 decode256Time;
+} algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = {
+    /* single, double, quad */
+    {{0, 0}, {1, 1}, {2, 2}},		     /* Q==0 : impossible */
+    {{0, 0}, {1, 1}, {2, 2}},		     /* Q==1 : impossible */
+    {{38, 130}, {1313, 74}, {2151, 38}},     /* Q == 2 : 12-18% */
+    {{448, 128}, {1353, 74}, {2238, 41}},    /* Q == 3 : 18-25% */
+    {{556, 128}, {1353, 74}, {2238, 47}},    /* Q == 4 : 25-32% */
+    {{714, 128}, {1418, 74}, {2436, 53}},    /* Q == 5 : 32-38% */
+    {{883, 128}, {1437, 74}, {2464, 61}},    /* Q == 6 : 38-44% */
+    {{897, 128}, {1515, 75}, {2622, 68}},    /* Q == 7 : 44-50% */
+    {{926, 128}, {1613, 75}, {2730, 75}},    /* Q == 8 : 50-56% */
+    {{947, 128}, {1729, 77}, {3359, 77}},    /* Q == 9 : 56-62% */
+    {{1107, 128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177, 128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242, 128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */
+    {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */
+    {{722, 128}, {1891, 145}, {1936, 146}},  /* Q ==15 : 93-99% */
+};
+
+/** HUF_selectDecoder() :
+*   Tells which decoder is likely to decode faster,
+*   based on a set of pre-determined metrics.
+*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4.
+*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
+{
+	/* decoder timing evaluation */
+	U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
+	U32 const D256 = (U32)(dstSize >> 8);
+	U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+	U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+	DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
+
+	return DTime1 < DTime0;
+}
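+
+/* Illustrative example (figures taken from the algoTime table above): with
+ * dstSize = 64 KB and cSrcSize = 32 KB, Q = 32*16/64 = 8 and D256 = 256,
+ * so DTime0 = 926 + 128*256 = 33694 and DTime1 = 1613 + 75*256 = 20813,
+ * raised to 23414 by the memory penalty; DTime1 < DTime0, hence the
+ * double-symbols decoder (X4) is selected. */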
+
+typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize);
+
+size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
+{
+	/* validation checks */
+	if (dstSize == 0)
+		return ERROR(dstSize_tooSmall);
+	if (cSrcSize > dstSize)
+		return ERROR(corruption_detected); /* invalid */
+	if (cSrcSize == dstSize) {
+		memcpy(dst, cSrc, dstSize);
+		return dstSize;
+	} /* not compressed */
+	if (cSrcSize == 1) {
+		memset(dst, *(const BYTE *)cSrc, dstSize);
+		return dstSize;
+	} /* RLE */
+
+	{
+		U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+		return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
+			      : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
+	}
+}
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
+{
+	/* validation checks */
+	if (dstSize == 0)
+		return ERROR(dstSize_tooSmall);
+	if ((cSrcSize >= dstSize) || (cSrcSize <= 1))
+		return ERROR(corruption_detected); /* invalid */
+
+	{
+		U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+		return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
+			      : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
+	}
+}
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
+{
+	/* validation checks */
+	if (dstSize == 0)
+		return ERROR(dstSize_tooSmall);
+	if (cSrcSize > dstSize)
+		return ERROR(corruption_detected); /* invalid */
+	if (cSrcSize == dstSize) {
+		memcpy(dst, cSrc, dstSize);
+		return dstSize;
+	} /* not compressed */
+	if (cSrcSize == 1) {
+		memset(dst, *(const BYTE *)cSrc, dstSize);
+		return dstSize;
+	} /* RLE */
+
+	{
+		U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+		return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
+			      : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
+	}
+}
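+
+/* Minimal usage sketch (hypothetical caller; `dt`, `wksp` and their sizing via
+ * the HUF_DTABLE_SIZE()/HUF_CREATE_STATIC_DTABLEX4() helpers declared in huf.h
+ * are assumptions for illustration only):
+ *
+ *	size_t const r = HUF_decompress4X_DCtx_wksp(dt, dst, dstSize, src, srcSize, wksp, wkspSize);
+ *	if (HUF_isError(r))
+ *		return r;
+ *	// on success, r == dstSize (regenerated size)
+ */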
diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h
new file mode 100644
index 0000000..3a0f34c
--- /dev/null
+++ b/lib/zstd/mem.h
@@ -0,0 +1,151 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include <asm/unaligned.h>
+#include <linux/string.h> /* memcpy */
+#include <linux/types.h>  /* size_t, ptrdiff_t */
+
+/*-****************************************
+*  Compiler specifics
+******************************************/
+#define ZSTD_STATIC static __inline __attribute__((unused))
+
+/*-**************************************************************
+*  Basic Types
+*****************************************************************/
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef int16_t S16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef int64_t S64;
+typedef ptrdiff_t iPtrDiff;
+typedef uintptr_t uPtrDiff;
+
+/*-**************************************************************
+*  Memory I/O
+*****************************************************************/
+ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
+ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
+
+#if defined(__LITTLE_ENDIAN)
+#define ZSTD_LITTLE_ENDIAN 1
+#else
+#define ZSTD_LITTLE_ENDIAN 0
+#endif
+
+ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
+
+ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
+
+ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
+
+ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
+
+ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
+
+ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
+
+ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
+
+ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
+
+/*=== Little endian r/w ===*/
+
+ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
+
+ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
+
+ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
+
+ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
+{
+	ZSTD_writeLE16(memPtr, (U16)val);
+	((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
+}
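+
+/* e.g. for the byte sequence {0x01, 0x02, 0x03}, ZSTD_readLE24 returns
+ * 0x030201 : 0x0201 from the 16-bit little-endian read plus 0x03 << 16. */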
+
+ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
+
+ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
+
+ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
+
+ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
+
+ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
+{
+	if (ZSTD_32bits())
+		return (size_t)ZSTD_readLE32(memPtr);
+	else
+		return (size_t)ZSTD_readLE64(memPtr);
+}
+
+ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
+{
+	if (ZSTD_32bits())
+		ZSTD_writeLE32(memPtr, (U32)val);
+	else
+		ZSTD_writeLE64(memPtr, (U64)val);
+}
+
+/*=== Big endian r/w ===*/
+
+ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
+
+ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
+
+ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
+
+ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
+
+ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
+{
+	if (ZSTD_32bits())
+		return (size_t)ZSTD_readBE32(memPtr);
+	else
+		return (size_t)ZSTD_readBE64(memPtr);
+}
+
+ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
+{
+	if (ZSTD_32bits())
+		ZSTD_writeBE32(memPtr, (U32)val);
+	else
+		ZSTD_writeBE64(memPtr, (U64)val);
+}
+
+/* function safe only for comparisons */
+ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
+{
+	switch (length) {
+	default:
+	case 4: return ZSTD_read32(memPtr);
+	case 3:
+		if (ZSTD_isLittleEndian())
+			return ZSTD_read32(memPtr) << 8;
+		else
+			return ZSTD_read32(memPtr) >> 8;
+	}
+}
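+
+/* e.g. with length==3 on a little-endian target, `<< 8` shifts the 4th byte
+ * out of the 32-bit read, so only the first 3 bytes take part in the
+ * comparison; the big-endian `>> 8` discards the 4th byte the same way. */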
+
+#endif /* MEM_H_MODULE */
diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
new file mode 100644
index 0000000..a282624
--- /dev/null
+++ b/lib/zstd/zstd_common.c
@@ -0,0 +1,75 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "error_private.h"
+#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
+#include <linux/kernel.h>
+
+/*=**************************************************************
+*  Custom allocator
+****************************************************************/
+
+#define stack_push(stack, size)                                 \
+	({                                                      \
+		void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
+		(stack)->ptr = (char *)ptr + (size);            \
+		(stack)->ptr <= (stack)->end ? ptr : NULL;      \
+	})
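+
+/* stack_push() is a bump allocator over [ptr, end): it aligns the current
+ * pointer, reserves `size` bytes, and yields NULL once the reservation would
+ * run past `end`. */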
+
+ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
+{
+	ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
+	ZSTD_stack *stack = (ZSTD_stack *)workspace;
+	/* Verify preconditions */
+	if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
+		ZSTD_customMem error = {NULL, NULL, NULL};
+		return error;
+	}
+	/* Initialize the stack */
+	stack->ptr = workspace;
+	stack->end = (char *)workspace + workspaceSize;
+	stack_push(stack, sizeof(ZSTD_stack));
+	return stackMem;
+}
+
+void *ZSTD_stackAllocAll(void *opaque, size_t *size)
+{
+	ZSTD_stack *stack = (ZSTD_stack *)opaque;
+	*size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
+	return stack_push(stack, *size);
+}
+
+void *ZSTD_stackAlloc(void *opaque, size_t size)
+{
+	ZSTD_stack *stack = (ZSTD_stack *)opaque;
+	return stack_push(stack, size);
+}
+void ZSTD_stackFree(void *opaque, void *address)
+{
+	(void)opaque;
+	(void)address;
+}
+
+void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
+
+void ZSTD_free(void *ptr, ZSTD_customMem customMem)
+{
+	if (ptr != NULL)
+		customMem.customFree(customMem.opaque, ptr);
+}
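+
+/* Minimal usage sketch (hypothetical caller; the identifiers `wksp` and `buf`
+ * and the sizes are illustrative only):
+ *
+ *	char wksp[4096] __aligned(sizeof(size_t));
+ *	ZSTD_customMem const mem = ZSTD_initStack(wksp, sizeof(wksp));
+ *	void *const buf = ZSTD_malloc(128, mem);	// bump-allocated from wksp, NULL if exhausted
+ *	ZSTD_free(buf, mem);				// no-op: the stack allocator never reclaims
+ */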
diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
new file mode 100644
index 0000000..1a79fab
--- /dev/null
+++ b/lib/zstd/zstd_internal.h
@@ -0,0 +1,263 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/*-*******************************************************
+*  Compiler specifics
+*********************************************************/
+#define FORCE_INLINE static __always_inline
+#define FORCE_NOINLINE static noinline
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "error_private.h"
+#include "mem.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/xxhash.h>
+#include <linux/zstd.h>
+
+/*-*************************************
+*  shared macros
+***************************************/
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#define CHECK_F(f)                       \
+	{                                \
+		size_t const errcod = f; \
+		if (ERR_isError(errcod)) \
+			return errcod;   \
+	} /* check and Forward error code */
+#define CHECK_E(f, e)                    \
+	{                                \
+		size_t const errcod = f; \
+		if (ERR_isError(errcod)) \
+			return ERROR(e); \
+	} /* check and send Error code */
+#define ZSTD_STATIC_ASSERT(c)                                   \
+	{                                                       \
+		enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \
+	}
+
+/*-*************************************
+*  Common constants
+***************************************/
+#define ZSTD_OPT_NUM (1 << 12)
+#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
+
+#define ZSTD_REP_NUM 3		      /* number of repcodes */
+#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
+#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1)
+#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
+static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8};
+
+#define KB *(1 << 10)
+#define MB *(1 << 20)
+#define GB *(1U << 30)
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8};
+static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4};
+
+#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
+static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
+
+#define MIN_SEQUENCES_SIZE 1									  /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+#define EQUAL_READ32 4
+
+#define Litbits 8
+#define MaxLit ((1 << Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define MaxOff 28
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+
+static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
+#define LL_DEFAULTNORMLOG 6 /* for static allocation */
+static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0, 0,
+				       0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  1,  1,  1,  1,  1,  1, 1,
+					      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
+#define ML_DEFAULTNORMLOG 6 /* for static allocation */
+static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
+#define OF_DEFAULTNORMLOG 5 /* for static allocation */
+static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+
+/*-*******************************************
+*  Shared functions to include for inlining
+*********************************************/
+ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
+	memcpy(dst, src, 8);
+}
+/*! ZSTD_wildcopy() :
+*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
+{
+	const BYTE *ip = (const BYTE *)src;
+	BYTE *op = (BYTE *)dst;
+	BYTE *const oend = op + length;
+	/* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
+	 * Avoid the bad case where the loop only runs once by handling the
+	 * special case separately. This doesn't trigger the bug because it
+	 * doesn't involve pointer/integer overflow.
+	 */
+	if (length <= 8) {
+		ZSTD_copy8(dst, src);
+		return;
+	}
+	do {
+		ZSTD_copy8(op, ip);
+		op += 8;
+		ip += 8;
+	} while (op < oend);
+}
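+
+/* Because copies proceed in 8-byte chunks, a call with e.g. length == 9 may
+ * write 16 bytes; callers must leave WILDCOPY_OVERLENGTH bytes of slack past
+ * the destination end. */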
+
+/*-*******************************************
+*  Private interfaces
+*********************************************/
+typedef struct ZSTD_stats_s ZSTD_stats_t;
+
+typedef struct {
+	U32 off;
+	U32 len;
+} ZSTD_match_t;
+
+typedef struct {
+	U32 price;
+	U32 off;
+	U32 mlen;
+	U32 litlen;
+	U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
+typedef struct seqDef_s {
+	U32 offset;
+	U16 litLength;
+	U16 matchLength;
+} seqDef;
+
+typedef struct {
+	seqDef *sequencesStart;
+	seqDef *sequences;
+	BYTE *litStart;
+	BYTE *lit;
+	BYTE *llCode;
+	BYTE *mlCode;
+	BYTE *ofCode;
+	U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+	U32 longLengthPos;
+	/* opt */
+	ZSTD_optimal_t *priceTable;
+	ZSTD_match_t *matchTable;
+	U32 *matchLengthFreq;
+	U32 *litLengthFreq;
+	U32 *litFreq;
+	U32 *offCodeFreq;
+	U32 matchLengthSum;
+	U32 matchSum;
+	U32 litLengthSum;
+	U32 litSum;
+	U32 offCodeSum;
+	U32 log2matchLengthSum;
+	U32 log2matchSum;
+	U32 log2litLengthSum;
+	U32 log2litSum;
+	U32 log2offCodeSum;
+	U32 factor;
+	U32 staticPrices;
+	U32 cachedPrice;
+	U32 cachedLitLength;
+	const BYTE *cachedLiterals;
+} seqStore_t;
+
+const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx);
+void ZSTD_seqToCodes(const seqStore_t *seqStorePtr);
+int ZSTD_isSkipFrame(ZSTD_DCtx *dctx);
+
+/*= Custom memory allocation functions */
+typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size);
+typedef void (*ZSTD_freeFunction)(void *opaque, void *address);
+typedef struct {
+	ZSTD_allocFunction customAlloc;
+	ZSTD_freeFunction customFree;
+	void *opaque;
+} ZSTD_customMem;
+
+void *ZSTD_malloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_free(void *ptr, ZSTD_customMem customMem);
+
+/*====== stack allocation  ======*/
+
+typedef struct {
+	void *ptr;
+	const void *end;
+} ZSTD_stack;
+
+#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
+#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
+
+ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize);
+
+void *ZSTD_stackAllocAll(void *opaque, size_t *size);
+void *ZSTD_stackAlloc(void *opaque, size_t size);
+void ZSTD_stackFree(void *opaque, void *address);
+
+/*======  common function  ======*/
+
+ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); }
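+
+/* note : val must be non-zero (__builtin_clz(0) is undefined);
+ * e.g. ZSTD_highbit32(1) == 0 and ZSTD_highbit32(16) == 4 */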
+
+/* hidden functions */
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx);
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx);
+size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx);
+size_t ZSTD_freeCDict(ZSTD_CDict *cdict);
+size_t ZSTD_freeDDict(ZSTD_DDict *ddict);
+size_t ZSTD_freeCStream(ZSTD_CStream *zcs);
+size_t ZSTD_freeDStream(ZSTD_DStream *zds);
+
+#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
new file mode 100644
index 0000000..55e1b4c
--- /dev/null
+++ b/lib/zstd/zstd_opt.h
@@ -0,0 +1,1014 @@
+/**
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+/* Note : this file is intended to be included within zstd_compress.c */
+
+#ifndef ZSTD_OPT_H_91842398743
+#define ZSTD_OPT_H_91842398743
+
+#define ZSTD_LITFREQ_ADD 2
+#define ZSTD_FREQ_DIV 4
+#define ZSTD_MAX_PRICE (1 << 30)
+
+/*-*************************************
+*  Price functions for optimal parser
+***************************************/
+FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr)
+{
+	ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1);
+	ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1);
+	ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1);
+	ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1);
+	ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum));
+}
+
+ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize)
+{
+	unsigned u;
+
+	ssPtr->cachedLiterals = NULL;
+	ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
+	ssPtr->staticPrices = 0;
+
+	if (ssPtr->litLengthSum == 0) {
+		if (srcSize <= 1024)
+			ssPtr->staticPrices = 1;
+
+		for (u = 0; u <= MaxLit; u++)
+			ssPtr->litFreq[u] = 0;
+		for (u = 0; u < srcSize; u++)
+			ssPtr->litFreq[src[u]]++;
+
+		ssPtr->litSum = 0;
+		ssPtr->litLengthSum = MaxLL + 1;
+		ssPtr->matchLengthSum = MaxML + 1;
+		ssPtr->offCodeSum = (MaxOff + 1);
+		ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits);
+
+		for (u = 0; u <= MaxLit; u++) {
+			ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV);
+			ssPtr->litSum += ssPtr->litFreq[u];
+		}
+		for (u = 0; u <= MaxLL; u++)
+			ssPtr->litLengthFreq[u] = 1;
+		for (u = 0; u <= MaxML; u++)
+			ssPtr->matchLengthFreq[u] = 1;
+		for (u = 0; u <= MaxOff; u++)
+			ssPtr->offCodeFreq[u] = 1;
+	} else {
+		ssPtr->matchLengthSum = 0;
+		ssPtr->litLengthSum = 0;
+		ssPtr->offCodeSum = 0;
+		ssPtr->matchSum = 0;
+		ssPtr->litSum = 0;
+
+		for (u = 0; u <= MaxLit; u++) {
+			ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1));
+			ssPtr->litSum += ssPtr->litFreq[u];
+		}
+		for (u = 0; u <= MaxLL; u++) {
+			ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1));
+			ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
+		}
+		for (u = 0; u <= MaxML; u++) {
+			ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV);
+			ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
+			ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
+		}
+		ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
+		for (u = 0; u <= MaxOff; u++) {
+			ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV);
+			ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
+		}
+	}
+
+	ZSTD_setLog2Prices(ssPtr);
+}
+
+FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals)
+{
+	U32 price, u;
+
+	if (ssPtr->staticPrices)
+		return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6);
+
+	if (litLength == 0)
+		return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1);
+
+	/* literals */
+	if (ssPtr->cachedLiterals == literals) {
+		U32 const additional = litLength - ssPtr->cachedLitLength;
+		const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
+		price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
+		for (u = 0; u < additional; u++)
+			price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1);
+		ssPtr->cachedPrice = price;
+		ssPtr->cachedLitLength = litLength;
+	} else {
+		price = litLength * ssPtr->log2litSum;
+		for (u = 0; u < litLength; u++)
+			price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1);
+
+		if (litLength >= 12) {
+			ssPtr->cachedLiterals = literals;
+			ssPtr->cachedPrice = price;
+			ssPtr->cachedLitLength = litLength;
+		}
+	}
+
+	/* literal Length */
+	{
+		const BYTE LL_deltaCode = 19;
+		const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+		price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1);
+	}
+
+	return price;
+}
+
+FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra)
+{
+	/* offset */
+	U32 price;
+	BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
+
+	if (seqStorePtr->staticPrices)
+		return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode;
+
+	price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1);
+	if (!ultra && offCode >= 20)
+		price += (offCode - 19) * 2;
+
+	/* match Length */
+	{
+		const BYTE ML_deltaCode = 36;
+		const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+		price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1);
+	}
+
+	return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
+}
+
+ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength)
+{
+	U32 u;
+
+	/* literals */
+	seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD;
+	for (u = 0; u < litLength; u++)
+		seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+
+	/* literal Length */
+	{
+		const BYTE LL_deltaCode = 19;
+		const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+		seqStorePtr->litLengthFreq[llCode]++;
+		seqStorePtr->litLengthSum++;
+	}
+
+	/* match offset */
+	{
+		BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
+		seqStorePtr->offCodeSum++;
+		seqStorePtr->offCodeFreq[offCode]++;
+	}
+
+	/* match Length */
+	{
+		const BYTE ML_deltaCode = 36;
+		const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
+		seqStorePtr->matchLengthFreq[mlCode]++;
+		seqStorePtr->matchLengthSum++;
+	}
+
+	ZSTD_setLog2Prices(seqStorePtr);
+}
+
+#define SET_PRICE(pos, mlen_, offset_, litlen_, price_)           \
+	{                                                         \
+		while (last_pos < pos) {                          \
+			opt[last_pos + 1].price = ZSTD_MAX_PRICE; \
+			last_pos++;                               \
+		}                                                 \
+		opt[pos].mlen = mlen_;                            \
+		opt[pos].off = offset_;                           \
+		opt[pos].litlen = litlen_;                        \
+		opt[pos].price = price_;                          \
+	}
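+
+/* SET_PRICE() first pads any not-yet-reached positions up to `pos` with
+ * ZSTD_MAX_PRICE so they can never win a later comparison, then records the
+ * candidate (mlen, offset, litlen, price) at `pos`, advancing last_pos to
+ * `pos` when needed. */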
+
+/* Update hashTable3 up to ip (excluded)
+   Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE
+U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip)
+{
+	U32 *const hashTable3 = zc->hashTable3;
+	U32 const hashLog3 = zc->hashLog3;
+	const BYTE *const base = zc->base;
+	U32 idx = zc->nextToUpdate3;
+	const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
+	const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+
+	while (idx < target) {
+		hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx;
+		idx++;
+	}
+
+	return hashTable3[hash3];
+}
+
+/*-*************************************
+*  Binary Tree search
+***************************************/
+static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict,
+					 ZSTD_match_t *matches, const U32 minMatchLen)
+{
+	const BYTE *const base = zc->base;
+	const U32 curr = (U32)(ip - base);
+	const U32 hashLog = zc->params.cParams.hashLog;
+	const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
+	U32 *const hashTable = zc->hashTable;
+	U32 matchIndex = hashTable[h];
+	U32 *const bt = zc->chainTable;
+	const U32 btLog = zc->params.cParams.chainLog - 1;
+	const U32 btMask = (1U << btLog) - 1;
+	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
+	const BYTE *const dictBase = zc->dictBase;
+	const U32 dictLimit = zc->dictLimit;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+	const BYTE *const prefixStart = base + dictLimit;
+	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+	const U32 windowLow = zc->lowLimit;
+	U32 *smallerPtr = bt + 2 * (curr & btMask);
+	U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
+	U32 matchEndIdx = curr + 8;
+	U32 dummy32; /* to be nullified at the end */
+	U32 mnum = 0;
+
+	const U32 minMatch = (mls == 3) ? 3 : 4;
+	size_t bestLength = minMatchLen - 1;
+
+	if (minMatch == 3) { /* HC3 match finder */
+		U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip);
+		if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
+			const BYTE *match;
+			size_t currMl = 0;
+			if ((!extDict) || matchIndex3 >= dictLimit) {
+				match = base + matchIndex3;
+				if (match[bestLength] == ip[bestLength])
+					currMl = ZSTD_count(ip, match, iLimit);
+			} else {
+				match = dictBase + matchIndex3;
+				if (ZSTD_readMINMATCH(match, MINMATCH) ==
+				    ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
+					currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
+			}
+
+			/* save best solution */
+			if (currMl > bestLength) {
+				bestLength = currMl;
+				matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
+				matches[mnum].len = (U32)currMl;
+				mnum++;
+				if (currMl > ZSTD_OPT_NUM)
+					goto update;
+				if (ip + currMl == iLimit)
+					goto update; /* best possible, and avoid read overflow*/
+			}
+		}
+	}
+
+	hashTable[h] = curr; /* Update Hash Table */
+
+	while (nbCompares-- && (matchIndex > windowLow)) {
+		U32 *nextPtr = bt + 2 * (matchIndex & btMask);
+		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+		const BYTE *match;
+
+		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
+			match = base + matchIndex;
+			if (match[matchLength] == ip[matchLength]) {
+				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1;
+			}
+		} else {
+			match = dictBase + matchIndex;
+			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart);
+			if (matchIndex + matchLength >= dictLimit)
+				match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+		}
+
+		if (matchLength > bestLength) {
+			if (matchLength > matchEndIdx - matchIndex)
+				matchEndIdx = matchIndex + (U32)matchLength;
+			bestLength = matchLength;
+			matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
+			matches[mnum].len = (U32)matchLength;
+			mnum++;
+			if (matchLength > ZSTD_OPT_NUM)
+				break;
+			if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */
+				break;			/* drop, to guarantee consistency (miss a little bit of compression) */
+		}
+
+		if (match[matchLength] < ip[matchLength]) {
+			/* match is smaller than curr */
+			*smallerPtr = matchIndex;	  /* update smaller idx */
+			commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+			if (matchIndex <= btLow) {
+				smallerPtr = &dummy32;
+				break;
+			}			  /* beyond tree size, stop the search */
+			smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
+			matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
+		} else {
+			/* match is larger than curr */
+			*largerPtr = matchIndex;
+			commonLengthLarger = matchLength;
+			if (matchIndex <= btLow) {
+				largerPtr = &dummy32;
+				break;
+			} /* beyond tree size, stop the search */
+			largerPtr = nextPtr;
+			matchIndex = nextPtr[0];
+		}
+	}
+
+	*smallerPtr = *largerPtr = 0;
+
+update:
+	zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
+	return mnum;
+}
+
+/** Tree updater, providing best match */
+static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches,
+				const U32 minMatchLen)
+{
+	if (ip < zc->base + zc->nextToUpdate)
+		return 0; /* skipped area */
+	ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+	return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
+}
+
+static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
+					  const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
+					  ZSTD_match_t *matches, const U32 minMatchLen)
+{
+	switch (matchLengthSearch) {
+	case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
+	default:
+	case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
+	case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
+	case 7:
+	case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
+	}
+}
+
+/** Tree updater, providing best match */
+static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls,
+					ZSTD_match_t *matches, const U32 minMatchLen)
+{
+	if (ip < zc->base + zc->nextToUpdate)
+		return 0; /* skipped area */
+	ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
+	return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
+}
+
+static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
+						  const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
+						  ZSTD_match_t *matches, const U32 minMatchLen)
+{
+	switch (matchLengthSearch) {
+	case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
+	default:
+	case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
+	case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
+	case 7:
+	case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
+	}
+}
+
+/*-*******************************
+*  Optimal parser
+*********************************/
+FORCE_INLINE
+void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
+{
+	seqStore_t *seqStorePtr = &(ctx->seqStore);
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - 8;
+	const BYTE *const base = ctx->base;
+	const BYTE *const prefixStart = base + ctx->dictLimit;
+
+	const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
+	const U32 sufficient_len = ctx->params.cParams.targetLength;
+	const U32 mls = ctx->params.cParams.searchLength;
+	const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
+
+	ZSTD_optimal_t *opt = seqStorePtr->priceTable;
+	ZSTD_match_t *matches = seqStorePtr->matchTable;
+	const BYTE *inr;
+	U32 offset, rep[ZSTD_REP_NUM];
+
+	/* init */
+	ctx->nextToUpdate3 = ctx->nextToUpdate;
+	ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
+	ip += (ip == prefixStart);
+	{
+		U32 i;
+		for (i = 0; i < ZSTD_REP_NUM; i++)
+			rep[i] = ctx->rep[i];
+	}
+
+	/* Match Loop */
+	while (ip < ilimit) {
+		U32 cur, match_num, last_pos, litlen, price;
+		U32 u, mlen, best_mlen, best_off, litLength;
+		memset(opt, 0, sizeof(ZSTD_optimal_t));
+		last_pos = 0;
+		litlen = (U32)(ip - anchor);
+
+		/* check repCode */
+		{
+			U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
+			for (i = (ip == anchor); i < last_i; i++) {
+				const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+				if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) &&
+				    (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
+					mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch;
+					if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
+						best_mlen = mlen;
+						best_off = i;
+						cur = 0;
+						last_pos = 1;
+						goto _storeSequence;
+					}
+					best_off = i - (ip == anchor);
+					do {
+						price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+						if (mlen > last_pos || price < opt[mlen].price)
+							SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
+						mlen--;
+					} while (mlen >= minMatch);
+				}
+			}
+		}
+
+		match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
+
+		if (!last_pos && !match_num) {
+			ip++;
+			continue;
+		}
+
+		if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
+			best_mlen = matches[match_num - 1].len;
+			best_off = matches[match_num - 1].off;
+			cur = 0;
+			last_pos = 1;
+			goto _storeSequence;
+		}
+
+		/* set prices using matches at position = 0 */
+		best_mlen = (last_pos) ? last_pos : minMatch;
+		for (u = 0; u < match_num; u++) {
+			mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
+			best_mlen = matches[u].len;
+			while (mlen <= best_mlen) {
+				price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
+				if (mlen > last_pos || price < opt[mlen].price)
+					SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
+				mlen++;
+			}
+		}
+
+		if (last_pos < minMatch) {
+			ip++;
+			continue;
+		}
+
+		/* initialize opt[0] */
+		{
+			U32 i;
+			for (i = 0; i < ZSTD_REP_NUM; i++)
+				opt[0].rep[i] = rep[i];
+		}
+		opt[0].mlen = 1;
+		opt[0].litlen = litlen;
+
+		/* check further positions */
+		for (cur = 1; cur <= last_pos; cur++) {
+			inr = ip + cur;
+
+			if (opt[cur - 1].mlen == 1) {
+				litlen = opt[cur - 1].litlen + 1;
+				if (cur > litlen) {
+					price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
+				} else
+					price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
+			} else {
+				litlen = 1;
+				price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
+			}
+
+			if (cur > last_pos || price <= opt[cur].price)
+				SET_PRICE(cur, 1, 0, litlen, price);
+
+			if (cur == last_pos)
+				break;
+
+			if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
+				continue;
+
+			mlen = opt[cur].mlen;
+			if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
+				opt[cur].rep[2] = opt[cur - mlen].rep[1];
+				opt[cur].rep[1] = opt[cur - mlen].rep[0];
+				opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
+			} else {
+				opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
+				opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
+				opt[cur].rep[0] =
+				    ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
+			}
+
+			best_mlen = minMatch;
+			{
+				U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+				for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */
+					const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
+					if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) &&
+					    (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
+						mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch;
+
+						if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
+							best_mlen = mlen;
+							best_off = i;
+							last_pos = cur + 1;
+							goto _storeSequence;
+						}
+
+						best_off = i - (opt[cur].mlen != 1);
+						if (mlen > best_mlen)
+							best_mlen = mlen;
+
+						do {
+							if (opt[cur].mlen == 1) {
+								litlen = opt[cur].litlen;
+								if (cur > litlen) {
+									price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
+															best_off, mlen - MINMATCH, ultra);
+								} else
+									price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+							} else {
+								litlen = 0;
+								price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
+							}
+
+							if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
+								SET_PRICE(cur + mlen, mlen, i, litlen, price);
+							mlen--;
+						} while (mlen >= minMatch);
+					}
+				}
+			}
+
+			match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
+
+			if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
+				best_mlen = matches[match_num - 1].len;
+				best_off = matches[match_num - 1].off;
+				last_pos = cur + 1;
+				goto _storeSequence;
+			}
+
+			/* set prices using matches at position = cur */
+			for (u = 0; u < match_num; u++) {
+				mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
+				best_mlen = matches[u].len;
+
+				while (mlen <= best_mlen) {
+					if (opt[cur].mlen == 1) {
+						litlen = opt[cur].litlen;
+						if (cur > litlen)
+							price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
+													matches[u].off - 1, mlen - MINMATCH, ultra);
+						else
+							price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
+					} else {
+						litlen = 0;
+						price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
+					}
+
+					if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
+						SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
+
+					mlen++;
+				}
+			}
+		}
+
+		best_mlen = opt[last_pos].mlen;
+		best_off = opt[last_pos].off;
+		cur = last_pos - best_mlen;
+
+	/* store sequence */
+_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
+		opt[0].mlen = 1;
+
+		while (1) {
+			mlen = opt[cur].mlen;
+			offset = opt[cur].off;
+			opt[cur].mlen = best_mlen;
+			opt[cur].off = best_off;
+			best_mlen = mlen;
+			best_off = offset;
+			if (mlen > cur)
+				break;
+			cur -= mlen;
+		}
+
+		for (u = 0; u <= last_pos;) {
+			u += opt[u].mlen;
+		}
+
+		for (cur = 0; cur < last_pos;) {
+			mlen = opt[cur].mlen;
+			if (mlen == 1) {
+				ip++;
+				cur++;
+				continue;
+			}
+			offset = opt[cur].off;
+			cur += mlen;
+			litLength = (U32)(ip - anchor);
+
+			if (offset > ZSTD_REP_MOVE_OPT) {
+				rep[2] = rep[1];
+				rep[1] = rep[0];
+				rep[0] = offset - ZSTD_REP_MOVE_OPT;
+				offset--;
+			} else {
+				if (offset != 0) {
+					best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
+					if (offset != 1)
+						rep[2] = rep[1];
+					rep[1] = rep[0];
+					rep[0] = best_off;
+				}
+				if (litLength == 0)
+					offset--;
+			}
+
+			ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
+			ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
+			anchor = ip = ip + mlen;
+		}
+	} /* while (ip < ilimit) */
+
+	/* Save reps for next block */
+	{
+		int i;
+		for (i = 0; i < ZSTD_REP_NUM; i++)
+			ctx->repToConfirm[i] = rep[i];
+	}
+
+	/* Last Literals */
+	{
+		size_t const lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+FORCE_INLINE
+void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
+{
+	seqStore_t *seqStorePtr = &(ctx->seqStore);
+	const BYTE *const istart = (const BYTE *)src;
+	const BYTE *ip = istart;
+	const BYTE *anchor = istart;
+	const BYTE *const iend = istart + srcSize;
+	const BYTE *const ilimit = iend - 8;
+	const BYTE *const base = ctx->base;
+	const U32 lowestIndex = ctx->lowLimit;
+	const U32 dictLimit = ctx->dictLimit;
+	const BYTE *const prefixStart = base + dictLimit;
+	const BYTE *const dictBase = ctx->dictBase;
+	const BYTE *const dictEnd = dictBase + dictLimit;
+
+	const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
+	const U32 sufficient_len = ctx->params.cParams.targetLength;
+	const U32 mls = ctx->params.cParams.searchLength;
+	const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
+
+	ZSTD_optimal_t *opt = seqStorePtr->priceTable;
+	ZSTD_match_t *matches = seqStorePtr->matchTable;
+	const BYTE *inr;
+
+	/* init */
+	U32 offset, rep[ZSTD_REP_NUM];
+	{
+		U32 i;
+		for (i = 0; i < ZSTD_REP_NUM; i++)
+			rep[i] = ctx->rep[i];
+	}
+
+	ctx->nextToUpdate3 = ctx->nextToUpdate;
+	ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
+	ip += (ip == prefixStart);
+
+	/* Match Loop */
+	while (ip < ilimit) {
+		U32 cur, match_num, last_pos, litlen, price;
+		U32 u, mlen, best_mlen, best_off, litLength;
+		U32 curr = (U32)(ip - base);
+		memset(opt, 0, sizeof(ZSTD_optimal_t));
+		last_pos = 0;
+		opt[0].litlen = (U32)(ip - anchor);
+
+		/* check repCode */
+		{
+			U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
+			for (i = (ip == anchor); i < last_i; i++) {
+				const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+				const U32 repIndex = (U32)(curr - repCur);
+				const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
+				const BYTE *const repMatch = repBase + repIndex;
+				if ((repCur > 0 && repCur <= (S32)curr) &&
+				    (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+				    && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
+					/* repcode detected, we should take it */
+					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
+					mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
+
+					if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
+						best_mlen = mlen;
+						best_off = i;
+						cur = 0;
+						last_pos = 1;
+						goto _storeSequence;
+					}
+
+					best_off = i - (ip == anchor);
+					litlen = opt[0].litlen;
+					do {
+						price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+						if (mlen > last_pos || price < opt[mlen].price)
+							SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
+						mlen--;
+					} while (mlen >= minMatch);
+				}
+			}
+		}
+
+		match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
+
+		if (!last_pos && !match_num) {
+			ip++;
+			continue;
+		}
+
+		{
+			U32 i;
+			for (i = 0; i < ZSTD_REP_NUM; i++)
+				opt[0].rep[i] = rep[i];
+		}
+		opt[0].mlen = 1;
+
+		if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
+			best_mlen = matches[match_num - 1].len;
+			best_off = matches[match_num - 1].off;
+			cur = 0;
+			last_pos = 1;
+			goto _storeSequence;
+		}
+
+		best_mlen = (last_pos) ? last_pos : minMatch;
+
+		/* set prices using matches at position = 0 */
+		for (u = 0; u < match_num; u++) {
+			mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
+			best_mlen = matches[u].len;
+			litlen = opt[0].litlen;
+			while (mlen <= best_mlen) {
+				price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
+				if (mlen > last_pos || price < opt[mlen].price)
+					SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
+				mlen++;
+			}
+		}
+
+		if (last_pos < minMatch) {
+			ip++;
+			continue;
+		}
+
+		/* check further positions */
+		for (cur = 1; cur <= last_pos; cur++) {
+			inr = ip + cur;
+
+			if (opt[cur - 1].mlen == 1) {
+				litlen = opt[cur - 1].litlen + 1;
+				if (cur > litlen) {
+					price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
+				} else
+					price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
+			} else {
+				litlen = 1;
+				price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
+			}
+
+			if (cur > last_pos || price <= opt[cur].price)
+				SET_PRICE(cur, 1, 0, litlen, price);
+
+			if (cur == last_pos)
+				break;
+
+			if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
+				continue;
+
+			mlen = opt[cur].mlen;
+			if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
+				opt[cur].rep[2] = opt[cur - mlen].rep[1];
+				opt[cur].rep[1] = opt[cur - mlen].rep[0];
+				opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
+			} else {
+				opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
+				opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
+				opt[cur].rep[0] =
+				    ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
+			}
+
+			best_mlen = minMatch;
+			{
+				U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
+				for (i = (mlen != 1); i < last_i; i++) {
+					const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
+					const U32 repIndex = (U32)(curr + cur - repCur);
+					const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
+					const BYTE *const repMatch = repBase + repIndex;
+					if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
+					    (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+					    && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
+						/* repcode detected */
+						const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
+						mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
+
+						if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
+							best_mlen = mlen;
+							best_off = i;
+							last_pos = cur + 1;
+							goto _storeSequence;
+						}
+
+						best_off = i - (opt[cur].mlen != 1);
+						if (mlen > best_mlen)
+							best_mlen = mlen;
+
+						do {
+							if (opt[cur].mlen == 1) {
+								litlen = opt[cur].litlen;
+								if (cur > litlen) {
+									price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
+															best_off, mlen - MINMATCH, ultra);
+								} else
+									price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+							} else {
+								litlen = 0;
+								price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
+							}
+
+							if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
+								SET_PRICE(cur + mlen, mlen, i, litlen, price);
+							mlen--;
+						} while (mlen >= minMatch);
+					}
+				}
+			}
+
+			match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
+
+			if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
+				best_mlen = matches[match_num - 1].len;
+				best_off = matches[match_num - 1].off;
+				last_pos = cur + 1;
+				goto _storeSequence;
+			}
+
+			/* set prices using matches at position = cur */
+			for (u = 0; u < match_num; u++) {
+				mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
+				best_mlen = matches[u].len;
+
+				while (mlen <= best_mlen) {
+					if (opt[cur].mlen == 1) {
+						litlen = opt[cur].litlen;
+						if (cur > litlen)
+							price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
+													matches[u].off - 1, mlen - MINMATCH, ultra);
+						else
+							price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
+					} else {
+						litlen = 0;
+						price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
+					}
+
+					if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
+						SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
+
+					mlen++;
+				}
+			}
+		} /* for (cur = 1; cur <= last_pos; cur++) */
+
+		best_mlen = opt[last_pos].mlen;
+		best_off = opt[last_pos].off;
+		cur = last_pos - best_mlen;
+
+	/* store sequence */
+_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
+		opt[0].mlen = 1;
+
+		while (1) {
+			mlen = opt[cur].mlen;
+			offset = opt[cur].off;
+			opt[cur].mlen = best_mlen;
+			opt[cur].off = best_off;
+			best_mlen = mlen;
+			best_off = offset;
+			if (mlen > cur)
+				break;
+			cur -= mlen;
+		}
+
+		for (u = 0; u <= last_pos;) {
+			u += opt[u].mlen;
+		}
+
+		for (cur = 0; cur < last_pos;) {
+			mlen = opt[cur].mlen;
+			if (mlen == 1) {
+				ip++;
+				cur++;
+				continue;
+			}
+			offset = opt[cur].off;
+			cur += mlen;
+			litLength = (U32)(ip - anchor);
+
+			if (offset > ZSTD_REP_MOVE_OPT) {
+				rep[2] = rep[1];
+				rep[1] = rep[0];
+				rep[0] = offset - ZSTD_REP_MOVE_OPT;
+				offset--;
+			} else {
+				if (offset != 0) {
+					best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
+					if (offset != 1)
+						rep[2] = rep[1];
+					rep[1] = rep[0];
+					rep[0] = best_off;
+				}
+
+				if (litLength == 0)
+					offset--;
+			}
+
+			ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
+			ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
+			anchor = ip = ip + mlen;
+		}
+	} /* for (cur=0; cur < last_pos; ) */
+
+	/* Save reps for next block */
+	{
+		int i;
+		for (i = 0; i < ZSTD_REP_NUM; i++)
+			ctx->repToConfirm[i] = rep[i];
+	}
+
+	/* Last Literals */
+	{
+		size_t lastLLSize = iend - anchor;
+		memcpy(seqStorePtr->lit, anchor, lastLLSize);
+		seqStorePtr->lit += lastLLSize;
+	}
+}
+
+#endif /* ZSTD_OPT_H_91842398743 */
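The sequence-store loop above keeps a three-slot history of recent offsets: a brand-new offset pushes the whole history back, while reusing repcode i rotates that slot to the front (the litLength == 0 and ZSTD_REP_MOVE_OPT special cases are left out here). A minimal userspace sketch of just that bookkeeping follows; everything outside the rotation itself is invented for illustration.

/*
 * Illustrative sketch of the repeat-offset history used by the optimal
 * parser above.  Only the rotation rules are taken from the code; the
 * rest is test scaffolding.
 */
#include <stdio.h>

static void push_new_offset(unsigned rep[3], unsigned new_off)
{
	rep[2] = rep[1];
	rep[1] = rep[0];
	rep[0] = new_off;
}

static void use_repcode(unsigned rep[3], unsigned idx)
{
	unsigned best = rep[idx];

	if (idx != 1)		/* reusing rep[1] is a plain swap */
		rep[2] = rep[1];
	rep[1] = rep[0];
	rep[0] = best;
}

int main(void)
{
	unsigned rep[3] = { 4, 8, 16 };

	use_repcode(rep, 2);		/* -> { 16, 4, 8 } */
	push_new_offset(rep, 32);	/* -> { 32, 16, 4 } */
	printf("%u %u %u\n", rep[0], rep[1], rep[2]);
	return 0;
}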
diff --git a/mm/debug.c b/mm/debug.c
index 9feb699..bebe48a 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -95,7 +95,7 @@
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+	pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %p\n"
 #endif
@@ -125,7 +125,7 @@
 #endif
 		"def_flags: %#lx(%pGv)\n",
 
-		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 27fc9ad..eb3269e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -68,8 +68,12 @@
 		goto out;
 	}
 
-	/* Careful about overflows. Len == 0 means "as much as possible" */
-	endbyte = offset + len;
+	/*
+	 * Careful about overflows. Len == 0 means "as much as possible".  Use
+	 * unsigned math because signed overflows are undefined and UBSan
+	 * complains.
+	 */
+	endbyte = (u64)offset + (u64)len;
 	if (!len || endbyte < len)
 		endbyte = -1;
 	else
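The fadvise hunk above computes endbyte in unsigned arithmetic so a large offset + len cannot trigger signed-overflow UB. A self-contained sketch of the same idea follows; long long stands in for loff_t, and the out-of-range test is written as an explicit cap rather than the kernel's signed-comparison trick, purely to keep the sketch small.

/*
 * Sketch only: unsigned math for the sum, explicit cap for the
 * "cannot be represented" case, -1 meaning "to the end of the file".
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static long long end_byte(long long offset, long long len)
{
	uint64_t endbyte = (uint64_t)offset + (uint64_t)len;

	/* len == 0 means "as much as possible"; a sum past LLONG_MAX cannot
	 * be represented as a file offset, so treat it the same way. */
	if (!len || endbyte > (uint64_t)LLONG_MAX)
		return -1;
	return (long long)endbyte - 1;	/* inclusive last byte */
}

int main(void)
{
	printf("%lld\n", end_byte(4096, 512));		/* 4607 */
	printf("%lld\n", end_byte(LLONG_MAX, 2));	/* -1: would overflow */
	printf("%lld\n", end_byte(4096, 0));		/* -1: to end of file */
	return 0;
}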
diff --git a/mm/filemap.c b/mm/filemap.c
index b27d2ca..4fcd8ee 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -441,19 +441,17 @@
 		goto out;
 
 	pagevec_init(&pvec, 0);
-	while ((index <= end) &&
-			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_WRITEBACK,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
+	while (index <= end) {
 		unsigned i;
 
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
+				end, PAGECACHE_TAG_WRITEBACK);
+		if (!nr_pages)
+			break;
+
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			/* until radix tree lookup accepts end_index */
-			if (page->index > end)
-				continue;
-
 			wait_on_page_writeback(page);
 			if (TestClearPageError(page))
 				ret = -EIO;
@@ -1495,9 +1493,10 @@
 EXPORT_SYMBOL(find_get_pages_contig);
 
 /**
- * find_get_pages_tag - find and return pages that match @tag
+ * find_get_pages_range_tag - find and return pages in given range matching @tag
  * @mapping:	the address_space to search
  * @index:	the starting page index
+ * @end:	The final page index (inclusive)
  * @tag:	the tag index
  * @nr_pages:	the maximum number of pages
  * @pages:	where the resulting pages are placed
@@ -1505,8 +1504,9 @@
  * Like find_get_pages, except we only return pages which are tagged with
  * @tag.   We update @index to index the next page for the traversal.
  */
-unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
-			int tag, unsigned int nr_pages, struct page **pages)
+unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
+			pgoff_t end, int tag, unsigned int nr_pages,
+			struct page **pages)
 {
 	struct radix_tree_iter iter;
 	void **slot;
@@ -1519,6 +1519,9 @@
 	radix_tree_for_each_tagged(slot, &mapping->page_tree,
 				   &iter, *index, tag) {
 		struct page *head, *page;
+
+		if (iter.index > end)
+			break;
 repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
@@ -1560,18 +1563,28 @@
 		}
 
 		pages[ret] = page;
-		if (++ret == nr_pages)
-			break;
+		if (++ret == nr_pages) {
+			*index = pages[ret - 1]->index + 1;
+			goto out;
+		}
 	}
 
+	/*
+	 * We come here once we have reached @end. We take care not to overflow
+	 * @index as that confuses some of the callers. This breaks the
+	 * iteration when there is a page at index -1, but that is already
+	 * broken anyway.
+	 */
+	if (end == (pgoff_t)-1)
+		*index = (pgoff_t)-1;
+	else
+		*index = end + 1;
+out:
 	rcu_read_unlock();
 
-	if (ret)
-		*index = pages[ret - 1]->index + 1;
-
 	return ret;
 }
-EXPORT_SYMBOL(find_get_pages_tag);
+EXPORT_SYMBOL(find_get_pages_range_tag);
 
 /**
  * find_get_entries_tag - find and return entries that match @tag
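The new find_get_pages_range_tag() takes an inclusive end index and advances *index itself, so callers can drop their own page->index > end checks and simply loop until the helper returns 0. A userspace model of that contract follows; the tagged-page store and the batch size are invented for illustration.

/*
 * Model of the lookup contract: return up to a batch of tagged indices
 * in [*index, end] and advance *index past the last one returned (or
 * past end once the range is exhausted).
 */
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16
#define BATCH  4

static bool tagged[NPAGES] = { [2] = true, [3] = true, [9] = true, [14] = true };

static unsigned lookup_range_tag(unsigned *index, unsigned end,
				 unsigned *out, unsigned batch)
{
	unsigned n = 0, i;

	for (i = *index; i <= end && i < NPAGES && n < batch; i++)
		if (tagged[i])
			out[n++] = i;

	/* Advance the cursor so the caller can simply loop until 0. */
	*index = (n == batch) ? out[n - 1] + 1 : end + 1;
	return n;
}

int main(void)
{
	unsigned index = 0, end = NPAGES - 1, found[BATCH], n, i;

	while (index <= end && (n = lookup_range_tag(&index, end, found, BATCH))) {
		for (i = 0; i < n; i++)
			printf("page %u\n", found[i]);
	}
	return 0;
}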
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a557862..748079a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1259,12 +1259,12 @@
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
+		page_nid = -1;
 		if (!get_page_unless_zero(page))
 			goto out_unlock;
 		spin_unlock(fe->ptl);
 		wait_on_page_locked(page);
 		put_page(page);
-		page_nid = -1;
 		goto out;
 	}
 
@@ -1642,6 +1642,8 @@
 		if (vma_is_dax(vma))
 			return;
 		page = pmd_page(_pmd);
+		if (!PageDirty(page) && pmd_dirty(_pmd))
+			set_page_dirty(page);
 		if (!PageReferenced(page) && pmd_young(_pmd))
 			SetPageReferenced(page);
 		page_remove_rmap(page, true);
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 7d78b5f..627c699 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -671,12 +671,13 @@
 int kasan_module_alloc(void *addr, size_t size)
 {
 	void *ret;
+	size_t scaled_size;
 	size_t shadow_size;
 	unsigned long shadow_start;
 
 	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
-			PAGE_SIZE);
+	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+	shadow_size = round_up(scaled_size, PAGE_SIZE);
 
 	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
 		return -EINVAL;
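The KASAN fix above rounds the byte count up before shifting, so a region whose size is not a multiple of the shadow granule still gets shadow coverage for its trailing bytes. A small arithmetic sketch; the 1/8 scale (shift 3) is the usual KASAN configuration and is assumed here only for illustration.

/* Truncating shift vs. round-up-then-shift for shadow sizing. */
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3
#define SHADOW_MASK        ((1UL << SHADOW_SCALE_SHIFT) - 1)

static unsigned long shadow_old(unsigned long size)
{
	return size >> SHADOW_SCALE_SHIFT;			/* truncates */
}

static unsigned long shadow_new(unsigned long size)
{
	return (size + SHADOW_MASK) >> SHADOW_SCALE_SHIFT;	/* rounds up */
}

int main(void)
{
	unsigned long size = 20;	/* not a multiple of 8 */

	printf("old: %lu shadow bytes\n", shadow_old(size));	/* 2 */
	printf("new: %lu shadow bytes\n", shadow_new(size));	/* 3 */
	return 0;
}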
diff --git a/mm/madvise.c b/mm/madvise.c
index ee7ad9b..b753f02 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,7 +83,7 @@
 		new_flags |= VM_DONTDUMP;
 		break;
 	case MADV_DODUMP:
-		if (new_flags & VM_SPECIAL) {
+		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
 			error = -EINVAL;
 			goto out;
 		}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 34eec18..edaa133 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -895,7 +895,7 @@
 	int nid;
 	int i;
 
-	while ((memcg = parent_mem_cgroup(memcg))) {
+	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 		for_each_node(nid) {
 			mz = mem_cgroup_nodeinfo(memcg, nid);
 			for (i = 0; i <= DEF_PRIORITY; i++) {
@@ -4091,6 +4091,14 @@
 
 static DEFINE_IDR(mem_cgroup_idr);
 
+static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
+{
+	if (memcg->id.id > 0) {
+		idr_remove(&mem_cgroup_idr, memcg->id.id);
+		memcg->id.id = 0;
+	}
+}
+
 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
 {
 	VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
@@ -4101,8 +4109,7 @@
 {
 	VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
 	if (atomic_sub_and_test(n, &memcg->id.ref)) {
-		idr_remove(&mem_cgroup_idr, memcg->id.id);
-		memcg->id.id = 0;
+		mem_cgroup_id_remove(memcg);
 
 		/* Memcg ID pins CSS */
 		css_put(&memcg->css);
@@ -4227,8 +4234,7 @@
 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
 	return memcg;
 fail:
-	if (memcg->id.id > 0)
-		idr_remove(&mem_cgroup_idr, memcg->id.id);
+	mem_cgroup_id_remove(memcg);
 	__mem_cgroup_free(memcg);
 	return NULL;
 }
@@ -4287,6 +4293,7 @@
 
 	return &memcg->css;
 fail:
+	mem_cgroup_id_remove(memcg);
 	mem_cgroup_free(memcg);
 	return ERR_PTR(-ENOMEM);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 7a88700..107cebb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -376,15 +376,6 @@
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
-	/*
-	 * When there's less then two users of this mm there cannot be a
-	 * concurrent page-table walk.
-	 */
-	if (atomic_read(&tlb->mm->mm_users) < 2) {
-		__tlb_remove_table(table);
-		return;
-	}
-
 	if (*batch == NULL) {
 		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {
@@ -1650,6 +1641,9 @@
 	if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
 		return -EINVAL;
 
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
+
 	ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
 
 	return ret;
@@ -1668,6 +1662,9 @@
 	if (track_pfn_insert(vma, &pgprot, pfn))
 		return -EINVAL;
 
+	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
+		return -EACCES;
+
 	/*
 	 * If we don't have pte special, then we have to use the pfn_valid()
 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
@@ -1701,6 +1698,7 @@
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
@@ -1708,12 +1706,16 @@
 	arch_enter_lazy_mmu_mode();
 	do {
 		BUG_ON(!pte_none(*pte));
+		if (!pfn_modify_allowed(pfn, prot)) {
+			err = -EACCES;
+			break;
+		}
 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1722,6 +1724,7 @@
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pmd = pmd_alloc(mm, pud, addr);
@@ -1730,9 +1733,10 @@
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
-		if (remap_pte_range(mm, pmd, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pte_range(mm, pmd, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
@@ -1743,6 +1747,7 @@
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pud = pud_alloc(mm, pgd, addr);
@@ -1750,9 +1755,10 @@
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (remap_pmd_range(mm, pud, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pmd_range(mm, pud, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
@@ -4320,6 +4326,9 @@
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+	if (!maddr)
+		return -ENOMEM;
+
 	if (write)
 		memcpy_toio(maddr + offset, buf, len);
 	else
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 60b16418..f5c3987 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -260,6 +260,42 @@
 	return pages;
 }
 
+static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
+			       unsigned long next, struct mm_walk *walk)
+{
+	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+		0 : -EACCES;
+}
+
+static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
+				   unsigned long addr, unsigned long next,
+				   struct mm_walk *walk)
+{
+	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+		0 : -EACCES;
+}
+
+static int prot_none_test(unsigned long addr, unsigned long next,
+			  struct mm_walk *walk)
+{
+	return 0;
+}
+
+static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
+			   unsigned long end, unsigned long newflags)
+{
+	pgprot_t new_pgprot = vm_get_page_prot(newflags);
+	struct mm_walk prot_none_walk = {
+		.pte_entry = prot_none_pte_entry,
+		.hugetlb_entry = prot_none_hugetlb_entry,
+		.test_walk = prot_none_test,
+		.mm = current->mm,
+		.private = &new_pgprot,
+	};
+
+	return walk_page_range(start, end, &prot_none_walk);
+}
+
 int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
@@ -278,6 +314,19 @@
 	}
 
 	/*
+	 * Do PROT_NONE PFN permission checks here when we can still
+	 * bail out without undoing a lot of state. This is a rather
+	 * uncommon case, so doesn't need to be very optimized.
+	 */
+	if (arch_has_pfn_modify_check() &&
+	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
+		error = prot_none_walk(vma, start, end, newflags);
+		if (error)
+			return error;
+	}
+
+	/*
 	 * If we make a private mapping writable we increase our commit;
 	 * but (without finer accounting) cannot reduce our commit if we
 	 * make it unwritable again. hugetlb mapping were accounted for
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f99065f..a7fd1c6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -49,6 +49,7 @@
 int sysctl_panic_on_oom;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
+int sysctl_reap_mem_on_sigkill;
 
 DEFINE_MUTEX(oom_lock);
 
@@ -614,10 +615,13 @@
 	if (!oom_reaper_th)
 		return;
 
-	/* move the lock here to avoid scenario of queuing
-	 * the same task by both OOM killer and LMK.
+	/*
+	 * Move the lock here to avoid the scenario where the same task is
+	 * queued by both the OOM killer and any other SIGKILL
+	 * path.
 	 */
 	spin_lock(&oom_reaper_lock);
+
 	/* tsk is already queued? */
 	if (tsk == oom_reaper_list || tsk->oom_reaper_list) {
 		spin_unlock(&oom_reaper_lock);
@@ -650,6 +654,16 @@
 }
 #endif /* CONFIG_MMU */
 
+static void __mark_oom_victim(struct task_struct *tsk)
+{
+	struct mm_struct *mm = tsk->mm;
+
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
+		atomic_inc(&tsk->signal->oom_mm->mm_count);
+		set_bit(MMF_OOM_VICTIM, &mm->flags);
+	}
+}
+
 /**
  * mark_oom_victim - mark the given task as OOM victim
  * @tsk: task to mark
@@ -662,18 +676,13 @@
  */
 static void mark_oom_victim(struct task_struct *tsk)
 {
-	struct mm_struct *mm = tsk->mm;
-
 	WARN_ON(oom_killer_disabled);
 	/* OOM killer might race with memcg OOM */
 	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
 		return;
 
 	/* oom_mm is bound to the signal struct life time. */
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
-		atomic_inc(&tsk->signal->oom_mm->mm_count);
-		set_bit(MMF_OOM_VICTIM, &mm->flags);
-	}
+	__mark_oom_victim(tsk);
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
@@ -1089,3 +1098,22 @@
 	out_of_memory(&oc);
 	mutex_unlock(&oom_lock);
 }
+
+void add_to_oom_reaper(struct task_struct *p)
+	__releases(p->alloc_lock)
+{
+	if (!sysctl_reap_mem_on_sigkill)
+		return;
+
+	p = find_lock_task_mm(p);
+	if (!p)
+		return;
+
+	get_task_struct(p);
+	if (task_will_free_mem(p)) {
+		__mark_oom_victim(p);
+		wake_oom_reaper(p);
+	}
+	task_unlock(p);
+	put_task_struct(p);
+}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ca18dc0..756c386 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2185,30 +2185,14 @@
 	while (!done && (index <= end)) {
 		int i;
 
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+				tag);
 		if (nr_pages == 0)
 			break;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			/*
-			 * At this point, the page may be truncated or
-			 * invalidated (changing page->mapping to NULL), or
-			 * even swizzled back from swapper_space to tmpfs file
-			 * mapping. However, page->index will not change
-			 * because we have a reference on the page.
-			 */
-			if (page->index > end) {
-				/*
-				 * can't be range_cyclic (1st pass) because
-				 * end == -1 in that case.
-				 */
-				done = 1;
-				break;
-			}
-
 			done_index = page->index;
 
 			lock_page(page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c9f73d6..0cbf275 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4320,6 +4320,13 @@
 	available += global_page_state(NR_SLAB_RECLAIMABLE) -
 		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
 
+	/*
+	 * Part of the kernel memory, which can be released under memory
+	 * pressure.
+	 */
+	available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
+		PAGE_SHIFT;
+
 	if (available < 0)
 		available = 0;
 	return available;
diff --git a/mm/shmem.c b/mm/shmem.c
index 61a39aa..290c5b8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2160,6 +2160,8 @@
 			mpol_shared_policy_init(&info->policy, NULL);
 			break;
 		}
+
+		lockdep_annotate_inode_mutex_key(inode);
 	} else
 		shmem_free_inode(sb);
 	return inode;
diff --git a/mm/slub.c b/mm/slub.c
index 7341005..b5c9fde 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -683,7 +683,7 @@
 	slab_panic(reason);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page,
+static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
 			const char *fmt, ...)
 {
 	va_list args;
@@ -1806,7 +1806,7 @@
 {
 	struct page *page, *page2;
 	void *object = NULL;
-	int available = 0;
+	unsigned int available = 0;
 	int objects;
 
 	/*
@@ -4681,6 +4681,22 @@
 #define SO_OBJECTS	(1 << SL_OBJECTS)
 #define SO_TOTAL	(1 << SL_TOTAL)
 
+#ifdef CONFIG_MEMCG
+static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
+
+static int __init setup_slub_memcg_sysfs(char *str)
+{
+	int v;
+
+	if (get_option(&str, &v) > 0)
+		memcg_sysfs_enabled = v;
+
+	return 1;
+}
+
+__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
+#endif
+
 static ssize_t show_slab_objects(struct kmem_cache *s,
 			    char *buf, unsigned long flags)
 {
@@ -4884,10 +4900,10 @@
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 				 size_t length)
 {
-	unsigned long objects;
+	unsigned int objects;
 	int err;
 
-	err = kstrtoul(buf, 10, &objects);
+	err = kstrtouint(buf, 10, &objects);
 	if (err)
 		return err;
 	if (objects && !kmem_cache_has_cpu_partial(s))
@@ -5586,8 +5602,14 @@
 {
 	int err;
 	const char *name;
+	struct kset *kset = cache_kset(s);
 	int unmergeable = slab_unmergeable(s);
 
+	if (!kset) {
+		kobject_init(&s->kobj, &slab_ktype);
+		return 0;
+	}
+
 	if (unmergeable) {
 		/*
 		 * Slabcache can never be merged so we can use the name proper.
@@ -5604,7 +5626,7 @@
 		name = create_unique_id(s);
 	}
 
-	s->kobj.kset = cache_kset(s);
+	s->kobj.kset = kset;
 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err)
 		goto out;
@@ -5614,7 +5636,7 @@
 		goto out_del_kobj;
 
 #ifdef CONFIG_MEMCG
-	if (is_root_cache(s)) {
+	if (is_root_cache(s) && memcg_sysfs_enabled) {
 		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
 		if (!s->memcg_kset) {
 			err = -ENOMEM;
diff --git a/mm/swap.c b/mm/swap.c
index 5827225..086d7fa 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -955,15 +955,25 @@
 }
 EXPORT_SYMBOL(pagevec_lookup);
 
-unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
-		pgoff_t *index, int tag, unsigned nr_pages)
+unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
+		struct address_space *mapping, pgoff_t *index, pgoff_t end,
+		int tag)
 {
-	pvec->nr = find_get_pages_tag(mapping, index, tag,
-					nr_pages, pvec->pages);
+	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
+					PAGEVEC_SIZE, pvec->pages);
 	return pagevec_count(pvec);
 }
-EXPORT_SYMBOL(pagevec_lookup_tag);
+EXPORT_SYMBOL(pagevec_lookup_range_tag);
 
+unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
+		struct address_space *mapping, pgoff_t *index, pgoff_t end,
+		int tag, unsigned max_pages)
+{
+	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
+		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
+	return pagevec_count(pvec);
+}
+EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
 /*
  * Perform any setup for the swap system
  */
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 35f882d..2fcc719 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,6 +17,7 @@
 #include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/migrate.h>
+#include <linux/delay.h>
 
 #include <asm/pgtable.h>
 
@@ -350,8 +351,11 @@
 			 * busy looping, we just conditionally invoke the
 			 * scheduler here, if there are some more important
 			 * tasks to run.
+			 *
+			 * cond_resched() may not work if the process is RT.
+			 * We need usleep_range() to give up the CPU to another task.
 			 */
-			cond_resched();
+			usleep_range(500, 1000);
 			continue;
 		}
 		if (err) {		/* swp entry is obsolete ? */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7b439c9..7b996b6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2261,6 +2261,35 @@
 	return 0;
 }
 
+
+/*
+ * Find out how many pages are allowed for a single swap device. There
+ * are two limiting factors:
+ * 1) the number of bits for the swap offset in the swp_entry_t type, and
+ * 2) the number of bits in the swap pte, as defined by the different
+ * architectures.
+ *
+ * In order to find the largest possible bit mask, a swap entry with
+ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
+ * decoded to a swp_entry_t again, and finally the swap offset is
+ * extracted.
+ *
+ * This will mask all the bits from the initial ~0UL mask that can't
+ * be encoded in either the swp_entry_t or the architecture definition
+ * of a swap pte.
+ */
+unsigned long generic_max_swapfile_size(void)
+{
+	return swp_offset(pte_to_swp_entry(
+			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+}
+
+/* Can be overridden by an architecture for additional checks. */
+__weak unsigned long max_swapfile_size(void)
+{
+	return generic_max_swapfile_size();
+}
+
 static unsigned long read_swap_header(struct swap_info_struct *p,
 					union swap_header *swap_header,
 					struct inode *inode)
@@ -2296,22 +2325,7 @@
 	p->cluster_next = 1;
 	p->cluster_nr = 0;
 
-	/*
-	 * Find out how many pages are allowed for a single swap
-	 * device. There are two limiting factors: 1) the number
-	 * of bits for the swap offset in the swp_entry_t type, and
-	 * 2) the number of bits in the swap pte as defined by the
-	 * different architectures. In order to find the
-	 * largest possible bit mask, a swap entry with swap type 0
-	 * and swap offset ~0UL is created, encoded to a swap pte,
-	 * decoded to a swp_entry_t again, and finally the swap
-	 * offset is extracted. This will mask all the bits from
-	 * the initial ~0UL mask that can't be encoded in either
-	 * the swp_entry_t or the architecture definition of a
-	 * swap pte.
-	 */
-	maxpages = swp_offset(pte_to_swp_entry(
-			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+	maxpages = max_swapfile_size();
 	last_page = swap_header->info.last_page;
 	if (!last_page) {
 		pr_warn("Empty swap-file\n");
diff --git a/mm/util.c b/mm/util.c
index 8c755d0..6296de0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -576,6 +576,13 @@
 		free += global_page_state(NR_SLAB_RECLAIMABLE);
 
 		/*
+		 * Part of the kernel memory, which can be released
+		 * under memory pressure.
+		 */
+		free += global_node_page_state(
+			NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
+
+		/*
 		 * Leave reserved pages. The pages are not for anonymous pages.
 		 */
 		if (free <= totalreserve_pages)
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 035fdeb..c9ca3dd 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -6,44 +6,6 @@
 #include <linux/vmacache.h>
 
 /*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-	struct task_struct *g, *p;
-
-	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-	/*
-	 * Single threaded tasks need not iterate the entire
-	 * list of process. We can avoid the flushing as well
-	 * since the mm's seqnum was increased and don't have
-	 * to worry about other threads' seqnum. Current's
-	 * flush will occur upon the next lookup.
-	 */
-	if (atomic_read(&mm->mm_users) == 1)
-		return;
-
-	rcu_read_lock();
-	for_each_process_thread(g, p) {
-		/*
-		 * Only flush the vmacache pointers as the
-		 * mm seqnum is already set and curr's will
-		 * be set upon invalidation when the next
-		 * lookup is done.
-		 */
-		if (mm == p->mm)
-			vmacache_flush(p);
-	}
-	rcu_read_unlock();
-}
-
-/*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this
  * task's vmacache pertains to a different mm (ie, its own).  There is
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 67ef013..0ffab05 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1569,7 +1569,7 @@
 			addr))
 		return;
 
-	area = remove_vm_area(addr);
+	area = find_vmap_area((unsigned long)addr)->vm;
 	if (unlikely(!area)) {
 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
 				addr);
@@ -1579,6 +1579,7 @@
 	debug_check_no_locks_freed(addr, get_vm_area_size(area));
 	debug_check_no_obj_freed(addr, get_vm_area_size(area));
 
+	remove_vm_area(addr);
 	if (deallocate_pages) {
 		int i;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3d128da..f5ef213 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -973,6 +973,7 @@
 	"nr_vmscan_immediate_reclaim",
 	"nr_dirtied",
 	"nr_written",
+	"nr_indirectly_reclaimable",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
@@ -1078,6 +1079,9 @@
 #ifdef CONFIG_SMP
 	"nr_tlb_remote_flush",
 	"nr_tlb_remote_flush_received",
+#else
+	"", /* nr_tlb_remote_flush */
+	"", /* nr_tlb_remote_flush_received */
 #endif /* CONFIG_SMP */
 	"nr_tlb_local_flush_all",
 	"nr_tlb_local_flush_one",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 55c38d8..cd0e90d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -188,6 +188,7 @@
  * (see: fix_fullness_group())
  */
 static const int fullness_threshold_frac = 4;
+static size_t huge_class_size;
 
 struct size_class {
 	spinlock_t lock;
@@ -1484,6 +1485,25 @@
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
 
+/**
+ * zs_huge_class_size() - Returns the size (in bytes) of the first huge
+ *                        zsmalloc &size_class.
+ * @pool: zsmalloc pool to use
+ *
+ * The function returns the size of the first huge class - any object of equal
+ * or bigger size will be stored in a zspage consisting of a single physical
+ * page.
+ *
+ * Context: Any context.
+ *
+ * Return: the size (in bytes) of the first huge zsmalloc &size_class.
+ */
+size_t zs_huge_class_size(struct zs_pool *pool)
+{
+	return huge_class_size;
+}
+EXPORT_SYMBOL_GPL(zs_huge_class_size);
+
 static unsigned long obj_malloc(struct size_class *class,
 				struct zspage *zspage, unsigned long handle)
 {
@@ -2440,6 +2460,27 @@
 		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
 
 		/*
+		 * We iterate from biggest down to smallest classes,
+		 * so huge_class_size holds the size of the first huge
+		 * class. Any object bigger than or equal to that will
+		 * end up in the huge class.
+		 */
+		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
+				!huge_class_size) {
+			huge_class_size = size;
+			/*
+			 * The object uses ZS_HANDLE_SIZE bytes to store the
+			 * handle. We need to subtract it, because zs_malloc()
+			 * unconditionally adds handle size before it performs
+			 * size class search - so object may be smaller than
+			 * huge class size, yet it still can end up in the huge
+			 * class because it grows by ZS_HANDLE_SIZE extra bytes
+			 * right before class lookup.
+			 */
+			huge_class_size -= (ZS_HANDLE_SIZE - 1);
+		}
+
+		/*
 		 * size_class is used for normal zsmalloc operation such
 		 * as alloc/free for that size. Although it is natural that we
 		 * have one size_class for each size, there is a chance that we
diff --git a/mm/zswap.c b/mm/zswap.c
index ded051e..c2b5435 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1018,6 +1018,15 @@
 			ret = -ENOMEM;
 			goto reject;
 		}
+
+		/* A second zswap_is_full() check after
+		 * zswap_shrink() to make sure it's now
+		 * under the max_pool_percent
+		 */
+		if (zswap_is_full()) {
+			ret = -ENOMEM;
+			goto reject;
+		}
 	}
 
 	/* allocate entry */
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 79f1fa2..23654f1 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -745,6 +745,7 @@
 		hdr.hop_limit, &hdr.daddr);
 
 	skb_push(skb, sizeof(hdr));
+	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
 
diff --git a/net/9p/client.c b/net/9p/client.c
index 1fd6019..98d299e 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -931,7 +931,7 @@
 {
 	int err = 0;
 	struct p9_req_t *req;
-	char *version;
+	char *version = NULL;
 	int msize;
 
 	p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 7bc2208..aa45866 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -181,6 +181,8 @@
 	spin_lock_irqsave(&p9_poll_lock, flags);
 	list_del_init(&m->poll_pending_link);
 	spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+	flush_work(&p9_poll_work);
 }
 
 /**
@@ -193,15 +195,14 @@
 static void p9_conn_cancel(struct p9_conn *m, int err)
 {
 	struct p9_req_t *req, *rtmp;
-	unsigned long flags;
 	LIST_HEAD(cancel_list);
 
 	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
 
-	spin_lock_irqsave(&m->client->lock, flags);
+	spin_lock(&m->client->lock);
 
 	if (m->err) {
-		spin_unlock_irqrestore(&m->client->lock, flags);
+		spin_unlock(&m->client->lock);
 		return;
 	}
 
@@ -213,7 +214,6 @@
 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
 		list_move(&req->req_list, &cancel_list);
 	}
-	spin_unlock_irqrestore(&m->client->lock, flags);
 
 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
 		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
@@ -222,6 +222,7 @@
 			req->t_err = err;
 		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
 	}
+	spin_unlock(&m->client->lock);
 }
 
 static int
@@ -377,8 +378,9 @@
 		if (m->req->status != REQ_STATUS_ERROR)
 			status = REQ_STATUS_RCVD;
 		list_del(&m->req->req_list);
-		spin_unlock(&m->client->lock);
+		/* update req->status while holding client->lock  */
 		p9_client_cb(m->client, m->req, status);
+		spin_unlock(&m->client->lock);
 		m->rc.sdata = NULL;
 		m->rc.offset = 0;
 		m->rc.capacity = 0;
@@ -937,7 +939,7 @@
 	if (err < 0)
 		return err;
 
-	if (valid_ipaddr4(addr) < 0)
+	if (addr == NULL || valid_ipaddr4(addr) < 0)
 		return -EINVAL;
 
 	csocket = NULL;
@@ -985,6 +987,9 @@
 
 	csocket = NULL;
 
+	if (addr == NULL)
+		return -EINVAL;
+
 	if (strlen(addr) >= UNIX_PATH_MAX) {
 		pr_err("%s (%d): address too long: %s\n",
 		       __func__, task_pid_nr(current), addr);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 553ed4e..5a2ad47 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -622,6 +622,9 @@
 	struct rdma_conn_param conn_param;
 	struct ib_qp_init_attr qp_attr;
 
+	if (addr == NULL)
+		return -EINVAL;
+
 	/* Parse the transport specific mount options */
 	err = parse_opts(args, &opts);
 	if (err < 0)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 3aa5a93..e73fd64 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -189,7 +189,7 @@
 		s = rest_of_page(data);
 		if (s > count)
 			s = count;
-		BUG_ON(index > limit);
+		BUG_ON(index >= limit);
 		/* Make sure we don't terminate early. */
 		sg_unmark_end(&sg[index]);
 		sg_set_buf(&sg[index++], data, s);
@@ -234,6 +234,7 @@
 		s = PAGE_SIZE - data_off;
 		if (s > count)
 			s = count;
+		BUG_ON(index >= limit);
 		/* Make sure we don't terminate early. */
 		sg_unmark_end(&sg[index]);
 		sg_set_page(&sg[index++], pdata[i++], s, data_off);
@@ -406,6 +407,7 @@
 	p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
 	if (uodata) {
+		__le32 sz;
 		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
 					    outlen, &offs, &need_drop);
 		if (n < 0)
@@ -416,6 +418,12 @@
 			memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
 			outlen = n;
 		}
+		/* The size field of the message must include the length of the
+		 * header and the length of the data.  We didn't actually know
+		 * the length of the data until this point so add it in now.
+		 */
+		sz = cpu_to_le32(req->tc->size + outlen);
+		memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
 	} else if (uidata) {
 		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
 					    inlen, &offs, &need_drop);
@@ -563,7 +571,7 @@
 	chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
 	if (IS_ERR(chan->vq)) {
 		err = PTR_ERR(chan->vq);
-		goto out_free_vq;
+		goto out_free_chan;
 	}
 	chan->vq->vdev->priv = chan;
 	spin_lock_init(&chan->lock);
@@ -616,6 +624,7 @@
 	kfree(tag);
 out_free_vq:
 	vdev->config->del_vqs(vdev);
+out_free_chan:
 	kfree(chan);
 fail:
 	return err;
@@ -643,6 +652,9 @@
 	int ret = -ENOENT;
 	int found = 0;
 
+	if (devname == NULL)
+		return -EINVAL;
+
 	mutex_lock(&virtio_9p_lock);
 	list_for_each_entry(chan, &virtio_chan_list, chan_list) {
 		if (!strncmp(devname, chan->tag, chan->tag_len) &&
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 946f1c2..1ae8c59f 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2704,7 +2704,7 @@
 {
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
-	struct batadv_gw_node *curr_gw;
+	struct batadv_gw_node *curr_gw = NULL;
 	int ret = 0;
 	void *hdr;
 
@@ -2752,6 +2752,8 @@
 	ret = 0;
 
 out:
+	if (curr_gw)
+		batadv_gw_node_put(curr_gw);
 	if (router_ifinfo)
 		batadv_neigh_ifinfo_put(router_ifinfo);
 	if (router)
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index ed4ddf2..4348118 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -919,7 +919,7 @@
 {
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	struct batadv_neigh_node *router;
-	struct batadv_gw_node *curr_gw;
+	struct batadv_gw_node *curr_gw = NULL;
 	int ret = 0;
 	void *hdr;
 
@@ -987,6 +987,8 @@
 	ret = 0;
 
 out:
+	if (curr_gw)
+		batadv_gw_node_put(curr_gw);
 	if (router_ifinfo)
 		batadv_neigh_ifinfo_put(router_ifinfo);
 	if (router)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 1811f8e..552e00b0 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -774,7 +774,7 @@
 	hid->version = req->version;
 	hid->country = req->country;
 
-	strncpy(hid->name, req->name, sizeof(req->name) - 1);
+	strncpy(hid->name, req->name, sizeof(hid->name));
 
 	snprintf(hid->phys, sizeof(hid->phys), "%pMR",
 		 &l2cap_pi(session->ctrl_sock->sk)->chan->src);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 3125ce6..95fd7a8 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -392,7 +392,8 @@
  */
 static void sco_sock_kill(struct sock *sk)
 {
-	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
+	    sock_flag(sk, SOCK_DEAD))
 		return;
 
 	BT_DBG("sk %p state %d", sk, sk->sk_state);
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 070cf13..f2660c1 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -67,6 +67,9 @@
 	if (e->ethproto != htons(ETH_P_ARP) ||
 	    e->invflags & EBT_IPROTO)
 		return -EINVAL;
+	if (ebt_invalid_target(info->target))
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index da3d373..18c1f07 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -406,6 +406,12 @@
 	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
 	if (IS_ERR(watcher))
 		return PTR_ERR(watcher);
+
+	if (watcher->family != NFPROTO_BRIDGE) {
+		module_put(watcher->me);
+		return -ENOENT;
+	}
+
 	w->u.watcher = watcher;
 
 	par->target   = watcher;
@@ -704,6 +710,8 @@
 	}
 	i = 0;
 
+	memset(&mtpar, 0, sizeof(mtpar));
+	memset(&tgpar, 0, sizeof(tgpar));
 	mtpar.net	= tgpar.net       = net;
 	mtpar.table     = tgpar.table     = name;
 	mtpar.entryinfo = tgpar.entryinfo = e;
@@ -725,6 +733,13 @@
 		goto cleanup_watchers;
 	}
 
+	/* Reject UNSPEC, xtables verdicts/return values are incompatible */
+	if (target->family != NFPROTO_BRIDGE) {
+		module_put(target->me);
+		ret = -ENOENT;
+		goto cleanup_watchers;
+	}
+
 	t->u.target = target;
 	if (t->u.target == &ebt_standard_target) {
 		if (gap < sizeof(struct ebt_standard_target)) {
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index d730a0f..a0443d4 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -131,8 +131,10 @@
 	caifd = caif_get(skb->dev);
 
 	WARN_ON(caifd == NULL);
-	if (caifd == NULL)
+	if (!caifd) {
+		rcu_read_unlock();
 		return;
+	}
 
 	caifd_hold(caifd);
 	rcu_read_unlock();
diff --git a/net/core/dev.c b/net/core/dev.c
index c6a8932..df92fb8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4201,7 +4201,8 @@
 int (*gsb_nw_stack_recv)(struct sk_buff *skb) __rcu __read_mostly;
 EXPORT_SYMBOL(gsb_nw_stack_recv);
 
-int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+int (*athrs_fast_nat_recv)(struct sk_buff *skb,
+			   struct packet_type *pt_temp) __rcu __read_mostly;
 EXPORT_SYMBOL(athrs_fast_nat_recv);
 
 int (*embms_tm_multicast_recv)(struct sk_buff *skb) __rcu __read_mostly;
@@ -4216,7 +4217,7 @@
 	int ret = NET_RX_DROP;
 	__be16 type;
 	int (*gsb_ns_recv)(struct sk_buff *skb);
-	int (*fast_recv)(struct sk_buff *skb);
+	int (*fast_recv)(struct sk_buff *skb, struct packet_type *pt_temp);
 	int (*embms_recv)(struct sk_buff *skb);
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
@@ -4286,7 +4287,7 @@
 	}
 	fast_recv = rcu_dereference(athrs_fast_nat_recv);
 	if (fast_recv) {
-		if (fast_recv(skb)) {
+		if (fast_recv(skb, pt_prev)) {
 			ret = NET_RX_SUCCESS;
 			goto out;
 		}
@@ -8076,7 +8077,8 @@
 		/* We get here if we can't use the current device name */
 		if (!pat)
 			goto out;
-		if (dev_get_valid_name(net, dev, pat) < 0)
+		err = dev_get_valid_name(net, dev, pat);
+		if (err < 0)
 			goto out;
 	}
 
@@ -8088,7 +8090,6 @@
 	dev_close(dev);
 
 	/* And unlink it from device chain */
-	err = -ENODEV;
 	unlist_netdevice(dev);
 
 	synchronize_net();
diff --git a/net/core/dst.c b/net/core/dst.c
index b5de366..39cc119 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -349,15 +349,8 @@
 
 	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
 	prev = cmpxchg(&dst->_metrics, old, new);
-	if (prev == old) {
-		struct dst_metrics *old_p = (struct dst_metrics *)
-					    __DST_METRICS_PTR(old);
-
-		if (prev & DST_METRICS_REFCOUNTED) {
-			if (atomic_dec_and_test(&old_p->refcnt))
-				kfree(old_p);
-		}
-	}
+	if (prev == old)
+		kfree(__DST_METRICS_PTR(old));
 }
 EXPORT_SYMBOL(__dst_destroy_metrics_generic);
 
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 508e051..18f17e1 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -77,8 +77,20 @@
 		d->lock = lock;
 		spin_lock_bh(lock);
 	}
-	if (d->tail)
-		return gnet_stats_copy(d, type, NULL, 0, padattr);
+	if (d->tail) {
+		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
+
+		/* The initial attribute added in gnet_stats_copy() may be
+		 * preceded by a padding attribute, in which case d->tail will
+		 * end up pointing at the padding instead of the real attribute.
+		 * Fix this so gnet_stats_finish_copy() adjusts the length of
+		 * the right attribute.
+		 */
+		if (ret == 0 && d->tail->nla_type == padattr)
+			d->tail = (struct nlattr *)((char *)d->tail +
+						    NLA_ALIGN(d->tail->nla_len));
+		return ret;
+	}
 
 	return 0;
 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 340a3db..2cfbe3f 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1147,6 +1147,12 @@
 		lladdr = neigh->ha;
 	}
 
+	/* Update the confirmed timestamp for the neighbour entry after we
+	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
+	 */
+	if (new & NUD_CONNECTED)
+		neigh->confirmed = jiffies;
+
 	/* If entry was valid and address is not changed,
 	   do not change entry state, if new one is STALE.
 	 */
@@ -1168,15 +1174,12 @@
 		}
 	}
 
-	/* Update timestamps only once we know we will make a change to the
+	/* Update timestamp only once we know we will make a change to the
 	 * neighbour entry. Otherwise we risk to move the locktime window with
 	 * noop updates and ignore relevant ARP updates.
 	 */
-	if (new != old || lladdr != neigh->ha) {
-		if (new & NUD_CONNECTED)
-			neigh->confirmed = jiffies;
+	if (new != old || lladdr != neigh->ha)
 		neigh->updated = jiffies;
-	}
 
 	if (new != old) {
 		neigh_del_timer(neigh);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f3a0ad1..194e844 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2339,9 +2339,12 @@
 			return err;
 	}
 
-	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
-	__dev_notify_flags(dev, old_flags, ~0U);
+	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+		__dev_notify_flags(dev, old_flags, 0U);
+	} else {
+		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+		__dev_notify_flags(dev, old_flags, ~0U);
+	}
 	return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 89f0fbc..b6a319c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -910,6 +910,7 @@
 	n->cloned = 1;
 	n->nohdr = 0;
 	n->peeked = 0;
+	C(pfmemalloc);
 	n->destructor = NULL;
 	C(tail);
 	C(end);
@@ -3258,6 +3259,7 @@
 				net_warn_ratelimited(
 					"skb_segment: too many frags: %u %u\n",
 					pos, mss);
+				err = -EINVAL;
 				goto err;
 			}
 
@@ -3294,11 +3296,10 @@
 
 perform_csum_check:
 		if (!csum) {
-			if (skb_has_shared_frag(nskb)) {
-				err = __skb_linearize(nskb);
-				if (err)
-					goto err;
-			}
+			if (skb_has_shared_frag(nskb) &&
+			    __skb_linearize(nskb))
+				goto err;
+
 			if (!nskb->remcsum_offload)
 				nskb->ip_summed = CHECKSUM_NONE;
 			SKB_GSO_CB(nskb)->csum =
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 3202d75..a111670 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1764,7 +1764,7 @@
 		if (itr->app.selector == app->selector &&
 		    itr->app.protocol == app->protocol &&
 		    itr->ifindex == ifindex &&
-		    (!prio || itr->app.priority == prio))
+		    ((prio == -1) || itr->app.priority == prio))
 			return itr;
 	}
 
@@ -1799,7 +1799,8 @@
 	u8 prio = 0;
 
 	spin_lock_bh(&dcb_lock);
-	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+	itr = dcb_app_lookup(app, dev->ifindex, -1);
+	if (itr)
 		prio = itr->app.priority;
 	spin_unlock_bh(&dcb_lock);
 
@@ -1827,7 +1828,8 @@
 
 	spin_lock_bh(&dcb_lock);
 	/* Search for existing match and replace */
-	if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+	itr = dcb_app_lookup(new, dev->ifindex, -1);
+	if (itr) {
 		if (new->priority)
 			itr->app.priority = new->priority;
 		else {
@@ -1860,7 +1862,8 @@
 	u8 prio = 0;
 
 	spin_lock_bh(&dcb_lock);
-	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+	itr = dcb_app_lookup(app, dev->ifindex, -1);
+	if (itr)
 		prio |= 1 << itr->app.priority;
 	spin_unlock_bh(&dcb_lock);
 
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 86a2ed0..161dfcf 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -228,14 +228,16 @@
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 	u32 cwnd = hc->tx_cwnd, restart_cwnd,
 	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+	s32 delta = now - hc->tx_lsndtime;
 
 	hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
 
 	/* don't reduce cwnd below the initial window (IW) */
 	restart_cwnd = min(cwnd, iwnd);
-	cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
-	hc->tx_cwnd = max(cwnd, restart_cwnd);
 
+	while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+		cwnd >>= 1;
+	hc->tx_cwnd = max(cwnd, restart_cwnd);
 	hc->tx_cwnd_stamp = now;
 	hc->tx_cwnd_used  = 0;
 
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 119c043..03fcf3e 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -599,7 +599,7 @@
 {
 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
 	struct dccp_sock *dp = dccp_sk(sk);
-	ktime_t now = ktime_get_real();
+	ktime_t now = ktime_get();
 	s64 delta = 0;
 
 	switch (fbtype) {
@@ -624,15 +624,14 @@
 	case CCID3_FBACK_PERIODIC:
 		delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
 		if (delta <= 0)
-			DCCP_BUG("delta (%ld) <= 0", (long)delta);
-		else
-			hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+			delta = 1;
+		hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
 		break;
 	default:
 		return;
 	}
 
-	ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+	ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
 		       hc->rx_x_recv, hc->rx_pinv);
 
 	hc->rx_tstamp_last_feedback = now;
@@ -679,7 +678,8 @@
 static u32 ccid3_first_li(struct sock *sk)
 {
 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
-	u32 x_recv, p, delta;
+	u32 x_recv, p;
+	s64 delta;
 	u64 fval;
 
 	if (hc->rx_rtt == 0) {
@@ -687,7 +687,9 @@
 		hc->rx_rtt = DCCP_FALLBACK_RTT;
 	}
 
-	delta  = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+	delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+	if (delta <= 0)
+		delta = 1;
 	x_recv = scaled_div32(hc->rx_bytes_recv, delta);
 	if (x_recv == 0) {		/* would also trigger divide-by-zero */
 		DCCP_WARN("X_recv==0\n");
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index f025276..5f5d9ea 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -87,35 +87,39 @@
 		opt++;
 		kdebug("options: '%s'", opt);
 		do {
+			int opt_len, opt_nlen;
 			const char *eq;
-			int opt_len, opt_nlen, opt_vlen, tmp;
+			char optval[128];
 
 			next_opt = memchr(opt, '#', end - opt) ?: end;
 			opt_len = next_opt - opt;
-			if (opt_len <= 0 || opt_len > 128) {
+			if (opt_len <= 0 || opt_len > sizeof(optval)) {
 				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
 						    opt_len);
 				return -EINVAL;
 			}
 
-			eq = memchr(opt, '=', opt_len) ?: end;
-			opt_nlen = eq - opt;
-			eq++;
-			opt_vlen = next_opt - eq; /* will be -1 if no value */
+			eq = memchr(opt, '=', opt_len);
+			if (eq) {
+				opt_nlen = eq - opt;
+				eq++;
+				memcpy(optval, eq, next_opt - eq);
+				optval[next_opt - eq] = '\0';
+			} else {
+				opt_nlen = opt_len;
+				optval[0] = '\0';
+			}
 
-			tmp = opt_vlen >= 0 ? opt_vlen : 0;
-			kdebug("option '%*.*s' val '%*.*s'",
-			       opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+			kdebug("option '%*.*s' val '%s'",
+			       opt_nlen, opt_nlen, opt, optval);
 
 			/* see if it's an error number representing a DNS error
 			 * that's to be recorded as the result in this key */
 			if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
 			    memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
 				kdebug("dns error number option");
-				if (opt_vlen <= 0)
-					goto bad_option_value;
 
-				ret = kstrtoul(eq, 10, &derrno);
+				ret = kstrtoul(optval, 10, &derrno);
 				if (ret < 0)
 					goto bad_option_value;
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 5000e6f..339d9c6 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1199,6 +1199,9 @@
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
 
+	if (!netif_running(slave_dev))
+		return 0;
+
 	netif_device_detach(slave_dev);
 
 	if (p->phy) {
@@ -1216,6 +1219,9 @@
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
 
+	if (!netif_running(slave_dev))
+		return 0;
+
 	netif_device_attach(slave_dev);
 
 	if (p->phy) {
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 83af533..ba8bd24 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -90,12 +90,18 @@
 	return 0;
 }
 
+static int lowpan_get_iflink(const struct net_device *dev)
+{
+	return lowpan_802154_dev(dev)->wdev->ifindex;
+}
+
 static const struct net_device_ops lowpan_netdev_ops = {
 	.ndo_init		= lowpan_dev_init,
 	.ndo_start_xmit		= lowpan_xmit,
 	.ndo_open		= lowpan_open,
 	.ndo_stop		= lowpan_stop,
 	.ndo_neigh_construct    = lowpan_neigh_construct,
+	.ndo_get_iflink         = lowpan_get_iflink,
 };
 
 static void lowpan_setup(struct net_device *ldev)
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index dbb476d..50ed4755 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -266,9 +266,24 @@
 	/* We must take a copy of the skb before we modify/replace the ipv6
 	 * header as the header could be used elsewhere
 	 */
-	skb = skb_unshare(skb, GFP_ATOMIC);
-	if (!skb)
-		return NET_XMIT_DROP;
+	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
+		     skb_tailroom(skb) < ldev->needed_tailroom)) {
+		struct sk_buff *nskb;
+
+		nskb = skb_copy_expand(skb, ldev->needed_headroom,
+				       ldev->needed_tailroom, GFP_ATOMIC);
+		if (likely(nskb)) {
+			consume_skb(skb);
+			skb = nskb;
+		} else {
+			kfree_skb(skb);
+			return NET_XMIT_DROP;
+		}
+	} else {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb)
+			return NET_XMIT_DROP;
+	}
 
 	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
 	if (ret < 0) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 90c91a7..275ef13 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1315,6 +1315,7 @@
 		if (encap)
 			skb_reset_inner_headers(skb);
 		skb->network_header = (u8 *)iph - skb->head;
+		skb_reset_mac_len(skb);
 	} while ((skb = skb->next));
 
 out:
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 972353c..65a1588 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1523,9 +1523,17 @@
 	int taglen;
 
 	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
-		if (optptr[0] == IPOPT_CIPSO)
+		switch (optptr[0]) {
+		case IPOPT_CIPSO:
 			return optptr;
-		taglen = optptr[1];
+		case IPOPT_END:
+			return NULL;
+		case IPOPT_NOOP:
+			taglen = 1;
+			break;
+		default:
+			taglen = optptr[1];
+		}
 		optlen -= taglen;
 		optptr += taglen;
 	}
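The rewritten walk treats IPOPT_END as a terminator and IPOPT_NOOP as a single-byte option instead of trusting optptr[1] for them. A standalone sketch of the same walk over a hand-built options area follows; the option numbers come from the IPv4 header format, the test scaffolding is invented, and option-length validation is assumed to have happened earlier, as in the real code.

/* Walk an IPv4 options area looking for a CIPSO option. */
#include <stdio.h>

#define IPOPT_END   0
#define IPOPT_NOOP  1
#define IPOPT_CIPSO 134

static const unsigned char *find_cipso(const unsigned char *optptr, int optlen)
{
	int taglen;

	while (optlen > 0) {
		switch (optptr[0]) {
		case IPOPT_CIPSO:
			return optptr;
		case IPOPT_END:			/* end of option list */
			return NULL;
		case IPOPT_NOOP:		/* single byte, no length field */
			taglen = 1;
			break;
		default:
			taglen = optptr[1];	/* validated earlier in real code */
		}
		optlen -= taglen;
		optptr += taglen;
	}
	return NULL;
}

int main(void)
{
	/* NOOP, NOOP, then a 6-byte CIPSO option (contents elided). */
	unsigned char opts[] = { IPOPT_NOOP, IPOPT_NOOP,
				 IPOPT_CIPSO, 6, 0, 0, 0, 0 };

	printf("CIPSO option %s\n",
	       find_cipso(opts, (int)sizeof(opts)) ? "found" : "not found");
	return 0;
}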
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 08b7260..78ee2fc 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -282,18 +282,19 @@
 		return ip_hdr(skb)->daddr;
 
 	in_dev = __in_dev_get_rcu(dev);
-	BUG_ON(!in_dev);
 
 	net = dev_net(dev);
 
 	scope = RT_SCOPE_UNIVERSE;
 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+		bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
 		struct flowi4 fl4 = {
 			.flowi4_iif = LOOPBACK_IFINDEX,
+			.flowi4_oif = l3mdev_master_ifindex_rcu(dev),
 			.daddr = ip_hdr(skb)->saddr,
 			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
 			.flowi4_scope = scope,
-			.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
+			.flowi4_mark = vmark ? skb->mark : 0,
 		};
 		if (!fib_lookup(net, &fl4, &res, 0))
 			return FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 7f5fe07..f2e6e87 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1193,8 +1193,7 @@
 	if (pmc) {
 		im->interface = pmc->interface;
 		im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-		im->sfmode = pmc->sfmode;
-		if (pmc->sfmode == MCAST_INCLUDE) {
+		if (im->sfmode == MCAST_INCLUDE) {
 			im->tomb = pmc->tomb;
 			im->sources = pmc->sources;
 			for (psf = im->sources; psf; psf = psf->sf_next)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 8effac0..f8b41aa 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -356,11 +356,6 @@
 {
 	struct inet_frag_queue *q;
 
-	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
-		inet_frag_schedule_worker(f);
-		return NULL;
-	}
-
 	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
 	if (!q)
 		return NULL;
@@ -397,6 +392,11 @@
 	struct inet_frag_queue *q;
 	int depth = 0;
 
+	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
+		inet_frag_schedule_worker(f);
+		return NULL;
+	}
+
 	if (frag_mem_limit(nf) > nf->low_thresh)
 		inet_frag_schedule_worker(f);
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4bf3b8a..752711c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -446,11 +446,16 @@
 		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
 
 		if (i < next->len) {
+			int delta = -next->truesize;
+
 			/* Eat head of the next overlapped fragment
 			 * and leave the loop. The next ones cannot overlap.
 			 */
 			if (!pskb_pull(next, i))
 				goto err;
+			delta += next->truesize;
+			if (delta)
+				add_frag_mem_limit(qp->q.net, delta);
 			FRAG_CB(next)->offset += i;
 			qp->q.meat -= i;
 			if (next->ip_summed != CHECKSUM_UNNECESSARY)
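
The ip_fragment.c hunk above fixes the memory accounting around pskb_pull(): the fragment's truesize is sampled before the pull, and only the resulting difference is charged back to the per-netns fragment memory limit. A toy sketch of that delta bookkeeping, with invented names standing in for skb->truesize and add_frag_mem_limit():

#include <stdio.h>

static long frag_mem;			/* shared "memory in use" counter */

struct fragment {
	long truesize;			/* accounted footprint of this fragment */
};

/* Trim 'bytes' from the head of the fragment; the footprint may change. */
static void trim_head(struct fragment *f, long bytes)
{
	f->truesize -= bytes;		/* stand-in for pskb_pull() side effects */
}

int main(void)
{
	struct fragment next = { .truesize = 2048 };
	long delta = -next.truesize;	/* old footprint, negated */

	trim_head(&next, 512);
	delta += next.truesize;		/* net change, here -512 */
	if (delta)
		frag_mem += delta;	/* keep the global counter consistent */

	printf("delta=%ld frag_mem=%ld\n", delta, frag_mem);
	return 0;
}
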
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d0bd98f..5fcafc8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -519,6 +519,8 @@
 	to->dev = from->dev;
 	to->mark = from->mark;
 
+	skb_copy_hash(to, from);
+
 	/* Copy the flags to each fragment. */
 	IPCB(to)->flags = IPCB(from)->flags;
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index dd80276..b21e435 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -135,15 +135,18 @@
 {
 	struct sockaddr_in sin;
 	const struct iphdr *iph = ip_hdr(skb);
-	__be16 *ports = (__be16 *)skb_transport_header(skb);
+	__be16 *ports;
+	int end;
 
-	if (skb_transport_offset(skb) + 4 > (int)skb->len)
+	end = skb_transport_offset(skb) + 4;
+	if (end > 0 && !pskb_may_pull(skb, end))
 		return;
 
 	/* All current transport protocols have the port numbers in the
 	 * first four bytes of the transport header and this function is
 	 * written with this assumption in mind.
 	 */
+	ports = (__be16 *)skb_transport_header(skb);
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = iph->daddr;
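
The ip_sockglue.c hunk above stops trusting skb->len alone and instead requires the four port bytes to be present (and linear, via pskb_may_pull()) before the transport header pointer is dereferenced. A minimal userspace illustration of the same check-before-cast idea over a flat packet buffer, with made-up offsets:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int read_ports(const uint8_t *pkt, size_t pkt_len,
		      size_t transport_off, uint16_t *sport, uint16_t *dport)
{
	/* need offset + 4 bytes inside the buffer before touching them */
	if (transport_off + 4 > pkt_len)
		return -1;

	memcpy(sport, pkt + transport_off, 2);		/* network byte order */
	memcpy(dport, pkt + transport_off + 2, 2);
	return 0;
}

int main(void)
{
	uint8_t pkt[28] = { 0 };
	uint16_t sport, dport;

	pkt[20] = 0x00;
	pkt[21] = 0x35;					/* port 53, raw bytes */
	if (read_ports(pkt, sizeof(pkt), 20, &sport, &dport) == 0)
		printf("got ports (raw): %04x %04x\n", sport, dport);
	return 0;
}
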
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b23464d..d278b06 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -779,6 +779,11 @@
  */
 static inline void __init ic_bootp_init(void)
 {
+	/* Re-initialise all name servers to NONE, in case any were set via the
+	 * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses
+	 * specified there will already have been decoded but are no longer
+	 * needed
+	 */
 	ic_nameservers_predef();
 
 	dev_add_pack(&bootp_packet_type);
@@ -1401,6 +1406,13 @@
 	int err;
 	unsigned int i;
 
+	/* Initialise all name servers to NONE (but only if the "ip=" or
+	 * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise
+	 * we'll overwrite the IP addresses specified there)
+	 */
+	if (ic_set_manually == 0)
+		ic_nameservers_predef();
+
 #ifdef CONFIG_PROC_FS
 	proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
 #endif /* CONFIG_PROC_FS */
@@ -1621,6 +1633,7 @@
 		return 1;
 	}
 
+	/* Initialise all name servers to NONE */
 	ic_nameservers_predef();
 
 	/* Parse string for static IP assignment.  */
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e78f652..4822459 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -554,6 +554,7 @@
 		return -ENOMEM;
 
 	j = 0;
+	memset(&mtpar, 0, sizeof(mtpar));
 	mtpar.net	= net;
 	mtpar.table     = name;
 	mtpar.entryinfo = &e->ip;
@@ -1912,6 +1913,7 @@
 		.checkentry = icmp_checkentry,
 		.proto      = IPPROTO_ICMP,
 		.family     = NFPROTO_IPV4,
+		.me	    = THIS_MODULE,
 	},
 };
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ccc484a..adc9ccc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -144,8 +144,9 @@
 	if (write && ret == 0) {
 		low = make_kgid(user_ns, urange[0]);
 		high = make_kgid(user_ns, urange[1]);
-		if (!gid_valid(low) || !gid_valid(high) ||
-		    (urange[1] < urange[0]) || gid_lt(high, low)) {
+		if (!gid_valid(low) || !gid_valid(high))
+			return -EINVAL;
+		if (urange[1] < urange[0] || gid_lt(high, low)) {
 			low = make_kgid(&init_user_ns, 1);
 			high = make_kgid(&init_user_ns, 0);
 		}
@@ -231,8 +232,9 @@
 {
 	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
 	struct tcp_fastopen_context *ctxt;
-	int ret;
 	u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+	__le32 key[4];
+	int ret, i;
 
 	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
 	if (!tbl.data)
@@ -241,11 +243,14 @@
 	rcu_read_lock();
 	ctxt = rcu_dereference(tcp_fastopen_ctx);
 	if (ctxt)
-		memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+		memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
 	else
-		memset(user_key, 0, sizeof(user_key));
+		memset(key, 0, sizeof(key));
 	rcu_read_unlock();
 
+	for (i = 0; i < ARRAY_SIZE(key); i++)
+		user_key[i] = le32_to_cpu(key[i]);
+
 	snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
 		user_key[0], user_key[1], user_key[2], user_key[3]);
 	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -261,12 +266,16 @@
 		 * first invocation of tcp_fastopen_cookie_gen
 		 */
 		tcp_fastopen_init_key_once(false);
-		tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
+
+		for (i = 0; i < ARRAY_SIZE(user_key); i++)
+			key[i] = cpu_to_le32(user_key[i]);
+
+		tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
 	}
 
 bad_key:
 	pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-	       user_key[0], user_key[1], user_key[2], user_key[3],
+		 user_key[0], user_key[1], user_key[2], user_key[3],
 	       (char *)tbl.data, ret);
 	kfree(tbl.data);
 	return ret;
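
The sysctl hunk above makes the TCP fastopen key sysctl byte-order independent: the 16-byte key is treated as four little-endian 32-bit words, converted with le32_to_cpu()/cpu_to_le32() around the "%08x-%08x-%08x-%08x" formatting. A userspace sketch of that round trip, with hand-rolled le32 helpers standing in for the kernel macros:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void le32_store(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
	uint8_t key[16], back[16];
	uint32_t words[4];
	int i;

	for (i = 0; i < 16; i++)		/* arbitrary sample key bytes */
		key[i] = i;

	for (i = 0; i < 4; i++)			/* key bytes -> host-order words */
		words[i] = le32_load(key + 4 * i);
	printf("%08x-%08x-%08x-%08x\n", words[0], words[1], words[2], words[3]);

	for (i = 0; i < 4; i++)			/* host-order words -> key bytes */
		le32_store(back + 4 * i, words[i]);

	printf("round trip %s\n", memcmp(key, back, 16) ? "differs" : "matches");
	return 0;
}
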
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fdfaaf0..452d59b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1728,7 +1728,7 @@
 			 * shouldn't happen.
 			 */
 			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
 				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
 				 flags))
 				break;
@@ -1743,7 +1743,7 @@
 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 				goto found_fin_ok;
 			WARN(!(flags & MSG_PEEK),
-			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
 			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
 		}
 
@@ -3290,8 +3290,7 @@
 			struct request_sock *req = inet_reqsk(sk);
 
 			local_bh_disable();
-			inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
-							  req);
+			inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 			local_bh_enable();
 			return 0;
 		}
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 91698595..7e44d23 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -324,6 +324,10 @@
 	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
 	cwnd = (cwnd + 1) & ~1U;
 
+	/* Ensure gain cycling gets inflight above BDP even for small BDPs. */
+	if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
+		cwnd += 2;
+
 	return cwnd;
 }
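
The tcp_bbr.c hunk above adds two packets of headroom to the target cwnd while PROBE_BW runs with a gain above 1.0, on top of the existing round-up to an even packet count. A small sketch of that quantization, using a made-up 8-bit fixed-point gain in place of BBR_UNIT:

#include <stdio.h>

#define UNIT 256			/* gain of 1.0 in 8-bit fixed point */

enum mode { STARTUP, PROBE_BW };

static unsigned int quantize_cwnd(unsigned int cwnd, enum mode m, unsigned int gain)
{
	cwnd = (cwnd + 1) & ~1U;	/* round up to the next even number */

	if (m == PROBE_BW && gain > UNIT)
		cwnd += 2;		/* keep inflight above BDP on tiny BDPs */

	return cwnd;
}

int main(void)
{
	printf("%u\n", quantize_cwnd(5, STARTUP, UNIT));		/* 6 */
	printf("%u\n", quantize_cwnd(5, PROBE_BW, UNIT * 5 / 4));	/* 8 */
	return 0;
}
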
 
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index ab37c67..a08cedf 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -55,7 +55,6 @@
 	u32 dctcp_alpha;
 	u32 next_seq;
 	u32 ce_state;
-	u32 delayed_ack_reserved;
 	u32 loss_cwnd;
 };
 
@@ -96,7 +95,6 @@
 
 		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
 
-		ca->delayed_ack_reserved = 0;
 		ca->loss_cwnd = 0;
 		ca->ce_state = 0;
 
@@ -131,23 +129,14 @@
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* State has changed from CE=0 to CE=1 and delayed
-	 * ACK has not sent yet.
-	 */
-	if (!ca->ce_state && ca->delayed_ack_reserved) {
-		u32 tmp_rcv_nxt;
-
-		/* Save current rcv_nxt. */
-		tmp_rcv_nxt = tp->rcv_nxt;
-
-		/* Generate previous ack with CE=0. */
-		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-		tp->rcv_nxt = ca->prior_rcv_nxt;
-
-		tcp_send_ack(sk);
-
-		/* Recover current rcv_nxt. */
-		tp->rcv_nxt = tmp_rcv_nxt;
+	if (!ca->ce_state) {
+		/* State has changed from CE=0 to CE=1, force an immediate
+		 * ACK to reflect the new CE state. If an ACK was delayed,
+		 * send that first to reflect the prior CE state.
+		 */
+		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+			__tcp_send_ack(sk, ca->prior_rcv_nxt);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +150,14 @@
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* State has changed from CE=1 to CE=0 and delayed
-	 * ACK has not sent yet.
-	 */
-	if (ca->ce_state && ca->delayed_ack_reserved) {
-		u32 tmp_rcv_nxt;
-
-		/* Save current rcv_nxt. */
-		tmp_rcv_nxt = tp->rcv_nxt;
-
-		/* Generate previous ack with CE=1. */
-		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-		tp->rcv_nxt = ca->prior_rcv_nxt;
-
-		tcp_send_ack(sk);
-
-		/* Recover current rcv_nxt. */
-		tp->rcv_nxt = tmp_rcv_nxt;
+	if (ca->ce_state) {
+		/* State has changed from CE=1 to CE=0, force an immediate
+		 * ACK to reflect the new CE state. If an ACK was delayed,
+		 * send that first to reflect the prior CE state.
+		 */
+		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+			__tcp_send_ack(sk, ca->prior_rcv_nxt);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -248,25 +228,6 @@
 	}
 }
 
-static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
-{
-	struct dctcp *ca = inet_csk_ca(sk);
-
-	switch (ev) {
-	case CA_EVENT_DELAYED_ACK:
-		if (!ca->delayed_ack_reserved)
-			ca->delayed_ack_reserved = 1;
-		break;
-	case CA_EVENT_NON_DELAYED_ACK:
-		if (ca->delayed_ack_reserved)
-			ca->delayed_ack_reserved = 0;
-		break;
-	default:
-		/* Don't care for the rest. */
-		break;
-	}
-}
-
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
 {
 	switch (ev) {
@@ -276,10 +237,6 @@
 	case CA_EVENT_ECN_NO_CE:
 		dctcp_ce_state_1_to_0(sk);
 		break;
-	case CA_EVENT_DELAYED_ACK:
-	case CA_EVENT_NON_DELAYED_ACK:
-		dctcp_update_ack_reserved(sk, ev);
-		break;
 	default:
 		/* Don't care for the rest. */
 		break;
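
The tcp_dctcp.c hunks above replace the old rcv_nxt save/restore trick: on a CE flip, any pending delayed ACK is sent first for prior_rcv_nxt (so it still reflects the previous CE state), and the receiver then enters quick-ACK mode to report the new state promptly. The following is only a toy receiver model of that ordering, not the kernel logic; ack() just prints what would be acknowledged:

#include <stdio.h>
#include <stdbool.h>

struct rx_state {
	unsigned int rcv_nxt;		/* next byte expected */
	unsigned int prior_rcv_nxt;	/* data fully covered by ce_state */
	bool ce_state;			/* CE seen on the latest segment */
	bool ack_pending;		/* a delayed ACK is outstanding */
};

static void ack(unsigned int upto, bool ce)
{
	printf("ACK up to %u (CE=%d)\n", upto, ce);
}

static void rx_segment(struct rx_state *s, unsigned int len, bool ce)
{
	if (ce != s->ce_state) {
		if (s->ack_pending) {		/* flush the old-state ACK first */
			ack(s->prior_rcv_nxt, s->ce_state);
			s->ack_pending = false;
		}
		s->ce_state = ce;		/* then report the new state quickly */
	}
	s->rcv_nxt += len;
	s->prior_rcv_nxt = s->rcv_nxt;		/* everything so far seen under 'ce' */
	s->ack_pending = true;			/* normally delayed */
}

int main(void)
{
	struct rx_state s = { 0 };

	rx_segment(&s, 1000, false);
	rx_segment(&s, 1000, false);
	rx_segment(&s, 1000, true);		/* CE flip: old-state ACK goes out first */
	ack(s.rcv_nxt, s.ce_state);		/* final ACK, for the demonstration */
	return 0;
}
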
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c244b72..8fc8c8d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -199,24 +199,27 @@
 	}
 }
 
-static void tcp_incr_quickack(struct sock *sk)
+static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
 	if (quickacks == 0)
 		quickacks = 2;
+	quickacks = min(quickacks, max_quickacks);
 	if (quickacks > icsk->icsk_ack.quick)
-		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+		icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	tcp_incr_quickack(sk);
+
+	tcp_incr_quickack(sk, max_quickacks);
 	icsk->icsk_ack.pingpong = 0;
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -248,8 +251,10 @@
 	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
 	case INET_ECN_NOT_ECT:
 		/* Funny extension: if ECT is not set on a segment,
@@ -257,31 +262,31 @@
 		 * it is probably a retransmit.
 		 */
 		if (tp->ecn_flags & TCP_ECN_SEEN)
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode(sk, 2);
 		break;
 	case INET_ECN_CE:
-		if (tcp_ca_needs_ecn((struct sock *)tp))
-			tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
+		if (tcp_ca_needs_ecn(sk))
+			tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
 
 		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
 			/* Better not delay acks, sender can have a very low cwnd */
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode(sk, 2);
 			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 		}
 		tp->ecn_flags |= TCP_ECN_SEEN;
 		break;
 	default:
-		if (tcp_ca_needs_ecn((struct sock *)tp))
-			tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
+		if (tcp_ca_needs_ecn(sk))
+			tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
 		tp->ecn_flags |= TCP_ECN_SEEN;
 		break;
 	}
 }
 
-static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 {
-	if (tp->ecn_flags & TCP_ECN_OK)
-		__tcp_ecn_check_ce(tp, skb);
+	if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
+		__tcp_ecn_check_ce(sk, skb);
 }
 
 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -675,7 +680,7 @@
 		/* The _first_ data packet received, initialize
 		 * delayed ACK engine.
 		 */
-		tcp_incr_quickack(sk);
+		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 		icsk->icsk_ack.ato = TCP_ATO_MIN;
 	} else {
 		int m = now - icsk->icsk_ack.lrcvtime;
@@ -691,13 +696,13 @@
 			/* Too long gap. Apparently sender failed to
 			 * restart window, so that we send ACKs quickly.
 			 */
-			tcp_incr_quickack(sk);
+			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 			sk_mem_reclaim(sk);
 		}
 	}
 	icsk->icsk_ack.lrcvtime = now;
 
-	tcp_ecn_check_ce(tp, skb);
+	tcp_ecn_check_ce(sk, skb);
 
 	if (skb->len >= 128)
 		tcp_grow_window(sk, skb);
@@ -3237,6 +3242,15 @@
 
 		if (tcp_is_reno(tp)) {
 			tcp_remove_reno_sacks(sk, pkts_acked);
+
+			/* If any of the cumulatively ACKed segments was
+			 * retransmitted, non-SACK case cannot confirm that
+			 * progress was due to original transmission due to
+			 * lack of TCPCB_SACKED_ACKED bits even if some of
+			 * the packets may have been never retransmitted.
+			 */
+			if (flag & FLAG_RETRANS_DATA_ACKED)
+				flag &= ~FLAG_ORIG_SACK_ACKED;
 		} else {
 			int delta;
 
@@ -4201,7 +4215,7 @@
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4357,6 +4371,23 @@
 	return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+			     struct sk_buff *to,
+			     struct sk_buff *from,
+			     bool *fragstolen)
+{
+	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+	/* In case tcp_drop() is called later, update to->gso_segs */
+	if (res) {
+		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+	}
+	return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
 	sk_drops_add(sk, skb);
@@ -4445,7 +4476,7 @@
 	u32 seq, end_seq;
 	bool fragstolen;
 
-	tcp_ecn_check_ce(tp, skb);
+	tcp_ecn_check_ce(sk, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
@@ -4480,7 +4511,8 @@
 	/* In the typical case, we are adding an skb to the end of the list.
 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
 	 */
-	if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
+	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+				 skb, &fragstolen)) {
 coalesce_done:
 		tcp_grow_window(sk, skb);
 		kfree_skb_partial(skb, fragstolen);
@@ -4508,7 +4540,7 @@
 				/* All the bits are present. Drop. */
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb);
+				tcp_drop(sk, skb);
 				skb = NULL;
 				tcp_dsack_set(sk, seq, end_seq);
 				goto add_sack;
@@ -4527,10 +4559,11 @@
 						 TCP_SKB_CB(skb1)->end_seq);
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb1);
+				tcp_drop(sk, skb1);
 				goto merge_right;
 			}
-		} else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
+		} else if (tcp_ooo_try_coalesce(sk, skb1,
+						skb, &fragstolen)) {
 			goto coalesce_done;
 		}
 		p = &parent->rb_right;
@@ -4725,7 +4758,7 @@
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_schedule_ack(sk);
 drop:
 		tcp_drop(sk, skb);
@@ -4736,8 +4769,6 @@
 	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
 		goto out_of_window;
 
-	tcp_enter_quickack_mode(sk);
-
 	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		/* Partial packet, seq < rcv_next < end_seq */
 		SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
@@ -5822,7 +5853,7 @@
 			 * to stand against the temptation 8)     --ANK
 			 */
 			inet_csk_schedule_ack(sk);
-			tcp_enter_quickack_mode(sk);
+			tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
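
The tcp_input.c changes above let callers cap the quick-ACK budget (for example to 2 on ECN transitions and TCP_MAX_QUICKACKS on out-of-window data) instead of always applying the global maximum. A sketch of the capped, raise-only computation with sample numbers:

#include <stdio.h>

static unsigned int incr_quickack(unsigned int current_quick,
				  unsigned int rcv_wnd, unsigned int rcv_mss,
				  unsigned int max_quickacks)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > max_quickacks)
		quickacks = max_quickacks;	/* caller-supplied cap */
	if (quickacks > current_quick)
		current_quick = quickacks;	/* never shrink the budget here */

	return current_quick;
}

int main(void)
{
	/* 64 KB window, 1460-byte MSS, capped at 2 quick ACKs (ECN path) */
	printf("%u\n", incr_quickack(0, 65536, 1460, 2));
	/* same window, generous cap (out-of-window path) */
	printf("%u\n", incr_quickack(0, 65536, 1460, 16));
	return 0;
}
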
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0f457be..07c8fd2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2454,6 +2454,12 @@
 		if (res)
 			goto fail;
 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+		/* Please enforce IP_DF and IPID==0 for RST and
+		 * ACK sent in SYN-RECV and TIME-WAIT state.
+		 */
+		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
 	}
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 830a564..a501b45 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -194,8 +194,9 @@
 				inet_twsk_deschedule_put(tw);
 				return TCP_TW_SUCCESS;
 			}
+		} else {
+			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 		}
-		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
 		if (tmp_opt.saw_tstamp) {
 			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 70c7212..030f3a1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -174,8 +174,13 @@
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+				      u32 rcv_nxt)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (unlikely(rcv_nxt != tp->rcv_nxt))
+		return;  /* Special ACK sent by DCTCP to reflect ECN */
 	tcp_dec_quickack_mode(sk, pkts);
 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -905,8 +910,8 @@
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-			    gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_sock *inet;
@@ -969,7 +974,7 @@
 	th->source		= inet->inet_sport;
 	th->dest		= inet->inet_dport;
 	th->seq			= htonl(tcb->seq);
-	th->ack_seq		= htonl(tp->rcv_nxt);
+	th->ack_seq		= htonl(rcv_nxt);
 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 					tcb->tcp_flags);
 
@@ -1010,7 +1015,7 @@
 	icsk->icsk_af_ops->send_check(sk, skb);
 
 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
-		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
 	if (skb->len != tcp_header_size) {
 		tcp_event_data_sent(tp, sk);
@@ -1046,6 +1051,13 @@
 	return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+			    gfp_t gfp_mask)
+{
+	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+				  tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3420,8 +3432,6 @@
 	int ato = icsk->icsk_ack.ato;
 	unsigned long timeout;
 
-	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
-
 	if (ato > TCP_DELACK_MIN) {
 		const struct tcp_sock *tp = tcp_sk(sk);
 		int max_ato = HZ / 2;
@@ -3470,7 +3480,7 @@
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
 	struct sk_buff *buff;
 
@@ -3478,8 +3488,6 @@
 	if (sk->sk_state == TCP_CLOSE)
 		return;
 
-	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
-
 	/* We are not putting this on the write queue, so
 	 * tcp_transmit_skb() will set the ownership to this
 	 * sock.
@@ -3508,9 +3516,14 @@
 
 	/* Send it off, this clears delayed acks for us. */
 	skb_mstamp_get(&buff->skb_mstamp);
-	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
+}
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
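
The tcp_output.c hunks above thread an explicit rcv_nxt through the transmit path: __tcp_transmit_skb()/__tcp_send_ack() take it as an argument, the old entry points become thin wrappers passing tp->rcv_nxt, and an ACK carrying any other value skips the delayed-ACK bookkeeping. A simplified sketch of that wrapper shape, with invented types and a print in place of the real transmit:

#include <stdio.h>
#include <stdint.h>

struct conn {
	uint32_t rcv_nxt;
};

/* Core path: the ACK field comes from the caller, not from the socket. */
static void transmit_ack(struct conn *c, uint32_t ack_seq)
{
	/* An ACK for anything other than rcv_nxt must not clear the
	 * delayed-ACK timer, mirroring the tcp_event_ack_sent() check. */
	int clears_delack = (ack_seq == c->rcv_nxt);

	printf("ACK %u (clears delayed-ACK timer: %d)\n", ack_seq, clears_delack);
}

/* Legacy entry point keeps its old behaviour. */
static void send_ack(struct conn *c)
{
	transmit_ack(c, c->rcv_nxt);
}

int main(void)
{
	struct conn c = { .rcv_nxt = 5000 };

	send_ack(&c);			/* normal ACK */
	transmit_ack(&c, 4000);		/* DCTCP-style ACK for the prior CE state */
	return 0;
}
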
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 3d063eb..f6c50af 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -117,7 +117,7 @@
 	     (fwmark > 0 && skb->mark == fwmark)) &&
 	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
 
-		spin_lock_bh(&tcp_probe.lock);
+		spin_lock(&tcp_probe.lock);
 		/* If log fills, just silently drop */
 		if (tcp_probe_avail() > 1) {
 			struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@
 			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
 		}
 		tcp_probe.lastcwnd = tp->snd_cwnd;
-		spin_unlock_bh(&tcp_probe.lock);
+		spin_unlock(&tcp_probe.lock);
 
 		wake_up(&tcp_probe.wait);
 	}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 885cc39..789bbcb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1740,6 +1740,28 @@
 							 inet_compute_pseudo);
 }
 
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and

+ * return code conversion for ip layer consumption
+ */
+static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+			       struct udphdr *uh)
+{
+	int ret;
+
+	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+					 inet_compute_pseudo);
+
+	ret = udp_queue_rcv_skb(sk, skb);
+
+	/* a return value > 0 means to resubmit the input, but
+	 * it wants the return to be -protocol, or 0
+	 */
+	if (ret > 0)
+		return -ret;
+	return 0;
+}
+
 /*
  *	All we need to do is get the socket, and then do a checksum.
  */
@@ -1786,14 +1808,9 @@
 		if (unlikely(sk->sk_rx_dst != dst))
 			udp_sk_rx_dst_set(sk, dst);
 
-		ret = udp_queue_rcv_skb(sk, skb);
+		ret = udp_unicast_rcv_skb(sk, skb, uh);
 		sock_put(sk);
-		/* a return value > 0 means to resubmit the input, but
-		 * it wants the return to be -protocol, or 0
-		 */
-		if (ret > 0)
-			return -ret;
-		return 0;
+		return ret;
 	}
 
 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -1801,22 +1818,8 @@
 						saddr, daddr, udptable, proto);
 
 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-	if (sk) {
-		int ret;
-
-		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
-			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
-						 inet_compute_pseudo);
-
-		ret = udp_queue_rcv_skb(sk, skb);
-
-		/* a return value > 0 means to resubmit the input, but
-		 * it wants the return to be -protocol, or 0
-		 */
-		if (ret > 0)
-			return -ret;
-		return 0;
-	}
+	if (sk)
+		return udp_unicast_rcv_skb(sk, skb, uh);
 
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto drop;
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 8d772fe..9742abf 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -799,8 +799,7 @@
 {
 	struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
 
-	txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS,
-					 hop, hop ? ipv6_optlen(hop) : 0);
+	txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
 	txopt_put(old);
 	if (IS_ERR(txopts))
 		return PTR_ERR(txopts);
@@ -1222,8 +1221,7 @@
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 
-	txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-					 new, new ? ipv6_optlen(new) : 0);
+	txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
 	kfree(new);
 
@@ -1260,8 +1258,7 @@
 	if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
 		return; /* Nothing to do */
 
-	txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-					 new, new ? ipv6_optlen(new) : 0);
+	txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
 	if (!IS_ERR(txopts)) {
 		txopts = xchg(&req_inet->ipv6_opt, txopts);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1778af9..83ce5ac 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -701,13 +701,16 @@
 	}
 	if (np->rxopt.bits.rxorigdstaddr) {
 		struct sockaddr_in6 sin6;
-		__be16 *ports = (__be16 *) skb_transport_header(skb);
+		__be16 *ports;
+		int end;
 
-		if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+		end = skb_transport_offset(skb) + 4;
+		if (end <= 0 || pskb_may_pull(skb, end)) {
 			/* All current transport protocols have the port numbers in the
 			 * first four bytes of the transport header and this function is
 			 * written with this assumption in mind.
 			 */
+			ports = (__be16 *)skb_transport_header(skb);
 
 			sin6.sin6_family = AF_INET6;
 			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 139ceb6..b909c77 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -760,29 +760,21 @@
 }
 EXPORT_SYMBOL_GPL(ipv6_dup_options);
 
-static int ipv6_renew_option(void *ohdr,
-			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
-			     int inherit,
-			     struct ipv6_opt_hdr **hdr,
-			     char **p)
+static void ipv6_renew_option(int renewtype,
+			      struct ipv6_opt_hdr **dest,
+			      struct ipv6_opt_hdr *old,
+			      struct ipv6_opt_hdr *new,
+			      int newtype, char **p)
 {
-	if (inherit) {
-		if (ohdr) {
-			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
-			*hdr = (struct ipv6_opt_hdr *)*p;
-			*p += CMSG_ALIGN(ipv6_optlen(*hdr));
-		}
-	} else {
-		if (newopt) {
-			if (copy_from_user(*p, newopt, newoptlen))
-				return -EFAULT;
-			*hdr = (struct ipv6_opt_hdr *)*p;
-			if (ipv6_optlen(*hdr) > newoptlen)
-				return -EINVAL;
-			*p += CMSG_ALIGN(newoptlen);
-		}
-	}
-	return 0;
+	struct ipv6_opt_hdr *src;
+
+	src = (renewtype == newtype ? new : old);
+	if (!src)
+		return;
+
+	memcpy(*p, src, ipv6_optlen(src));
+	*dest = (struct ipv6_opt_hdr *)*p;
+	*p += CMSG_ALIGN(ipv6_optlen(*dest));
 }
 
 /**
@@ -808,13 +800,11 @@
  */
 struct ipv6_txoptions *
 ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
-		   int newtype,
-		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
+		   int newtype, struct ipv6_opt_hdr *newopt)
 {
 	int tot_len = 0;
 	char *p;
 	struct ipv6_txoptions *opt2;
-	int err;
 
 	if (opt) {
 		if (newtype != IPV6_HOPOPTS && opt->hopopt)
@@ -827,8 +817,8 @@
 			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
 	}
 
-	if (newopt && newoptlen)
-		tot_len += CMSG_ALIGN(newoptlen);
+	if (newopt)
+		tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
 
 	if (!tot_len)
 		return NULL;
@@ -843,29 +833,19 @@
 	opt2->tot_len = tot_len;
 	p = (char *)(opt2 + 1);
 
-	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
-				newtype != IPV6_HOPOPTS,
-				&opt2->hopopt, &p);
-	if (err)
-		goto out;
-
-	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
-				newtype != IPV6_RTHDRDSTOPTS,
-				&opt2->dst0opt, &p);
-	if (err)
-		goto out;
-
-	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
-				newtype != IPV6_RTHDR,
-				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
-	if (err)
-		goto out;
-
-	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
-				newtype != IPV6_DSTOPTS,
-				&opt2->dst1opt, &p);
-	if (err)
-		goto out;
+	ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
+			  (opt ? opt->hopopt : NULL),
+			  newopt, newtype, &p);
+	ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
+			  (opt ? opt->dst0opt : NULL),
+			  newopt, newtype, &p);
+	ipv6_renew_option(IPV6_RTHDR,
+			  (struct ipv6_opt_hdr **)&opt2->srcrt,
+			  (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
+			  newopt, newtype, &p);
+	ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
+			  (opt ? opt->dst1opt : NULL),
+			  newopt, newtype, &p);
 
 	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
 			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
@@ -873,37 +853,6 @@
 	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
 
 	return opt2;
-out:
-	sock_kfree_s(sk, opt2, opt2->tot_len);
-	return ERR_PTR(err);
-}
-
-/**
- * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
- *
- * @sk: sock from which to allocate memory
- * @opt: original options
- * @newtype: option type to replace in @opt
- * @newopt: new option of type @newtype to replace (kernel-mem)
- * @newoptlen: length of @newopt
- *
- * See ipv6_renew_options().  The difference is that @newopt is
- * kernel memory, rather than user memory.
- */
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
-			int newtype, struct ipv6_opt_hdr *newopt,
-			int newoptlen)
-{
-	struct ipv6_txoptions *ret_val;
-	const mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	ret_val = ipv6_renew_options(sk, opt, newtype,
-				     (struct ipv6_opt_hdr __user *)newopt,
-				     newoptlen);
-	set_fs(old_fs);
-	return ret_val;
 }
 
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
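
The rewritten ipv6_renew_option() above reduces to one rule per extension-header slot: take the caller's new header when the slot matches the type being renewed, otherwise keep the existing one, and append whichever was chosen to the freshly allocated buffer. A userspace sketch of that selection and packing, using toy header types rather than real IPv6 options:

#include <stdio.h>
#include <string.h>

struct opt_hdr {
	int type;
	size_t len;			/* total length, header included */
	const char *payload;		/* illustrative contents */
};

static void renew_slot(int slot_type, const struct opt_hdr **dest,
		       const struct opt_hdr *old, const struct opt_hdr *new,
		       int renew_type, char *buf, size_t *off)
{
	const struct opt_hdr *src = (slot_type == renew_type) ? new : old;

	if (!src)
		return;			/* neither side has this header */

	memcpy(buf + *off, src->payload, src->len);
	*dest = src;			/* record which header the slot now holds */
	*off += src->len;
}

int main(void)
{
	struct opt_hdr hop = { 1, 8, "HOPBYHOP" };
	struct opt_hdr dst = { 2, 8, "DSTOPTS." };
	struct opt_hdr newdst = { 2, 8, "NEWDST.." };
	const struct opt_hdr *slots[2] = { NULL, NULL };
	char buf[64];
	size_t off = 0;

	/* renew type 2: keep the hop-by-hop header, replace destination options */
	renew_slot(1, &slots[0], &hop, &newdst, 2, buf, &off);
	renew_slot(2, &slots[1], &dst, &newdst, 2, buf, &off);

	printf("%.*s\n", (int)off, buf);	/* HOPBYHOPNEWDST.. */
	return 0;
}
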
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 3cdf4dc..7c539de 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -113,6 +113,7 @@
 			payload_len = skb->len - nhoff - sizeof(*ipv6h);
 		ipv6h->payload_len = htons(payload_len);
 		skb->network_header = (u8 *)ipv6h - skb->head;
+		skb_reset_mac_len(skb);
 
 		if (udpfrag) {
 			int err = ip6_find_1stfragopt(skb, &prevhdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ae5e38b..46f8e7c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -210,12 +210,10 @@
 				kfree_skb(skb);
 				return -ENOBUFS;
 			}
+			if (skb->sk)
+				skb_set_owner_w(skb2, skb->sk);
 			consume_skb(skb);
 			skb = skb2;
-			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
-			 * it is safe to call in our context (socket lock not held)
-			 */
-			skb_set_owner_w(skb, (struct sock *)sk);
 		}
 		if (opt->opt_flen)
 			ipv6_push_frag_opts(skb, opt, &proto);
@@ -585,6 +583,8 @@
 	to->dev = from->dev;
 	to->mark = from->mark;
 
+	skb_copy_hash(to, from);
+
 #ifdef CONFIG_NET_SCHED
 	to->tc_index = from->tc_index;
 #endif
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5603410..0149bfd 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1133,12 +1133,8 @@
 		max_headroom += 8;
 		mtu -= 8;
 	}
-	if (skb->protocol == htons(ETH_P_IPV6)) {
-		if (mtu < IPV6_MIN_MTU)
-			mtu = IPV6_MIN_MTU;
-	} else if (mtu < 576) {
-		mtu = 576;
-	}
+	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+		       IPV6_MIN_MTU : IPV4_MIN_MTU);
 
 	if (skb_dst(skb) && !t->parms.collect_md)
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5ae1681..406ff50 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -480,12 +480,8 @@
 		goto tx_err_dst_release;
 	}
 
-	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
-	skb_dst_set(skb, dst);
-	skb->dev = skb_dst(skb)->dev;
-
 	mtu = dst_mtu(dst);
-	if (!skb->ignore_df && skb->len > mtu) {
+	if (skb->len > mtu) {
 		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
 
 		if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -498,9 +494,14 @@
 				  htonl(mtu));
 		}
 
-		return -EMSGSIZE;
+		err = -EMSGSIZE;
+		goto tx_err_dst_release;
 	}
 
+	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+	skb_dst_set(skb, dst);
+	skb->dev = skb_dst(skb)->dev;
+
 	err = dst_output(t->net, skb->sk, skb);
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c66b9a8..81fd35e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -390,6 +390,12 @@
 	case IPV6_DSTOPTS:
 	{
 		struct ipv6_txoptions *opt;
+		struct ipv6_opt_hdr *new = NULL;
+
+		/* hop-by-hop / destination options are privileged option */
+		retv = -EPERM;
+		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+			break;
 
 		/* remove any sticky options header with a zero option
 		 * length, per RFC3542.
@@ -401,17 +407,22 @@
 		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
 			 optlen & 0x7 || optlen > 8 * 255)
 			goto e_inval;
-
-		/* hop-by-hop / destination options are privileged option */
-		retv = -EPERM;
-		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
-			break;
+		else {
+			new = memdup_user(optval, optlen);
+			if (IS_ERR(new)) {
+				retv = PTR_ERR(new);
+				break;
+			}
+			if (unlikely(ipv6_optlen(new) > optlen)) {
+				kfree(new);
+				goto e_inval;
+			}
+		}
 
 		opt = rcu_dereference_protected(np->opt,
 						lockdep_sock_is_held(sk));
-		opt = ipv6_renew_options(sk, opt, optname,
-					 (struct ipv6_opt_hdr __user *)optval,
-					 optlen);
+		opt = ipv6_renew_options(sk, opt, optname, new);
+		kfree(new);
 		if (IS_ERR(opt)) {
 			retv = PTR_ERR(opt);
 			break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ca8fac6..6c54c76 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -771,8 +771,7 @@
 	if (pmc) {
 		im->idev = pmc->idev;
 		im->mca_crcount = idev->mc_qrv;
-		im->mca_sfmode = pmc->mca_sfmode;
-		if (pmc->mca_sfmode == MCAST_INCLUDE) {
+		if (im->mca_sfmode == MCAST_INCLUDE) {
 			im->mca_tomb = pmc->mca_tomb;
 			im->mca_sources = pmc->mca_sources;
 			for (psf = im->mca_sources; psf; psf = psf->sf_next)
@@ -2085,7 +2084,8 @@
 		mld_send_initial_cr(idev);
 		idev->mc_dad_count--;
 		if (idev->mc_dad_count)
-			mld_dad_start_timer(idev, idev->mc_maxdelay);
+			mld_dad_start_timer(idev,
+					    unsolicited_report_interval(idev));
 	}
 }
 
@@ -2097,7 +2097,8 @@
 	if (idev->mc_dad_count) {
 		idev->mc_dad_count--;
 		if (idev->mc_dad_count)
-			mld_dad_start_timer(idev, idev->mc_maxdelay);
+			mld_dad_start_timer(idev,
+					    unsolicited_report_interval(idev));
 	}
 	in6_dev_put(idev);
 }
@@ -2455,7 +2456,8 @@
 	if (idev->mc_ifc_count) {
 		idev->mc_ifc_count--;
 		if (idev->mc_ifc_count)
-			mld_ifc_start_timer(idev, idev->mc_maxdelay);
+			mld_ifc_start_timer(idev,
+					    unsolicited_report_interval(idev));
 	}
 	in6_dev_put(idev);
 }
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index aa82858..671eb32 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -593,6 +593,7 @@
 		return -ENOMEM;
 
 	j = 0;
+	memset(&mtpar, 0, sizeof(mtpar));
 	mtpar.net	= net;
 	mtpar.table     = name;
 	mtpar.entryinfo = &e->ipv6;
@@ -1942,6 +1943,7 @@
 		.checkentry = icmp6_checkentry,
 		.proto      = IPPROTO_ICMPV6,
 		.family     = NFPROTO_IPV6,
+		.me	    = THIS_MODULE,
 	},
 };
 
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 64ec233..ee33a67 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -117,7 +117,7 @@
 	if (hdr == NULL)
 		goto err_reg;
 
-	net->nf_frag.sysctl.frags_hdr = hdr;
+	net->nf_frag_frags_hdr = hdr;
 	return 0;
 
 err_reg:
@@ -131,8 +131,8 @@
 {
 	struct ctl_table *table;
 
-	table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
-	unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+	table = net->nf_frag_frags_hdr->ctl_table_arg;
+	unregister_net_sysctl_table(net->nf_frag_frags_hdr);
 	if (!net_eq(net, &init_net))
 		kfree(table);
 }
@@ -618,6 +618,8 @@
 	    fq->q.meat == fq->q.len &&
 	    nf_ct_frag6_reasm(fq, skb, dev))
 		ret = 0;
+	else
+		skb_dst_drop(skb);
 
 out_unlock:
 	spin_unlock_bh(&fq->q.lock);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 101ed6c..0a78f17 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -774,6 +774,13 @@
 		return -EINVAL;
 
 	lock_sock(sk);
+
+	/* Ensure that the socket is not already bound */
+	if (self->ias_obj) {
+		err = -EINVAL;
+		goto out;
+	}
+
 #ifdef CONFIG_IRDA_ULTRA
 	/* Special care for Ultra sockets */
 	if ((sk->sk_type == SOCK_DGRAM) &&
@@ -2016,7 +2023,11 @@
 			err = -EINVAL;
 			goto out;
 		}
-		irias_insert_object(ias_obj);
+
+		/* Only insert newly allocated objects */
+		if (free_ias)
+			irias_insert_object(ias_obj);
+
 		kfree(ias_opt);
 		break;
 	case IRLMP_IAS_DEL:
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3ba903f..58c045d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@
 	return 0;
 }
 
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
-			       gfp_t allocation, struct sock *sk)
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
+			       struct sock *sk)
 {
 	int err = -ENOBUFS;
 
-	sock_hold(sk);
-	if (*skb2 == NULL) {
-		if (atomic_read(&skb->users) != 1) {
-			*skb2 = skb_clone(skb, allocation);
-		} else {
-			*skb2 = skb;
-			atomic_inc(&skb->users);
-		}
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+		return err;
+
+	skb = skb_clone(skb, allocation);
+
+	if (skb) {
+		skb_set_owner_r(skb, sk);
+		skb_queue_tail(&sk->sk_receive_queue, skb);
+		sk->sk_data_ready(sk);
+		err = 0;
 	}
-	if (*skb2 != NULL) {
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
-			skb_set_owner_r(*skb2, sk);
-			skb_queue_tail(&sk->sk_receive_queue, *skb2);
-			sk->sk_data_ready(sk);
-			*skb2 = NULL;
-			err = 0;
-		}
-	}
-	sock_put(sk);
 	return err;
 }
 
@@ -234,7 +226,6 @@
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
-	struct sk_buff *skb2 = NULL;
 	int err = -ESRCH;
 
 	/* XXX Do we need something like netlink_overrun?  I think
@@ -253,7 +244,7 @@
 		 * socket.
 		 */
 		if (pfk->promisc)
-			pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+			pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
 		/* the exact target will be processed later */
 		if (sk == one_sk)
@@ -268,7 +259,7 @@
 				continue;
 		}
 
-		err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+		err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
 		/* Error is cleared after successful sending to at least one
 		 * registered KM */
@@ -278,9 +269,8 @@
 	rcu_read_unlock();
 
 	if (one_sk != NULL)
-		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+		err = pfkey_broadcast_one(skb, allocation, one_sk);
 
-	kfree_skb(skb2);
 	kfree_skb(skb);
 	return err;
 }
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ead98e8..a5333f6 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1239,7 +1239,7 @@
 
 	/* Get routing info from the tunnel socket */
 	skb_dst_drop(skb);
-	skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
+	skb_dst_set(skb, sk_dst_check(sk, 0));
 
 	inet = inet_sk(sk);
 	fl = &inet->cork.fl;
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 842851c..e896a2c 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -73,8 +73,8 @@
 
 	rcu_read_lock_bh();
 	sap = __llc_sap_find(sap_value);
-	if (sap)
-		llc_sap_hold(sap);
+	if (!sap || !llc_sap_hold_safe(sap))
+		sap = NULL;
 	rcu_read_unlock_bh();
 	return sap;
 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d7801f6..6ef9d32 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -386,7 +386,7 @@
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 		/* Keys without a station are used for TX only */
-		if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+		if (sta && test_sta_flag(sta, WLAN_STA_MFP))
 			key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
 		break;
 	case NL80211_IFTYPE_ADHOC:
@@ -454,7 +454,7 @@
 		goto out_unlock;
 	}
 
-	ieee80211_key_free(key, true);
+	ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION);
 
 	ret = 0;
  out_unlock:
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a5acaf1..0c0695e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -948,8 +948,8 @@
 	if (len < IEEE80211_DEAUTH_FRAME_LEN)
 		return;
 
-	ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, reason);
+	ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
 	sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -967,9 +967,9 @@
 	auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
 	auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-	ibss_dbg(sdata,
-		 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+	ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+		 mgmt->bssid, auth_transaction);
 
 	if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
 		return;
@@ -1176,10 +1176,10 @@
 		rx_timestamp = drv_get_tsf(local, sdata);
 	}
 
-	ibss_dbg(sdata,
-		 "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+	ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
 		 mgmt->sa, mgmt->bssid,
-		 (unsigned long long)rx_timestamp,
+		 (unsigned long long)rx_timestamp);
+	ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
 		 (unsigned long long)beacon_timestamp,
 		 (unsigned long long)(rx_timestamp - beacon_timestamp),
 		 jiffies);
@@ -1538,9 +1538,9 @@
 
 	tx_last_beacon = drv_tx_last_beacon(local);
 
-	ibss_dbg(sdata,
-		 "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-		 mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+	ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+	ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+		 mgmt->bssid, tx_last_beacon);
 
 	if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
 		return;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 4c625a3..6e02f8d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -648,11 +648,15 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_key *old_key;
-	int idx, ret;
-	bool pairwise;
-
-	pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
-	idx = key->conf.keyidx;
+	int idx = key->conf.keyidx;
+	bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
+	/*
+	 * We want to delay tailroom updates only for station - in that
+	 * case it helps roaming speed, but in other cases it hurts and
+	 * can cause warnings to appear.
+	 */
+	bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION;
+	int ret;
 
 	mutex_lock(&sdata->local->key_mtx);
 
@@ -680,14 +684,14 @@
 	increment_tailroom_need_count(sdata);
 
 	ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
-	ieee80211_key_destroy(old_key, true);
+	ieee80211_key_destroy(old_key, delay_tailroom);
 
 	ieee80211_debugfs_key_add(key);
 
 	if (!local->wowlan) {
 		ret = ieee80211_key_enable_hw_accel(key);
 		if (ret)
-			ieee80211_key_free(key, true);
+			ieee80211_key_free(key, delay_tailroom);
 	} else {
 		ret = 0;
 	}
@@ -922,7 +926,8 @@
 		ieee80211_key_replace(key->sdata, key->sta,
 				key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
 				key, NULL);
-		__ieee80211_key_destroy(key, true);
+		__ieee80211_key_destroy(key, key->sdata->vif.type ==
+					NL80211_IFTYPE_STATION);
 	}
 
 	for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
@@ -932,7 +937,8 @@
 		ieee80211_key_replace(key->sdata, key->sta,
 				key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
 				key, NULL);
-		__ieee80211_key_destroy(key, true);
+		__ieee80211_key_destroy(key, key->sdata->vif.type ==
+					NL80211_IFTYPE_STATION);
 	}
 
 	mutex_unlock(&local->key_mtx);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2bb6899..e3bbfb2 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -254,8 +254,27 @@
 	     "%s called with hardware scan in progress\n", __func__);
 
 	rtnl_lock();
-	list_for_each_entry(sdata, &local->interfaces, list)
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		/*
+		 * XXX: there may be more work for other vif types and even
+		 * for station mode: a good thing would be to run most of
+		 * the iface type's dependent _stop (ieee80211_mg_stop,
+		 * ieee80211_ibss_stop) etc...
+		 * For now, fix only the specific bug that was seen: race
+		 * between csa_connection_drop_work and us.
+		 */
+		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+			/*
+			 * This worker is scheduled from the iface worker that
+			 * runs on mac80211's workqueue, so we can't be
+			 * scheduling this worker after the cancel right here.
+			 * The exception is ieee80211_chswitch_done.
+			 * Then we can have a race...
+			 */
+			cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+		}
 		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+	}
 	ieee80211_scan_cancel(local);
 
 	/* make sure any new ROC will consider local->in_reconfig */
@@ -466,10 +485,7 @@
 		cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
 			    IEEE80211_VHT_CAP_SHORT_GI_80 |
 			    IEEE80211_VHT_CAP_SHORT_GI_160 |
-			    IEEE80211_VHT_CAP_RXSTBC_1 |
-			    IEEE80211_VHT_CAP_RXSTBC_2 |
-			    IEEE80211_VHT_CAP_RXSTBC_3 |
-			    IEEE80211_VHT_CAP_RXSTBC_4 |
+			    IEEE80211_VHT_CAP_RXSTBC_MASK |
 			    IEEE80211_VHT_CAP_TXSTBC |
 			    IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
 			    IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1164,6 +1180,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+	ieee80211_txq_teardown_flows(local);
 
 	rtnl_lock();
 
@@ -1191,7 +1208,6 @@
 	skb_queue_purge(&local->skb_queue);
 	skb_queue_purge(&local->skb_queue_unreliable);
 	skb_queue_purge(&local->skb_queue_tdls_chsw);
-	ieee80211_txq_teardown_flows(local);
 
 	destroy_workqueue(local->workqueue);
 	wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fed598a..b0acb29 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -563,6 +563,10 @@
 		forward = false;
 		reply = true;
 		target_metric = 0;
+
+		if (SN_GT(target_sn, ifmsh->sn))
+			ifmsh->sn = target_sn;
+
 		if (time_after(jiffies, ifmsh->last_sn_update +
 					net_traversal_jiffies(sdata)) ||
 		    time_before(jiffies, ifmsh->last_sn_update)) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 973adf3..70d289d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -989,6 +989,10 @@
 	 */
 
 	if (sdata->reserved_chanctx) {
+		struct ieee80211_supported_band *sband = NULL;
+		struct sta_info *mgd_sta = NULL;
+		enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
 		/*
 		 * with multi-vif csa driver may call ieee80211_csa_finish()
 		 * many times while waiting for other interfaces to use their
@@ -997,6 +1001,48 @@
 		if (sdata->reserved_ready)
 			goto out;
 
+		if (sdata->vif.bss_conf.chandef.width !=
+		    sdata->csa_chandef.width) {
+			/*
+			 * For managed interface, we need to also update the AP
+			 * station bandwidth and align the rate scale algorithm
+			 * on the bandwidth change. Here we only consider the
+			 * bandwidth of the new channel definition (as channel
+			 * switch flow does not have the full HT/VHT/HE
+			 * information), assuming that if additional changes are
+			 * required they would be done as part of the processing
+			 * of the next beacon from the AP.
+			 */
+			switch (sdata->csa_chandef.width) {
+			case NL80211_CHAN_WIDTH_20_NOHT:
+			case NL80211_CHAN_WIDTH_20:
+			default:
+				bw = IEEE80211_STA_RX_BW_20;
+				break;
+			case NL80211_CHAN_WIDTH_40:
+				bw = IEEE80211_STA_RX_BW_40;
+				break;
+			case NL80211_CHAN_WIDTH_80:
+				bw = IEEE80211_STA_RX_BW_80;
+				break;
+			case NL80211_CHAN_WIDTH_80P80:
+			case NL80211_CHAN_WIDTH_160:
+				bw = IEEE80211_STA_RX_BW_160;
+				break;
+			}
+
+			mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+			sband =
+				local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+		}
+
+		if (sdata->vif.bss_conf.chandef.width >
+		    sdata->csa_chandef.width) {
+			mgd_sta->sta.bandwidth = bw;
+			rate_control_rate_update(local, sband, mgd_sta,
+						 IEEE80211_RC_BW_CHANGED);
+		}
+
 		ret = ieee80211_vif_use_reserved_context(sdata);
 		if (ret) {
 			sdata_info(sdata,
@@ -1007,6 +1053,13 @@
 			goto out;
 		}
 
+		if (sdata->vif.bss_conf.chandef.width <
+		    sdata->csa_chandef.width) {
+			mgd_sta->sta.bandwidth = bw;
+			rate_control_rate_update(local, sband, mgd_sta,
+						 IEEE80211_RC_BW_CHANGED);
+		}
+
 		goto out;
 	}
 
@@ -1229,6 +1282,16 @@
 					 cbss->beacon_interval));
 	return;
  drop_connection:
+	/*
+	 * This is just so that the disconnect flow will know that
+	 * we were trying to switch channel and failed. In case the
+	 * mode is 1 (we are not allowed to Tx), we will know not to
+	 * send a deauthentication frame. Those two fields will be
+	 * reset when the disconnection worker runs.
+	 */
+	sdata->vif.csa_active = true;
+	sdata->csa_block_tx = csa_ie.mode;
+
 	ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
 	mutex_unlock(&local->chanctx_mtx);
 	mutex_unlock(&local->mtx);
@@ -2401,6 +2464,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+	bool tx;
 
 	sdata_lock(sdata);
 	if (!ifmgd->associated) {
@@ -2408,6 +2472,8 @@
 		return;
 	}
 
+	tx = !sdata->csa_block_tx;
+
 	/* AP is probably out of range (or not reachable for another reason) so
 	 * remove the bss struct for that AP.
 	 */
@@ -2415,7 +2481,7 @@
 
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-			       true, frame_buf);
+			       tx, frame_buf);
 	mutex_lock(&local->mtx);
 	sdata->vif.csa_active = false;
 	ifmgd->csa_waiting_bcn = false;
@@ -2426,7 +2492,7 @@
 	}
 	mutex_unlock(&local->mtx);
 
-	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
 				    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
 	sdata_unlock(sdata);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ae91a3c..0bb144c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2061,7 +2061,8 @@
 		if (!sta->uploaded)
 			continue;
 
-		if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+		if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+		    sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
 			continue;
 
 		for (state = IEEE80211_STA_NOTEXIST;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 7e25345..bcd1a5e 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -63,8 +63,21 @@
 	int ret;
 
 	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
-		u16 crc = crc_ccitt(0, skb->data, skb->len);
+		struct sk_buff *nskb;
+		u16 crc;
 
+		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
+			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
+					       GFP_ATOMIC);
+			if (likely(nskb)) {
+				consume_skb(skb);
+				skb = nskb;
+			} else {
+				goto err_tx;
+			}
+		}
+
+		crc = crc_ccitt(0, skb->data, skb->len);
 		put_unaligned_le16(crc, skb_put(skb, 2));
 	}
 
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index e34d3f6..fd186b0 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1968,13 +1968,20 @@
 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		/* the destination server is not available */
 
-		if (sysctl_expire_nodest_conn(ipvs)) {
+		__u32 flags = cp->flags;
+
+		/* when timer already started, silently drop the packet.*/
+		if (timer_pending(&cp->timer))
+			__ip_vs_conn_put(cp);
+		else
+			ip_vs_conn_put(cp);
+
+		if (sysctl_expire_nodest_conn(ipvs) &&
+		    !(flags & IP_VS_CONN_F_ONE_PACKET)) {
 			/* try to expire the connection immediately */
 			ip_vs_conn_expire_now(cp);
 		}
-		/* don't restart its timer, and silently
-		   drop the packet. */
-		__ip_vs_conn_put(cp);
+
 		return NF_DROP;
 	}
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 18e96a2..8954835 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -436,12 +436,14 @@
 	local_bh_disable();
 
 	pr_debug("freeing item in the SIP list\n");
-	list_for_each_safe(sip_node_list, sip_node_save_list,
-			   &ct->sip_segment_list) {
-		sip_node = list_entry(sip_node_list, struct sip_list, list);
-		list_del(&sip_node->list);
-		kfree(sip_node);
-	}
+	if (ct->sip_segment_list.next)
+		list_for_each_safe(sip_node_list, sip_node_save_list,
+				   &ct->sip_segment_list) {
+			sip_node = list_entry(sip_node_list,
+					      struct sip_list, list);
+			list_del(&sip_node->list);
+			kfree(sip_node);
+		}
 	/* Expectations will have been removed in clean_from_lists,
 	 * except TFTP can create an expectation on the first packet,
 	 * before connection is in the list, so we need to clean here,
@@ -1901,7 +1903,7 @@
 		return -EOPNOTSUPP;
 
 	/* On boot, we can set this without any fancy locking. */
-	if (!nf_conntrack_htable_size)
+	if (!nf_conntrack_hash)
 		return param_set_uint(val, kp);
 
 	rc = kstrtouint(val, 0, &hashsize);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index a45bee5..d5560ae 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -244,14 +244,14 @@
 		 * We currently ignore Sync packets
 		 *
 		 *	sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-			sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+			sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
 		},
 		[DCCP_PKT_SYNCACK] = {
 		/*
 		 * We currently ignore SyncAck packets
 		 *
 		 *	sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-			sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+			sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
 		},
 	},
 	[CT_DCCP_ROLE_SERVER] = {
@@ -372,14 +372,14 @@
 		 * We currently ignore Sync packets
 		 *
 		 *	sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-			sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+			sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
 		},
 		[DCCP_PKT_SYNCACK] = {
 		/*
 		 * We currently ignore SyncAck packets
 		 *
 		 *	sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-			sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+			sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
 		},
 	},
 };
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 6d6731f..e463a3a 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1,6 +1,6 @@
 /* SIP extension for IP connection tracking.
  *
- * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017-2018, The Linux Foundation. All rights reserved.
  * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
  * based on RR's ip_conntrack_ftp.c and other modules.
  * (C) 2007 United Security Providers
@@ -1880,6 +1880,10 @@
 	if (datalen < strlen("SIP/2.0 200"))
 		return NF_ACCEPT;
 
+	/* Check if the header contains SIP version */
+	if (!strnstr(dptr, "SIP/2.0", datalen))
+		return NF_ACCEPT;
+
 	/* here we save the original datalength and data offset of the skb, this
 	 * is needed later to split combined skbs
 	 */
@@ -2051,6 +2055,10 @@
 	if (datalen < strlen("SIP/2.0 200"))
 		return NF_ACCEPT;
 
+	/* Check if the header contains SIP version */
+	if (!strnstr(dptr, "SIP/2.0", datalen))
+		return NF_ACCEPT;
+
 	return process_sip_msg(skb, ct, protoff, dataoff, &dptr, &datalen);
 }
 
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index e02fed7..42938f9 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -426,6 +426,10 @@
 	if (write) {
 		struct ctl_table tmp = *table;
 
+		/* proc_dostring() can append to existing strings, so we need to
+		 * initialize it as an empty string.
+		 */
+		buf[0] = '\0';
 		tmp.data = buf;
 		r = proc_dostring(&tmp, write, buffer, lenp, ppos);
 		if (r)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 762f31f..a3fb30f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2476,12 +2476,13 @@
 	u32 id = ntohl(nla_get_be32(nla));
 
 	list_for_each_entry(trans, &net->nft.commit_list, list) {
-		struct nft_set *set = nft_trans_set(trans);
+		if (trans->msg_type == NFT_MSG_NEWSET) {
+			struct nft_set *set = nft_trans_set(trans);
 
-		if (trans->msg_type == NFT_MSG_NEWSET &&
-		    id == nft_trans_set_id(trans) &&
-		    nft_active_genmask(set, genmask))
-			return set;
+			if (id == nft_trans_set_id(trans) &&
+			    nft_active_genmask(set, genmask))
+				return set;
+		}
 	}
 	return ERR_PTR(-ENOENT);
 }
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5efb402..2a811b5 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1210,6 +1210,9 @@
 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
 	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
 	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
+	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
+	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
+	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
 };
 
 static const struct nf_queue_handler nfqh = {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 59be898..751fec7 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -877,7 +877,7 @@
 		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
 			return ERR_PTR(-EFAULT);
 
-		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
 		info->num_counters = compat_tmp.num_counters;
 		user += sizeof(compat_tmp);
 	} else
@@ -890,9 +890,9 @@
 		if (copy_from_user(info, user, sizeof(*info)) != 0)
 			return ERR_PTR(-EFAULT);
 
-		info->name[sizeof(info->name) - 1] = '\0';
 		user += sizeof(*info);
 	}
+	info->name[sizeof(info->name) - 1] = '\0';
 
 	size = sizeof(struct xt_counters);
 	size *= info->num_counters;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15e6e7b..0254874 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/nospec.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -654,6 +655,7 @@
 
 	if (protocol < 0 || protocol >= MAX_LINKS)
 		return -EPROTONOSUPPORT;
+	protocol = array_index_nospec(protocol, MAX_LINKS);
 
 	netlink_lock_table();
 #ifdef CONFIG_MODULES
@@ -984,6 +986,11 @@
 			return err;
 	}
 
+	if (nlk->ngroups == 0)
+		groups = 0;
+	else if (nlk->ngroups < 8*sizeof(groups))
+		groups &= (1UL << nlk->ngroups) - 1;
+
 	bound = nlk->bound;
 	if (bound) {
 		/* Ensure nlk->portid is up-to-date. */
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 3f26611..04759a0 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -753,11 +753,14 @@
 		pr_debug("Fragment %zd bytes remaining %zd",
 			 frag_len, remaining_len);
 
-		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
 					 frag_len + LLCP_HEADER_SIZE, &err);
 		if (pdu == NULL) {
-			pr_err("Could not allocate PDU\n");
-			continue;
+			pr_err("Could not allocate PDU (error=%d)\n", err);
+			len -= remaining_len;
+			if (len == 0)
+				len = err;
+			break;
 		}
 
 		pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8ab2b53..8d68c9e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2265,6 +2265,12 @@
 		if (po->stats.stats1.tp_drops)
 			status |= TP_STATUS_LOSING;
 	}
+
+	if (do_vnet &&
+	    __packet_rcv_vnet(skb, h.raw + macoff -
+			      sizeof(struct virtio_net_hdr)))
+		goto drop_n_account;
+
 	po->stats.stats1.tp_packets++;
 	if (copy_skb) {
 		status |= TP_STATUS_COPY;
@@ -2272,14 +2278,6 @@
 	}
 	spin_unlock(&sk->sk_receive_queue.lock);
 
-	if (do_vnet) {
-		if (__packet_rcv_vnet(skb, h.raw + macoff -
-					   sizeof(struct virtio_net_hdr))) {
-			spin_lock(&sk->sk_receive_queue.lock);
-			goto drop_n_account;
-		}
-	}
-
 	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
 	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2919,6 +2917,8 @@
 			goto out_free;
 	} else if (reserve) {
 		skb_reserve(skb, -reserve);
+		if (len < reserve)
+			skb_reset_network_header(skb);
 	}
 
 	/* Returns -EFAULT on error */
@@ -4275,6 +4275,8 @@
 	}
 
 	if (req->tp_block_nr) {
+		unsigned int min_frame_size;
+
 		/* Sanity tests and some calculations */
 		err = -EBUSY;
 		if (unlikely(rb->pg_vec))
@@ -4297,12 +4299,12 @@
 			goto out;
 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
 			goto out;
+		min_frame_size = po->tp_hdrlen + po->tp_reserve;
 		if (po->tp_version >= TPACKET_V3 &&
-		    req->tp_block_size <=
-		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+		    req->tp_block_size <
+		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
 			goto out;
-		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
-					po->tp_reserve))
+		if (unlikely(req->tp_frame_size < min_frame_size))
 			goto out;
 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
 			goto out;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index ae5ac17..7b670a9 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -621,6 +621,10 @@
 	node = NULL;
 	if (addr->sq_node == QRTR_NODE_BCAST) {
 		enqueue_fn = qrtr_bcast_enqueue;
+		if (addr->sq_port != QRTR_PORT_CTRL) {
+			release_sock(sk);
+			return -ENOTCONN;
+		}
 	} else if (addr->sq_node == ipc->us.sq_node) {
 		enqueue_fn = qrtr_local_enqueue;
 	} else {
diff --git a/net/rds/bind.c b/net/rds/bind.c
index adb53ae..cc7e3a1 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -60,11 +60,13 @@
 	u64 key = ((u64)addr << 32) | port;
 	struct rds_sock *rs;
 
-	rs = rhashtable_lookup_fast(&bind_hash_table, &key, ht_parms);
+	rcu_read_lock();
+	rs = rhashtable_lookup(&bind_hash_table, &key, ht_parms);
 	if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
 		rds_sock_addref(rs);
 	else
 		rs = NULL;
+	rcu_read_unlock();
 
 	rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
 		ntohs(port));
@@ -157,6 +159,7 @@
 		goto out;
 	}
 
+	sock_set_flag(sk, SOCK_RCU_FREE);
 	ret = rds_add_bound(rs, sin->sin_addr.s_addr, &sin->sin_port);
 	if (ret)
 		goto out;
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 66b3d62..3d9c4c6 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -61,6 +61,7 @@
 			 pool->fmr_attr.max_pages);
 	if (IS_ERR(frmr->mr)) {
 		pr_warn("RDS/IB: %s failed to allocate MR", __func__);
+		err = PTR_ERR(frmr->mr);
 		goto out_no_cigar;
 	}
 
diff --git a/net/rds/loop.c b/net/rds/loop.c
index f2bf78d..dac6218 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -193,4 +193,5 @@
 	.inc_copy_to_user	= rds_message_inc_copy_to_user,
 	.inc_free		= rds_loop_inc_free,
 	.t_name			= "loopback",
+	.t_type			= RDS_TRANS_LOOP,
 };
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 30a51fe..edfc339 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -440,6 +440,11 @@
 	int			n_status;
 };
 
+/* Available as part of RDS core, so doesn't need to participate
+ * in get_preferred transport etc
+ */
+#define	RDS_TRANS_LOOP	3
+
 /**
  * struct rds_transport -  transport specific behavioural hooks
  *
diff --git a/net/rds/recv.c b/net/rds/recv.c
index cbfabdf..f16ee1b 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -94,6 +94,11 @@
 		return;
 
 	rs->rs_rcv_bytes += delta;
+
+	/* loop transport doesn't send/recv congestion updates */
+	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
+		return;
+
 	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
 
 	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 235db2c..d2932dc 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -267,10 +267,8 @@
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-				void *val, int len, bool exists)
+static int load_metaops_and_vet(u32 metaid, void *val, int len)
 {
 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
 	int ret = 0;
@@ -278,13 +276,9 @@
 	if (!ops) {
 		ret = -ENOENT;
 #ifdef CONFIG_MODULES
-		if (exists)
-			spin_unlock_bh(&ife->tcf_lock);
 		rtnl_unlock();
 		request_module("ifemeta%u", metaid);
 		rtnl_lock();
-		if (exists)
-			spin_lock_bh(&ife->tcf_lock);
 		ops = find_ife_oplist(metaid);
 #endif
 	}
@@ -301,24 +295,17 @@
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-			int len, bool atomic)
+static int __add_metainfo(const struct tcf_meta_ops *ops,
+			  struct tcf_ife_info *ife, u32 metaid, void *metaval,
+			  int len, bool atomic, bool exists)
 {
 	struct tcf_meta_info *mi = NULL;
-	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
 	int ret = 0;
 
-	if (!ops)
-		return -ENOENT;
-
 	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
-	if (!mi) {
-		/*put back what find_ife_oplist took */
-		module_put(ops->owner);
+	if (!mi)
 		return -ENOMEM;
-	}
 
 	mi->metaid = metaid;
 	mi->ops = ops;
@@ -326,17 +313,49 @@
 		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
 		if (ret != 0) {
 			kfree(mi);
-			module_put(ops->owner);
 			return ret;
 		}
 	}
 
+	if (exists)
+		spin_lock_bh(&ife->tcf_lock);
 	list_add_tail(&mi->metalist, &ife->metalist);
+	if (exists)
+		spin_unlock_bh(&ife->tcf_lock);
 
 	return ret;
 }
 
-static int use_all_metadata(struct tcf_ife_info *ife)
+static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+				    struct tcf_ife_info *ife, u32 metaid,
+				    bool exists)
+{
+	int ret;
+
+	if (!try_module_get(ops->owner))
+		return -ENOENT;
+	ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
+	if (ret)
+		module_put(ops->owner);
+	return ret;
+}
+
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+			int len, bool exists)
+{
+	const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+	int ret;
+
+	if (!ops)
+		return -ENOENT;
+	ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
+	if (ret)
+		/*put back what find_ife_oplist took */
+		module_put(ops->owner);
+	return ret;
+}
+
+static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
 {
 	struct tcf_meta_ops *o;
 	int rc = 0;
@@ -344,7 +363,7 @@
 
 	read_lock(&ife_mod_lock);
 	list_for_each_entry(o, &ifeoplist, list) {
-		rc = add_metainfo(ife, o->metaid, NULL, 0, true);
+		rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
 		if (rc == 0)
 			installed += 1;
 	}
@@ -395,7 +414,6 @@
 	struct tcf_meta_info *e, *n;
 
 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
-		module_put(e->ops->owner);
 		list_del(&e->metalist);
 		if (e->metaval) {
 			if (e->ops->release)
@@ -403,6 +421,7 @@
 			else
 				kfree(e->metaval);
 		}
+		module_put(e->ops->owner);
 		kfree(e);
 	}
 }
@@ -416,7 +435,6 @@
 	spin_unlock_bh(&ife->tcf_lock);
 }
 
-/* under ife->tcf_lock for existing action */
 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 			     bool exists)
 {
@@ -430,7 +448,7 @@
 			val = nla_data(tb[i]);
 			len = nla_len(tb[i]);
 
-			rc = load_metaops_and_vet(ife, i, val, len, exists);
+			rc = load_metaops_and_vet(i, val, len);
 			if (rc != 0)
 				return rc;
 
@@ -510,6 +528,8 @@
 	if (exists)
 		spin_lock_bh(&ife->tcf_lock);
 	ife->tcf_action = parm->action;
+	if (exists)
+		spin_unlock_bh(&ife->tcf_lock);
 
 	if (parm->flags & IFE_ENCODE) {
 		if (daddr)
@@ -537,9 +557,6 @@
 				tcf_hash_release(*a, bind);
 			if (ret == ACT_P_CREATED)
 				_tcf_ife_cleanup(*a, bind);
-
-			if (exists)
-				spin_unlock_bh(&ife->tcf_lock);
 			return err;
 		}
 
@@ -553,20 +570,14 @@
 		 * as we can. You better have at least one else we are
 		 * going to bail out
 		 */
-		err = use_all_metadata(ife);
+		err = use_all_metadata(ife, exists);
 		if (err) {
 			if (ret == ACT_P_CREATED)
 				_tcf_ife_cleanup(*a, bind);
-
-			if (exists)
-				spin_unlock_bh(&ife->tcf_lock);
 			return err;
 		}
 	}
 
-	if (exists)
-		spin_unlock_bh(&ife->tcf_lock);
-
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(tn, *a);
 
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 901fb8b..41835f6 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -39,7 +39,7 @@
 
 	tcf_lastuse_update(&t->tcf_tm);
 	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
-	action = params->action;
+	action = READ_ONCE(t->tcf_action);
 
 	switch (params->tcft_action) {
 	case TCA_TUNNEL_KEY_ACT_RELEASE:
@@ -170,7 +170,7 @@
 
 	params_old = rtnl_dereference(t->params);
 
-	params_new->action = parm->action;
+	t->tcf_action = parm->action;
 	params_new->tcft_action = parm->t_action;
 	params_new->tcft_enc_metadata = metadata;
 
@@ -242,13 +242,13 @@
 		.index    = t->tcf_index,
 		.refcnt   = t->tcf_refcnt - ref,
 		.bindcnt  = t->tcf_bindcnt - bind,
+		.action   = t->tcf_action,
 	};
 	struct tcf_t tm;
 
 	params = rtnl_dereference(t->params);
 
 	opt.t_action = params->tcft_action;
-	opt.action = params->action;
 
 	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index e75fb65..61ddfba 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -94,6 +94,8 @@
 	if (!head)
 		return true;
 
+	tcf_unbind_filter(tp, &head->res);
+
 	if (tc_should_offload(dev, tp, head->flags))
 		mall_destroy_hw_filter(tp, head, (unsigned long) head);
 
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 0751245..db80a64 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -414,11 +414,6 @@
 		tcf_bind_filter(tp, &cr.res, base);
 	}
 
-	if (old_r)
-		tcf_exts_change(tp, &r->exts, &e);
-	else
-		tcf_exts_change(tp, &cr.exts, &e);
-
 	if (old_r && old_r != r) {
 		err = tcindex_filter_result_init(old_r);
 		if (err < 0) {
@@ -429,12 +424,15 @@
 
 	oldp = p;
 	r->res = cr.res;
+	tcf_exts_change(tp, &r->exts, &e);
+
 	rcu_assign_pointer(tp->root, cp);
 
 	if (r == &new_filter_result) {
 		struct tcindex_filter *nfp;
 		struct tcindex_filter __rcu **fp;
 
+		f->result.res = r->res;
 		tcf_exts_change(tp, &f->result.exts, &r->exts);
 
 		fp = cp->h + (handle % cp->hash);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index da574a1..e377dd5 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -851,6 +851,7 @@
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_U32_MAX + 1];
 	u32 htid, flags = 0;
+	size_t sel_size;
 	int err;
 #ifdef CONFIG_CLS_U32_PERF
 	size_t size;
@@ -967,8 +968,11 @@
 		return -EINVAL;
 
 	s = nla_data(tb[TCA_U32_SEL]);
+	sel_size = sizeof(*s) + sizeof(*s->keys) * s->nkeys;
+	if (nla_len(tb[TCA_U32_SEL]) < sel_size)
+		return -EINVAL;
 
-	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
 	if (n == NULL)
 		return -ENOBUFS;
 
@@ -981,7 +985,7 @@
 	}
 #endif
 
-	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+	memcpy(&n->sel, s, sel_size);
 	RCU_INIT_POINTER(n->ht_up, ht);
 	n->handle = handle;
 	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index c98a61e..9c4c2bb 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -21,7 +21,7 @@
 			     struct sk_buff **to_free)
 {
 	qdisc_drop(skb, sch, to_free);
-	return NET_XMIT_SUCCESS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 2fae8b5..f4b2d69 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -492,6 +492,9 @@
 		hhf_free(q->hhf_valid_bits[i]);
 	}
 
+	if (!q->hh_flows)
+		return;
+
 	for (i = 0; i < HH_FLOWS_CNT; i++) {
 		struct hh_flow_state *flow, *next;
 		struct list_head *head = &q->hh_flows[i];
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c798d0d..95fe75d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1013,6 +1013,9 @@
 	int err;
 	int i;
 
+	qdisc_watchdog_init(&q->watchdog, sch);
+	INIT_WORK(&q->work, htb_work_func);
+
 	if (!opt)
 		return -EINVAL;
 
@@ -1033,8 +1036,6 @@
 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
 		INIT_LIST_HEAD(q->drops + i);
 
-	qdisc_watchdog_init(&q->watchdog, sch);
-	INIT_WORK(&q->work, htb_work_func);
 	qdisc_skb_head_init(&q->direct_queue);
 
 	if (tb[TCA_HTB_DIRECT_QLEN])
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 9ffbb02..66b6e80 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -234,7 +234,7 @@
 static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
-	int i, err;
+	int i;
 
 	q->queues = NULL;
 
@@ -249,12 +249,7 @@
 	for (i = 0; i < q->max_bands; i++)
 		q->queues[i] = &noop_qdisc;
 
-	err = multiq_tune(sch, opt);
-
-	if (err)
-		kfree(q->queues);
-
-	return err;
+	return multiq_tune(sch, opt);
 }
 
 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e899d9e..3f87ddb 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -937,11 +937,11 @@
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
 
+	qdisc_watchdog_init(&q->watchdog, sch);
+
 	if (!opt)
 		return -EINVAL;
 
-	qdisc_watchdog_init(&q->watchdog, sch);
-
 	q->loss_model = CLG_RANDOM;
 	ret = netem_change(sch, opt);
 	if (ret)
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 303355c..b3f7980 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -423,12 +423,13 @@
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
+	qdisc_watchdog_init(&q->watchdog, sch);
+	q->qdisc = &noop_qdisc;
+
 	if (opt == NULL)
 		return -EINVAL;
 
 	q->t_c = ktime_get_ns();
-	qdisc_watchdog_init(&q->watchdog, sch);
-	q->qdisc = &noop_qdisc;
 
 	return tbf_change(sch, opt);
 }
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 206377f..fd7f235 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -337,8 +337,6 @@
 	}
 
 	transport = (struct sctp_transport *)v;
-	if (!sctp_transport_hold(transport))
-		return 0;
 	assoc = transport->asoc;
 	epb = &assoc->base;
 	sk = epb->sk;
@@ -428,8 +426,6 @@
 	}
 
 	transport = (struct sctp_transport *)v;
-	if (!sctp_transport_hold(transport))
-		return 0;
 	assoc = transport->asoc;
 
 	list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 78f3805..64d2d9e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4476,9 +4476,14 @@
 			break;
 		}
 
+		if (!sctp_transport_hold(t))
+			continue;
+
 		if (net_eq(sock_net(t->asoc->base.sk), net) &&
 		    t->asoc->peer.primary_path == t)
 			break;
+
+		sctp_transport_put(t);
 	}
 
 	return t;
@@ -4488,13 +4493,18 @@
 					      struct rhashtable_iter *iter,
 					      int pos)
 {
-	void *obj = SEQ_START_TOKEN;
+	struct sctp_transport *t;
 
-	while (pos && (obj = sctp_transport_get_next(net, iter)) &&
-	       !IS_ERR(obj))
-		pos--;
+	if (!pos)
+		return SEQ_START_TOKEN;
 
-	return obj;
+	while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
+		if (!--pos)
+			break;
+		sctp_transport_put(t);
+	}
+
+	return t;
 }
 
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@@ -4556,8 +4566,6 @@
 	for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
 		struct sctp_transport *transport = obj;
 
-		if (!sctp_transport_hold(transport))
-			continue;
 		err = cb(transport, p);
 		sctp_transport_put(transport);
 		if (err)
diff --git a/net/socket.c b/net/socket.c
index 539755b..8b1ab42 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -91,6 +91,7 @@
 #include <linux/xattr.h>
 #include <linux/seemp_api.h>
 #include <linux/seemp_instrumentation.h>
+#include <linux/nospec.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -552,7 +553,10 @@
 	if (!err && (iattr->ia_valid & ATTR_UID)) {
 		struct socket *sock = SOCKET_I(d_inode(dentry));
 
-		sock->sk->sk_uid = iattr->ia_uid;
+		if (sock->sk)
+			sock->sk->sk_uid = iattr->ia_uid;
+		else
+			err = -ENOENT;
 	}
 
 	return err;
@@ -603,12 +607,16 @@
  *	an inode not a file.
  */
 
-void sock_release(struct socket *sock)
+static void __sock_release(struct socket *sock, struct inode *inode)
 {
 	if (sock->ops) {
 		struct module *owner = sock->ops->owner;
 
+		if (inode)
+			inode_lock(inode);
 		sock->ops->release(sock);
+		if (inode)
+			inode_unlock(inode);
 		sock->ops = NULL;
 		module_put(owner);
 	}
@@ -623,6 +631,11 @@
 	}
 	sock->file = NULL;
 }
+
+void sock_release(struct socket *sock)
+{
+	__sock_release(sock, NULL);
+}
 EXPORT_SYMBOL(sock_release);
 
 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
@@ -1055,7 +1068,7 @@
 
 static int sock_close(struct inode *inode, struct file *filp)
 {
-	sock_release(SOCKET_I(inode));
+	__sock_release(SOCKET_I(inode), inode);
 	return 0;
 }
 
@@ -2377,6 +2390,7 @@
 
 	if (call < 1 || call > SYS_SENDMMSG)
 		return -EINVAL;
+	call = array_index_nospec(call, SYS_SENDMMSG + 1);
 
 	len = nargs[call];
 	if (len > sizeof(a))
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 4afd414..bad69e9 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -169,7 +169,7 @@
 	struct scatterlist              sg[1];
 	int err = -1;
 	u8 *checksumdata;
-	u8 rc4salt[4];
+	u8 *rc4salt;
 	struct crypto_ahash *md5;
 	struct crypto_ahash *hmac_md5;
 	struct ahash_request *req;
@@ -183,14 +183,18 @@
 		return GSS_S_FAILURE;
 	}
 
+	rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
+	if (!rc4salt)
+		return GSS_S_FAILURE;
+
 	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
 		dprintk("%s: invalid usage value %u\n", __func__, usage);
-		return GSS_S_FAILURE;
+		goto out_free_rc4salt;
 	}
 
 	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
 	if (!checksumdata)
-		return GSS_S_FAILURE;
+		goto out_free_rc4salt;
 
 	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(md5))
@@ -258,6 +262,8 @@
 	crypto_free_ahash(md5);
 out_free_cksum:
 	kfree(checksumdata);
+out_free_rc4salt:
+	kfree(rc4salt);
 	return err ? GSS_S_FAILURE : 0;
 }
 
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b2ae4f1..244eac1 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -965,10 +965,20 @@
 }
 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
 
+void rpc_task_release_transport(struct rpc_task *task)
+{
+	struct rpc_xprt *xprt = task->tk_xprt;
+
+	if (xprt) {
+		task->tk_xprt = NULL;
+		xprt_put(xprt);
+	}
+}
+EXPORT_SYMBOL_GPL(rpc_task_release_transport);
+
 void rpc_task_release_client(struct rpc_task *task)
 {
 	struct rpc_clnt *clnt = task->tk_client;
-	struct rpc_xprt *xprt = task->tk_xprt;
 
 	if (clnt != NULL) {
 		/* Remove from client task list */
@@ -979,12 +989,14 @@
 
 		rpc_release_client(clnt);
 	}
+	rpc_task_release_transport(task);
+}
 
-	if (xprt != NULL) {
-		task->tk_xprt = NULL;
-
-		xprt_put(xprt);
-	}
+static
+void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+	if (!task->tk_xprt)
+		task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
 }
 
 static
@@ -992,8 +1004,7 @@
 {
 
 	if (clnt != NULL) {
-		if (task->tk_xprt == NULL)
-			task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
+		rpc_task_set_transport(task, clnt);
 		task->tk_client = clnt;
 		atomic_inc(&clnt->cl_count);
 		if (clnt->cl_softrtry)
@@ -1550,6 +1561,7 @@
 	task->tk_msg.rpc_proc->p_count++;
 	clnt->cl_stats->rpccnt++;
 	task->tk_action = call_reserve;
+	rpc_task_set_transport(task, clnt);
 }
 
 /*
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index f57c9f0..0287734 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -229,7 +229,7 @@
 			/* alloc the pagelist for receiving buffer */
 			ppages[p] = alloc_page(GFP_ATOMIC);
 			if (!ppages[p])
-				return -EAGAIN;
+				return -ENOBUFS;
 		}
 		seg[n].mr_page = ppages[p];
 		seg[n].mr_offset = (void *)(unsigned long) page_base;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ee12e17..7566395 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -448,14 +448,14 @@
 	return transport->shutdown(vsock_sk(sk), mode);
 }
 
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
 {
 	struct sock *sk;
 	struct sock *listener;
 	struct vsock_sock *vsk;
 	bool cleanup;
 
-	vsk = container_of(work, struct vsock_sock, dwork.work);
+	vsk = container_of(work, struct vsock_sock, pending_work.work);
 	sk = sk_vsock(vsk);
 	listener = vsk->listener;
 	cleanup = true;
@@ -495,7 +495,6 @@
 	sock_put(sk);
 	sock_put(listener);
 }
-EXPORT_SYMBOL_GPL(vsock_pending_work);
 
 /**** SOCKET OPERATIONS ****/
 
@@ -594,6 +593,8 @@
 	return retval;
 }
 
+static void vsock_connect_timeout(struct work_struct *work);
+
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
@@ -636,6 +637,8 @@
 	vsk->sent_request = false;
 	vsk->ignore_connecting_rst = false;
 	vsk->peer_shutdown = 0;
+	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
 
 	psk = parent ? vsock_sk(parent) : NULL;
 	if (parent) {
@@ -1115,7 +1118,7 @@
 	struct vsock_sock *vsk;
 	int cancel = 0;
 
-	vsk = container_of(work, struct vsock_sock, dwork.work);
+	vsk = container_of(work, struct vsock_sock, connect_work.work);
 	sk = sk_vsock(vsk);
 
 	lock_sock(sk);
@@ -1219,9 +1222,7 @@
 			 * timeout fires.
 			 */
 			sock_hold(sk);
-			INIT_DELAYED_WORK(&vsk->dwork,
-					  vsock_connect_timeout);
-			schedule_delayed_work(&vsk->dwork, timeout);
+			schedule_delayed_work(&vsk->connect_work, timeout);
 
 			/* Skip ahead to preserve error code set above. */
 			goto out_wait;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 4be4fbb..4aa391c 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1099,8 +1099,7 @@
 	vpending->listener = sk;
 	sock_hold(sk);
 	sock_hold(pending);
-	INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
-	schedule_delayed_work(&vpending->dwork, HZ);
+	schedule_delayed_work(&vpending->pending_work, HZ);
 
 out:
 	return err;
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 38fbe09..d1fbff3 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -242,8 +242,9 @@
 
 country CL:
 	(2402 - 2482 @ 40), (20)
-	(5170 - 5330 @ 160), (20)
-	(5735 - 5835 @ 80), (20)
+	(5170 - 5330 @ 160), (24)
+	(5490 - 5730 @ 160), (24)
+	(5735 - 5835 @ 80), (30)
 	# 60 gHz band channels 1-3
 	(57240 - 63720 @ 2160), (50), NO-OUTDOOR
 
@@ -360,8 +361,8 @@
 
 country EG: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
-	(5170 - 5250 @ 40), (23)
-	(5250 - 5330 @ 40), (23), DFS
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 
 country ES: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -529,7 +530,7 @@
 country ID:
 	# ref: http://www.postel.go.id/content/ID/regulasi/standardisasi/kepdir/bwa%205,8%20ghz.pdf
 	(2402 - 2482 @ 40), (30)
-	(5735 - 5815 @ 20), (30)
+	(5735 - 5815 @ 80), (30)
 
 country IE: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -547,10 +548,12 @@
 	# 60 gHz band channels 1-4, base on Etsi En 302 567
 	(57000 - 66000 @ 2160), (40)
 
-country IN:
-	(2402 - 2482 @ 40), (20)
-	(5170 - 5330 @ 160), (23)
-	(5735 - 5835 @ 80), (33)
+country IN: DFS-ETSI
+	(2402 - 2482 @ 40), (30)
+	(5170 - 5250 @ 80), (30), AUTO-BW
+	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
+	(5490 - 5730 @ 160), (24), DFS
+	(5735 - 5875 @ 80), (30)
 
 country IQ: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -593,10 +596,10 @@
 	(57000 - 66000 @ 2160), (40)
 
 country JP: DFS-JP
-	(2402 - 2482 @ 40), (20)
+	(2402 - 2482 @ 40), (23)
 	(5170 - 5250 @ 80), (23), AUTO-BW, NO-OUTDOOR
-	(5250 - 5330 @ 80), (26), DFS, AUTO-BW, NO-OUTDOOR
-	(5490 - 5710 @ 160), (26), DFS
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW, NO-OUTDOOR
+	(5490 - 5710 @ 160), (23), DFS
 	# 60 gHz band channels 1-4
 	(57240 - 65880 @ 2160), (40)
 
@@ -755,6 +758,10 @@
 	(5490 - 5730 @ 160), (30), DFS
 	(5735 - 5875 @ 80), (14)
 
+country MM:
+	(2402 - 2482 @ 40), (20)
+	(5735 - 5835 @ 80), (30)
+
 country MN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (24), AUTO-BW
@@ -1186,7 +1193,7 @@
 
 country US: DFS-FCC
 	(2402 - 2472 @ 40), (30)
-	(5170 - 5250 @ 80), (30), AUTO-BW
+	(5170 - 5250 @ 80), (24), AUTO-BW
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e086950..6025e5a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4110,6 +4110,7 @@
 		params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
 					 BIT(NL80211_STA_FLAG_MFP) |
 					 BIT(NL80211_STA_FLAG_AUTHORIZED);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -5899,7 +5900,7 @@
 				  nl80211_check_s32);
 	/*
 	 * Check HT operation mode based on
-	 * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+	 * IEEE 802.11-2016 9.4.2.57 HT Operation element.
 	 */
 	if (tb[NL80211_MESHCONF_HT_OPMODE]) {
 		ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
@@ -5909,22 +5910,9 @@
 				  IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
 			return -EINVAL;
 
-		if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
-		    (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-			return -EINVAL;
+		/* NON_HT_STA bit is reserved, but some programs set it */
+		ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
 
-		switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
-		case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
-		case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
-			if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
-				return -EINVAL;
-			break;
-		case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
-		case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
-			if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-				return -EINVAL;
-			break;
-		}
 		cfg->ht_opmode = ht_opmode;
 		mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
 	}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 13ff407..c934189 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1310,7 +1310,7 @@
 					  u8 *op_class)
 {
 	u8 vht_opclass;
-	u16 freq = chandef->center_freq1;
+	u32 freq = chandef->center_freq1;
 
 	if (freq >= 2412 && freq <= 2472) {
 		if (chandef->width > NL80211_CHAN_WIDTH_40)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 791ad6e..a812cb0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1874,7 +1874,10 @@
 	/* Try to instantiate a bundle */
 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
 	if (err <= 0) {
-		if (err != 0 && err != -EAGAIN)
+		if (err == 0)
+			return NULL;
+
+		if (err != -EAGAIN)
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
 		return ERR_PTR(err);
 	}
@@ -2355,6 +2358,9 @@
 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
 
+	if (IS_ERR(dst))
+		dst_release(dst_orig);
+
 	return dst;
 }
 EXPORT_SYMBOL(xfrm_lookup_route);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 773c66b..3c9c0fe 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -992,10 +992,12 @@
 {
 	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
 
-	if (nlsk)
-		return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
-	else
-		return -1;
+	if (!nlsk) {
+		kfree_skb(skb);
+		return -EPIPE;
+	}
+
+	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
 }
 
 static inline size_t xfrm_spdinfo_msgsize(void)
@@ -1406,6 +1408,9 @@
 		    (ut[i].family != prev_family))
 			return -EINVAL;
 
+		if (ut[i].mode >= XFRM_MODE_MAX)
+			return -EINVAL;
+
 		prev_family = ut[i].family;
 
 		switch (ut[i].family) {
@@ -1636,9 +1641,11 @@
 #ifdef CONFIG_XFRM_SUB_POLICY
 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
 {
-	struct xfrm_userpolicy_type upt = {
-		.type = type,
-	};
+	struct xfrm_userpolicy_type upt;
+
+	/* Sadly there are two holes in struct xfrm_userpolicy_type */
+	memset(&upt, 0, sizeof(upt));
+	upt.type = type;
 
 	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
 }
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
index 95c1632..0b6f22f 100644
--- a/samples/bpf/parse_varlen.c
+++ b/samples/bpf/parse_varlen.c
@@ -6,6 +6,7 @@
  */
 #define KBUILD_MODNAME "foo"
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
@@ -108,11 +109,6 @@
 	return 0;
 }
 
-struct vlan_hdr {
-	uint16_t h_vlan_TCI;
-	uint16_t h_vlan_encapsulated_proto;
-};
-
 SEC("varlen")
 int handle_ingress(struct __sk_buff *skb)
 {
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index d291167f..7dad9a3 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -6,6 +6,7 @@
  */
 #define _GNU_SOURCE
 #include <sched.h>
+#include <errno.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <asm/unistd.h>
@@ -44,8 +45,13 @@
 		exit(1);
 	}
 	start_time = time_get_ns();
-	for (i = 0; i < MAX_CNT; i++)
-		write(fd, buf, sizeof(buf));
+	for (i = 0; i < MAX_CNT; i++) {
+		if (write(fd, buf, sizeof(buf)) < 0) {
+			printf("task rename failed: %s\n", strerror(errno));
+			close(fd);
+			return;
+		}
+	}
 	printf("task_rename:%d: %lld events per sec\n",
 	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
 	close(fd);
@@ -63,8 +69,13 @@
 		exit(1);
 	}
 	start_time = time_get_ns();
-	for (i = 0; i < MAX_CNT; i++)
-		read(fd, buf, sizeof(buf));
+	for (i = 0; i < MAX_CNT; i++) {
+		if (read(fd, buf, sizeof(buf)) < 0) {
+			printf("failed to read from /dev/urandom: %s\n", strerror(errno));
+			close(fd);
+			return;
+		}
+	}
 	printf("urandom_read:%d: %lld events per sec\n",
 	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
 	close(fd);
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index b1cdd50..3222ea5 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -451,3 +451,6 @@
 endef
 #
 ###############################################################################
+
+# delete partially updated (i.e. corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 8c69cd1..d809e00 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -36,4 +36,7 @@
    endif
 
 endif
+
+CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+
 endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index ea98308..79615c9 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -127,7 +127,7 @@
 ifeq ($(CONFIG_KASAN),y)
 _c_flags += $(if $(patsubst n%,, \
 		$(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
-		$(CFLAGS_KASAN))
+		$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
 endif
 
 ifeq ($(CONFIG_UBSAN),y)
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 122599b..baedaef 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -10,7 +10,13 @@
 KERNELRELEASE=$2
 SYMBOL_PREFIX=$3
 
-if ! test -r System.map -a -x "$DEPMOD"; then
+if ! test -r System.map ; then
+	exit 0
+fi
+
+if [ -z $(command -v $DEPMOD) ]; then
+	echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
+	echo "This is probably in the kmod package." >&2
 	exit 0
 fi
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 238db4f..88b3dc1 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -649,7 +649,7 @@
 			if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
 				break;
 			if (symname[0] == '.') {
-				char *munged = strdup(symname);
+				char *munged = NOFAIL(strdup(symname));
 				munged[0] = '_';
 				munged[1] = toupper(munged[1]);
 				symname = munged;
@@ -1312,7 +1312,7 @@
 static char *sec2annotation(const char *s)
 {
 	if (match(s, init_exit_sections)) {
-		char *p = malloc(20);
+		char *p = NOFAIL(malloc(20));
 		char *r = p;
 
 		*p++ = '_';
@@ -1332,7 +1332,7 @@
 			strcat(p, " ");
 		return r;
 	} else {
-		return strdup("");
+		return NOFAIL(strdup(""));
 	}
 }
 
@@ -2033,7 +2033,7 @@
 {
 	if (buf->size - buf->pos < len) {
 		buf->size += len + SZ;
-		buf->p = realloc(buf->p, buf->size);
+		buf->p = NOFAIL(realloc(buf->p, buf->size));
 	}
 	strncpy(buf->p + buf->pos, s, len);
 	buf->pos += len;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index bf66391..6fcbd8e 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -94,7 +94,8 @@
 		mutex_lock(&mutex);
 		if (*tfm)
 			goto out;
-		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
+		*tfm = crypto_alloc_shash(algo, 0,
+					  CRYPTO_ALG_ASYNC | CRYPTO_NOLOAD);
 		if (IS_ERR(*tfm)) {
 			rc = PTR_ERR(*tfm);
 			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index a71f906..9652541 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -379,6 +379,7 @@
 
 static int read_idmap[READING_MAX_ID] = {
 	[READING_FIRMWARE] = FIRMWARE_CHECK,
+	[READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
 	[READING_MODULE] = MODULE_CHECK,
 	[READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
 	[READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
index f9a6292..8b0b9ce 100644
--- a/security/pfe/pfk.c
+++ b/security/pfe/pfk.c
@@ -76,7 +76,6 @@
 	struct pfk_key_info *key_info,
 	enum ice_cryto_algo_mode *algo,
 	bool *is_pfe,
-	unsigned int *data_unit,
 	const char *storage_type);
 
 typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
@@ -287,27 +286,33 @@
 {
 	const struct inode *inode;
 	enum pfe_type which_pfe;
-	const struct blk_encryption_key *key;
 	char *s_type = NULL;
+	const struct blk_encryption_key *key = NULL;
 
 	inode = pfk_bio_get_inode(bio);
 	which_pfe = pfk_get_pfe_type(inode);
 	s_type = (char *)pfk_kc_get_storage_type();
 
+	if (data_unit && (bio_dun(bio) ||
+			!memcmp(s_type, "ufs", strlen("ufs"))))
+		*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
+
 	if (which_pfe != INVALID_PFE) {
 		/* Encrypted file; override ->bi_crypt_key */
 		pr_debug("parsing inode %lu with PFE type %d\n",
 			 inode->i_ino, which_pfe);
 		return (*(pfk_parse_inode_ftable[which_pfe]))
 				(bio, inode, key_info, algo_mode, is_pfe,
-					data_unit, (const char *)s_type);
+					(const char *)s_type);
 	}
 
 	/*
 	 * bio is not for an encrypted file.  Use ->bi_crypt_key if it was set.
 	 * Otherwise, don't encrypt/decrypt the bio.
 	 */
+#ifdef CONFIG_DM_DEFAULT_KEY
 	key = bio->bi_crypt_key;
+#endif
 	if (!key) {
 		*is_pfe = false;
 		return -EINVAL;
@@ -469,13 +474,18 @@
  */
 bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
 {
-	const struct blk_encryption_key *key1;
-	const struct blk_encryption_key *key2;
+	const struct blk_encryption_key *key1 = NULL;
+	const struct blk_encryption_key *key2 = NULL;
 	const struct inode *inode1;
 	const struct inode *inode2;
 	enum pfe_type which_pfe1;
 	enum pfe_type which_pfe2;
 
+#ifdef CONFIG_DM_DEFAULT_KEY
+	key1 = bio1->bi_crypt_key;
+	key2 = bio2->bi_crypt_key;
+#endif
+
 	if (!pfk_is_ready())
 		return false;
 
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
index 7000b66..6d3cd4c 100644
--- a/security/pfe/pfk_ext4.c
+++ b/security/pfe/pfk_ext4.c
@@ -142,7 +142,6 @@
 	struct pfk_key_info *key_info,
 	enum ice_cryto_algo_mode *algo,
 	bool *is_pfe,
-	unsigned int *data_unit,
 	const char *storage_type)
 {
 	int ret = 0;
@@ -157,19 +156,6 @@
 	 */
 	*is_pfe = true;
 
-	/* Update dun based upon storage type.
-	 * For ext4 FS UFS has 4k dun whereas eMMC
-	 * uses 512Byte dun.
-	 */
-	if (storage_type && data_unit) {
-		if (!memcmp(storage_type, "ufs", strlen("ufs")))
-			*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
-		else if (!memcmp(storage_type, "sdcc", strlen("sdcc")))
-			*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
-		else
-			return -EINVAL;
-	}
-
 	if (!pfk_ext4_is_ready())
 		return -ENODEV;
 
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
index e39d04d..346027b 100644
--- a/security/pfe/pfk_ext4.h
+++ b/security/pfe/pfk_ext4.h
@@ -25,7 +25,6 @@
 	struct pfk_key_info *key_info,
 	enum ice_cryto_algo_mode *algo,
 	bool *is_pfe,
-	unsigned int *data_unit,
 	const char *storage_type);
 
 bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c
index 2076267..9523c35 100644
--- a/security/pfe/pfk_f2fs.c
+++ b/security/pfe/pfk_f2fs.c
@@ -117,7 +117,6 @@
 		struct pfk_key_info *key_info,
 		enum ice_cryto_algo_mode *algo,
 		bool *is_pfe,
-		unsigned int *data_unit,
 		const char *storage_type)
 {
 	int ret = 0;
@@ -132,18 +131,6 @@
 	 */
 	*is_pfe = true;
 
-	/* Update the dun based upon storage type.
-	 * Right now both UFS and eMMC storage uses 4KB dun
-	 * for F2FS
-	 */
-	if (storage_type && data_unit) {
-		if (!memcmp(storage_type, "ufs", strlen("ufs")) ||
-			!memcmp(storage_type, "sdcc", strlen("sdcc")))
-			*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
-		else
-			return -EINVAL;
-	}
-
 	if (!pfk_f2fs_is_ready())
 		return -ENODEV;
 
diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h
index 2e0c21d..1b00bd8 100644
--- a/security/pfe/pfk_f2fs.h
+++ b/security/pfe/pfk_f2fs.h
@@ -25,7 +25,6 @@
 		struct pfk_key_info *key_info,
 		enum ice_cryto_algo_mode *algo,
 		bool *is_pfe,
-		unsigned int *data_unit,
 		const char *storage_type);
 
 bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 530ed9b..13b0d56 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2307,6 +2307,7 @@
 	struct smack_known *skp = smk_of_task_struct(p);
 
 	isp->smk_inode = skp;
+	isp->smk_flags |= SMK_INODE_INSTANT;
 }
 
 /*
@@ -3965,15 +3966,19 @@
 	struct smack_known *skp = NULL;
 	int rc = 0;
 	struct smk_audit_info ad;
+	u16 family = sk->sk_family;
 #ifdef CONFIG_AUDIT
 	struct lsm_network_audit net;
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
 	struct sockaddr_in6 sadd;
 	int proto;
+
+	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
+		family = PF_INET;
 #endif /* CONFIG_IPV6 */
 
-	switch (sk->sk_family) {
+	switch (family) {
 	case PF_INET:
 #ifdef CONFIG_SECURITY_SMACK_NETFILTER
 		/*
@@ -3991,7 +3996,7 @@
 		 */
 		netlbl_secattr_init(&secattr);
 
-		rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
+		rc = netlbl_skbuff_getattr(skb, family, &secattr);
 		if (rc == 0)
 			skp = smack_from_secattr(&secattr, ssp);
 		else
@@ -4004,7 +4009,7 @@
 #endif
 #ifdef CONFIG_AUDIT
 		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
-		ad.a.u.net->family = sk->sk_family;
+		ad.a.u.net->family = family;
 		ad.a.u.net->netif = skb->skb_iif;
 		ipv4_skb_to_auditdata(skb, &ad.a, NULL);
 #endif
@@ -4018,7 +4023,7 @@
 		rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in,
 					MAY_WRITE, rc);
 		if (rc != 0)
-			netlbl_skbuff_err(skb, sk->sk_family, rc, 0);
+			netlbl_skbuff_err(skb, family, rc, 0);
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case PF_INET6:
@@ -4034,7 +4039,7 @@
 			skp = smack_net_ambient;
 #ifdef CONFIG_AUDIT
 		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
-		ad.a.u.net->family = sk->sk_family;
+		ad.a.u.net->family = family;
 		ad.a.u.net->netif = skb->skb_iif;
 		ipv6_skb_to_auditdata(skb, &ad.a, NULL);
 #endif /* CONFIG_AUDIT */
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index 7196008..6555742 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -88,8 +88,10 @@
 	}
 
 	reg = of_get_property(np, "reg", NULL);
-	if (!reg)
+	if (!reg) {
+		of_node_put(np);
 		return NULL;
+	}
 
 	*gpioptr = *reg;
 
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f05cb6a..78ffe44 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -239,16 +239,12 @@
 	int err;
 
 	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
-		size_t aligned_size;
 		if (err != -ENOMEM)
 			return err;
 		if (size <= PAGE_SIZE)
 			return -ENOMEM;
-		aligned_size = PAGE_SIZE << get_order(size);
-		if (size != aligned_size)
-			size = aligned_size;
-		else
-			size >>= 1;
+		size >>= 1;
+		size = PAGE_SIZE << get_order(size);
 	}
 	if (! dmab->area)
 		return -ENOMEM;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 9ccf6a5..2bae2a3 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -652,27 +652,33 @@
 
 static int snd_interval_refine_first(struct snd_interval *i)
 {
+	const unsigned int last_max = i->max;
+
 	if (snd_BUG_ON(snd_interval_empty(i)))
 		return -EINVAL;
 	if (snd_interval_single(i))
 		return 0;
 	i->max = i->min;
-	i->openmax = i->openmin;
-	if (i->openmax)
+	if (i->openmin)
 		i->max++;
+	/* only exclude max value if also excluded before refine */
+	i->openmax = (i->openmax && i->max >= last_max);
 	return 1;
 }
 
 static int snd_interval_refine_last(struct snd_interval *i)
 {
+	const unsigned int last_min = i->min;
+
 	if (snd_BUG_ON(snd_interval_empty(i)))
 		return -EINVAL;
 	if (snd_interval_single(i))
 		return 0;
 	i->min = i->max;
-	i->openmin = i->openmax;
-	if (i->openmin)
+	if (i->openmax)
 		i->min--;
+	/* only exclude min value if also excluded before refine */
+	i->openmin = (i->openmin && i->min <= last_min);
 	return 1;
 }
 
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 180261d..f217a1d 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -636,10 +636,8 @@
 int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
 			      struct snd_rawmidi_params * params)
 {
-	char *newbuf;
-	char *oldbuf;
+	char *newbuf, *oldbuf;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
-	unsigned long flags;
 
 	if (substream->append && substream->use_count > 1)
 		return -EBUSY;
@@ -651,22 +649,17 @@
 		return -EINVAL;
 	}
 	if (params->buffer_size != runtime->buffer_size) {
-		mutex_lock(&runtime->realloc_mutex);
-		newbuf = __krealloc(runtime->buffer, params->buffer_size,
-				  GFP_KERNEL);
-		if (!newbuf) {
-			mutex_unlock(&runtime->realloc_mutex);
+		newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
+		if (!newbuf)
 			return -ENOMEM;
-		}
-		spin_lock_irqsave(&runtime->lock, flags);
+		spin_lock_irq(&runtime->lock);
 		oldbuf = runtime->buffer;
 		runtime->buffer = newbuf;
 		runtime->buffer_size = params->buffer_size;
 		runtime->avail = runtime->buffer_size;
-		spin_unlock_irqrestore(&runtime->lock, flags);
-		if (oldbuf != newbuf)
-			kfree(oldbuf);
-		mutex_unlock(&runtime->realloc_mutex);
+		runtime->appl_ptr = runtime->hw_ptr = 0;
+		spin_unlock_irq(&runtime->lock);
+		kfree(oldbuf);
 	}
 	runtime->avail_min = params->avail_min;
 	substream->active_sensing = !params->no_active_sensing;
@@ -677,10 +670,8 @@
 int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
 			     struct snd_rawmidi_params * params)
 {
-	char *newbuf;
-	char *oldbuf;
+	char *newbuf, *oldbuf;
 	struct snd_rawmidi_runtime *runtime = substream->runtime;
-	unsigned long flags;
 
 	snd_rawmidi_drain_input(substream);
 	if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) {
@@ -690,21 +681,16 @@
 		return -EINVAL;
 	}
 	if (params->buffer_size != runtime->buffer_size) {
-		mutex_lock(&runtime->realloc_mutex);
-		newbuf = __krealloc(runtime->buffer, params->buffer_size,
-				  GFP_KERNEL);
-		if (!newbuf) {
-			mutex_unlock(&runtime->realloc_mutex);
+		newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
+		if (!newbuf)
 			return -ENOMEM;
-		}
-		spin_lock_irqsave(&runtime->lock, flags);
+		spin_lock_irq(&runtime->lock);
 		oldbuf = runtime->buffer;
 		runtime->buffer = newbuf;
 		runtime->buffer_size = params->buffer_size;
-		spin_unlock_irqrestore(&runtime->lock, flags);
-		if (oldbuf != newbuf)
-			kfree(oldbuf);
-		mutex_unlock(&runtime->realloc_mutex);
+		runtime->appl_ptr = runtime->hw_ptr = 0;
+		spin_unlock_irq(&runtime->lock);
+		kfree(oldbuf);
 	}
 	runtime->avail_min = params->avail_min;
 	return 0;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index ecd1c5f..965473d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -2002,7 +2002,8 @@
 	struct snd_seq_client *cptr = NULL;
 
 	/* search for next client */
-	info->client++;
+	if (info->client < INT_MAX)
+		info->client++;
 	if (info->client < 0)
 		info->client = 0;
 	for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 8bdc4c9..1ebb346 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -163,6 +163,7 @@
 	int count, res;
 	unsigned char buf[32], *pbuf;
 	unsigned long flags;
+	bool check_resched = !in_atomic();
 
 	if (up) {
 		vmidi->trigger = 1;
@@ -200,6 +201,15 @@
 					vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
 				}
 			}
+			if (!check_resched)
+				continue;
+			/* do temporary unlock & cond_resched() for avoiding
+			 * CPU soft lockup, which may happen via a write from
+			 * a huge rawmidi buffer
+			 */
+			spin_unlock_irqrestore(&substream->runtime->lock, flags);
+			cond_resched();
+			spin_lock_irqsave(&substream->runtime->lock, flags);
 		}
 	out:
 		spin_unlock_irqrestore(&substream->runtime->lock, flags);
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 3469ac14..d0dfa82 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -263,6 +263,8 @@
 error:
 	mutex_unlock(&devices_mutex);
 	snd_bebob_stream_destroy_duplex(bebob);
+	kfree(bebob->maudio_special_quirk);
+	bebob->maudio_special_quirk = NULL;
 	snd_card_free(bebob->card);
 	dev_info(&bebob->unit->device,
 		 "Sound card registration failed: %d\n", err);
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 07e5abd..6dbf047 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@
 	struct fw_device *device = fw_parent_device(unit);
 	int err, rcode;
 	u64 date;
-	__le32 cues[3] = {
-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
-	};
+	__le32 *cues;
 
 	/* check date of software used to build */
 	err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
 				   &date, sizeof(u64));
 	if (err < 0)
-		goto end;
+		return err;
 	/*
 	 * firmware version 5058 or later has date later than "20070401", but
 	 * 'date' is not null-terminated.
@@ -114,20 +110,28 @@
 	if (date < 0x3230303730343031LL) {
 		dev_err(&unit->device,
 			"Use firmware version 5058 or later\n");
-		err = -ENOSYS;
-		goto end;
+		return -ENXIO;
 	}
 
+	cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+	if (!cues)
+		return -ENOMEM;
+
+	cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+	cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+	cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
 	rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
 				   device->node_id, device->generation,
 				   device->max_speed, BEBOB_ADDR_REG_REQ,
-				   cues, sizeof(cues));
+				   cues, 3 * sizeof(*cues));
+	kfree(cues);
 	if (rcode != RCODE_COMPLETE) {
 		dev_err(&unit->device,
 			"Failed to send a cue to load firmware\n");
 		err = -EIO;
 	}
-end:
+
 	return err;
 }
 
@@ -290,10 +294,6 @@
 		bebob->midi_output_ports = 2;
 	}
 end:
-	if (err < 0) {
-		kfree(params);
-		bebob->maudio_special_quirk = NULL;
-	}
 	mutex_unlock(&bebob->mutex);
 	return err;
 }
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5e1d2..ef68999 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -49,6 +49,7 @@
 	fw_unit_put(dg00x->unit);
 
 	mutex_destroy(&dg00x->mutex);
+	kfree(dg00x);
 }
 
 static void dg00x_card_free(struct snd_card *card)
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 71a0613..f2d0733 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -301,6 +301,8 @@
 	snd_efw_transaction_remove_instance(efw);
 	snd_efw_stream_destroy_duplex(efw);
 	snd_card_free(efw->card);
+	kfree(efw->resp_buf);
+	efw->resp_buf = NULL;
 	dev_info(&efw->unit->device,
 		 "Sound card registration failed: %d\n", err);
 }
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 474b06d..696b6cf 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -135,6 +135,7 @@
 
 	kfree(oxfw->spec);
 	mutex_destroy(&oxfw->mutex);
+	kfree(oxfw);
 }
 
 /*
@@ -212,6 +213,7 @@
 static void do_registration(struct work_struct *work)
 {
 	struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+	int i;
 	int err;
 
 	if (oxfw->registered)
@@ -274,7 +276,15 @@
 	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
 	if (oxfw->has_output)
 		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+	for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
+		kfree(oxfw->tx_stream_formats[i]);
+		oxfw->tx_stream_formats[i] = NULL;
+		kfree(oxfw->rx_stream_formats[i]);
+		oxfw->rx_stream_formats[i] = NULL;
+	}
 	snd_card_free(oxfw->card);
+	kfree(oxfw->spec);
+	oxfw->spec = NULL;
 	dev_info(&oxfw->unit->device,
 		 "Sound card registration failed: %d\n", err);
 }
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 9dc93a7..4c967ac 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -93,6 +93,7 @@
 	fw_unit_put(tscm->unit);
 
 	mutex_destroy(&tscm->mutex);
+	kfree(tscm);
 }
 
 static void tscm_card_free(struct snd_card *card)
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index a31ea6c..2d7379d 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -82,10 +82,10 @@
 
 static void set_default_audio_parameters(struct snd_msnd *chip)
 {
-	chip->play_sample_size = DEFSAMPLESIZE;
+	chip->play_sample_size = snd_pcm_format_width(DEFSAMPLESIZE);
 	chip->play_sample_rate = DEFSAMPLERATE;
 	chip->play_channels = DEFCHANNELS;
-	chip->capture_sample_size = DEFSAMPLESIZE;
+	chip->capture_sample_size = snd_pcm_format_width(DEFSAMPLESIZE);
 	chip->capture_sample_rate = DEFSAMPLERATE;
 	chip->capture_channels = DEFCHANNELS;
 }
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index 0579daa..425d1b6 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -66,9 +66,9 @@
 };
 
 struct cs5535audio_dma_desc {
-	u32 addr;
-	u16 size;
-	u16 ctlreserved;
+	__le32 addr;
+	__le16 size;
+	__le16 ctlreserved;
 };
 
 struct cs5535audio_dma {
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index c208c1d..b9912ec2 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -158,8 +158,8 @@
 	lastdesc->addr = cpu_to_le32((u32) dma->desc_buf.addr);
 	lastdesc->size = 0;
 	lastdesc->ctlreserved = cpu_to_le16(PRD_JMP);
-	jmpprd_addr = cpu_to_le32(lastdesc->addr +
-				  (sizeof(struct cs5535audio_dma_desc)*periods));
+	jmpprd_addr = (u32)dma->desc_buf.addr +
+		sizeof(struct cs5535audio_dma_desc) * periods;
 
 	dma->substream = substream;
 	dma->period_bytes = period_bytes;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 56fc47b..50b216f 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2520,7 +2520,7 @@
 		emu->support_tlv = 1;
 		return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
 	case SNDRV_EMU10K1_IOCTL_INFO:
-		info = kmalloc(sizeof(*info), GFP_KERNEL);
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
 		if (!info)
 			return -ENOMEM;
 		snd_emu10k1_fx8010_info(emu, info);
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 37be1e1..0d2bb30 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1850,7 +1850,9 @@
 	if (!kctl)
 		return -ENOMEM;
 	kctl->id.device = device;
-	snd_ctl_add(emu->card, kctl);
+	err = snd_ctl_add(emu->card, kctl);
+	if (err < 0)
+		return err;
 
 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
 
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f1f69b..8c778fa 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -237,13 +237,13 @@
 static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
 {
 	if (addr & ~emu->dma_mask) {
-		dev_err(emu->card->dev,
+		dev_err_ratelimited(emu->card->dev,
 			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
 			emu->dma_mask, (unsigned long)addr);
 		return 0;
 	}
 	if (addr & (EMUPAGESIZE-1)) {
-		dev_err(emu->card->dev, "page is not aligned\n");
+		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
 		return 0;
 	}
 	return 1;
@@ -334,7 +334,7 @@
 		else
 			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
 		if (! is_valid_page(emu, addr)) {
-			dev_err(emu->card->dev,
+			dev_err_ratelimited(emu->card->dev,
 				"emu: failure page = %d\n", idx);
 			mutex_unlock(&hdr->block_mutex);
 			return NULL;
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index a178e0d..8561f60 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1068,11 +1068,19 @@
 		if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0)
 			return err;
 	}
-	for (i = 0; i < FM801_CONTROLS; i++)
-		snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip));
+	for (i = 0; i < FM801_CONTROLS; i++) {
+		err = snd_ctl_add(chip->card,
+			snd_ctl_new1(&snd_fm801_controls[i], chip));
+		if (err < 0)
+			return err;
+	}
 	if (chip->multichannel) {
-		for (i = 0; i < FM801_CONTROLS_MULTI; i++)
-			snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
+		for (i = 0; i < FM801_CONTROLS_MULTI; i++) {
+			err = snd_ctl_add(chip->card,
+				snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
+			if (err < 0)
+				return err;
+		}
 	}
 	return 0;
 }
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e46c561..c6b046d 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4025,7 +4025,8 @@
 
 	list_for_each_codec(codec, bus) {
 		/* FIXME: maybe a better way needed for forced reset */
-		cancel_delayed_work_sync(&codec->jackpoll_work);
+		if (current_work() != &codec->jackpoll_work.work)
+			cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
 		if (hda_codec_is_power_on(codec)) {
 			hda_call_codec_suspend(codec);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4e91120..f913809 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2058,7 +2058,7 @@
  */
 static struct snd_pci_quirk power_save_blacklist[] = {
 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
-	SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+	SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
 	SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
@@ -2349,7 +2349,8 @@
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
 	/* AMD Raven */
 	{ PCI_DEVICE(0x1022, 0x15e3),
-	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+			 AZX_DCAPS_PM_RUNTIME },
 	/* ATI HDMI */
 	{ PCI_DEVICE(0x1002, 0x0002),
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 9ec4dba..2809999 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -38,6 +38,10 @@
 /* Enable this to see controls for tuning purpose. */
 /*#define ENABLE_TUNING_CONTROLS*/
 
+#ifdef ENABLE_TUNING_CONTROLS
+#include <sound/tlv.h>
+#endif
+
 #define FLOAT_ZERO	0x00000000
 #define FLOAT_ONE	0x3f800000
 #define FLOAT_TWO	0x40000000
@@ -3067,8 +3071,8 @@
 	return 1;
 }
 
-static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
-static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(eq_db_scale, -2400, 100, 0);
 
 static int add_tuning_control(struct hda_codec *codec,
 				hda_nid_t pnid, hda_nid_t nid,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 6b5804e..a6e98a4 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -205,6 +205,7 @@
 	struct conexant_spec *spec = codec->spec;
 
 	switch (codec->core.vendor_id) {
+	case 0x14f12008: /* CX8200 */
 	case 0x14f150f2: /* CX20722 */
 	case 0x14f150f4: /* CX20724 */
 		break;
@@ -212,13 +213,14 @@
 		return;
 	}
 
-	/* Turn the CX20722 codec into D3 to avoid spurious noises
+	/* Turn the problematic codec into D3 to avoid spurious noises
 	   from the internal speaker during (and after) reboot */
 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
 
 	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
 	snd_hda_codec_write(codec, codec->core.afg, 0,
 			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+	msleep(10);
 }
 
 static void cx_auto_free(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index bd65022..76ae627 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <sound/core.h>
 #include <sound/jack.h>
 #include <sound/asoundef.h>
@@ -731,8 +732,10 @@
 
 	if (pin_idx < 0)
 		return;
+	mutex_lock(&spec->pcm_lock);
 	if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
 		snd_hda_jack_report_sync(codec);
+	mutex_unlock(&spec->pcm_lock);
 }
 
 static void jack_callback(struct hda_codec *codec,
@@ -1521,21 +1524,23 @@
 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
 	struct hda_codec *codec = per_pin->codec;
-	struct hdmi_spec *spec = codec->spec;
 	int ret;
 
 	/* no temporary power up/down needed for component notifier */
-	if (!codec_has_acomp(codec))
-		snd_hda_power_up_pm(codec);
+	if (!codec_has_acomp(codec)) {
+		ret = snd_hda_power_up_pm(codec);
+		if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) {
+			snd_hda_power_down_pm(codec);
+			return false;
+		}
+	}
 
-	mutex_lock(&spec->pcm_lock);
 	if (codec_has_acomp(codec)) {
 		sync_eld_via_acomp(codec, per_pin);
 		ret = false; /* don't call snd_hda_jack_report_sync() */
 	} else {
 		ret = hdmi_present_sense_via_verbs(per_pin, repoll);
 	}
-	mutex_unlock(&spec->pcm_lock);
 
 	if (!codec_has_acomp(codec))
 		snd_hda_power_down_pm(codec);
@@ -1547,12 +1552,16 @@
 {
 	struct hdmi_spec_per_pin *per_pin =
 	container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+	struct hda_codec *codec = per_pin->codec;
+	struct hdmi_spec *spec = codec->spec;
 
 	if (per_pin->repoll_count++ > 6)
 		per_pin->repoll_count = 0;
 
+	mutex_lock(&spec->pcm_lock);
 	if (hdmi_present_sense(per_pin, per_pin->repoll_count))
 		snd_hda_jack_report_sync(per_pin->codec);
+	mutex_unlock(&spec->pcm_lock);
 }
 
 static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f03a143..ca29457 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5698,6 +5698,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index 8e457ea..1997bb0 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -275,7 +275,7 @@
 		length >>= 2; /* in 32bit words */
 		/* Transfer using pseudo-dma. */
 		for (; length > 0; length--) {
-			outl(cpu_to_le32(*addr), port);
+			outl(*addr, port);
 			addr++;
 		}
 		addr = (u32 *)runtime->dma_area;
@@ -285,7 +285,7 @@
 	count >>= 2; /* in 32bit words */
 	/* Transfer using pseudo-dma. */
 	for (; count > 0; count--) {
-		outl(cpu_to_le32(*addr), port);
+		outl(*addr, port);
 		addr++;
 	}
 
@@ -313,7 +313,7 @@
 		length >>= 2; /* in 32bit words */
 		/* Transfer using pseudo-dma. */
 		for (; length > 0; length--)
-			*addr++ = le32_to_cpu(inl(port));
+			*addr++ = inl(port);
 		addr = (u32 *)runtime->dma_area;
 		pipe->hw_ptr = 0;
 	}
@@ -321,7 +321,7 @@
 	count >>= 2; /* in 32bit words */
 	/* Transfer using pseudo-dma. */
 	for (; count > 0; count--)
-		*addr++ = le32_to_cpu(inl(port));
+		*addr++ = inl(port);
 
 	vx2_release_pseudo_dma(chip);
 }
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 56aa1ba..49a8833 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -375,7 +375,7 @@
 		length >>= 1; /* in 16bit words */
 		/* Transfer using pseudo-dma. */
 		for (; length > 0; length--) {
-			outw(cpu_to_le16(*addr), port);
+			outw(*addr, port);
 			addr++;
 		}
 		addr = (unsigned short *)runtime->dma_area;
@@ -385,7 +385,7 @@
 	count >>= 1; /* in 16bit words */
 	/* Transfer using pseudo-dma. */
 	for (; count > 0; count--) {
-		outw(cpu_to_le16(*addr), port);
+		outw(*addr, port);
 		addr++;
 	}
 	vx_release_pseudo_dma(chip);
@@ -417,7 +417,7 @@
 		length >>= 1; /* in 16bit words */
 		/* Transfer using pseudo-dma. */
 		for (; length > 0; length--)
-			*addr++ = le16_to_cpu(inw(port));
+			*addr++ = inw(port);
 		addr = (unsigned short *)runtime->dma_area;
 		pipe->hw_ptr = 0;
 	}
@@ -425,12 +425,12 @@
 	count >>= 1; /* in 16bit words */
 	/* Transfer using pseudo-dma. */
 	for (; count > 1; count--)
-		*addr++ = le16_to_cpu(inw(port));
+		*addr++ = inw(port);
 	/* Disable DMA */
 	pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK;
 	vx_outb(chip, DIALOG, pchip->regDIALOG);
 	/* Read the last word (16 bits) */
-	*addr = le16_to_cpu(inw(port));
+	*addr = inw(port);
 	/* Disable 16-bit accesses */
 	pchip->regDIALOG &= ~VXP_DLG_DMA16_SEL_MASK;
 	vx_outb(chip, DIALOG, pchip->regDIALOG);
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index fd966bb..6e8eb1f 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -157,8 +157,8 @@
 	SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
 				3, 1, 0),
 	SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
-	SOC_SINGLE("MMTLR Data Switch", 0,
-				1, 1, 0),
+	SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+				0, 1, 0),
 	SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
 	SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
 };
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 3896523..f289762 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2431,6 +2431,7 @@
 			snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2,
 					    WM8994_OPCLK_ENA, 0);
 		}
+		break;
 
 	default:
 		return -EINVAL;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index cdcced9..b7c1e3d 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -128,23 +128,19 @@
 	struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
 	struct snd_soc_jack *jack = &ctx->jack;
 
-	/**
-	* TI supports 4 butons headset detection
-	* KEY_MEDIA
-	* KEY_VOICECOMMAND
-	* KEY_VOLUMEUP
-	* KEY_VOLUMEDOWN
-	*/
-	if (ctx->ts3a227e_present)
-		jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
-					SND_JACK_BTN_0 | SND_JACK_BTN_1 |
-					SND_JACK_BTN_2 | SND_JACK_BTN_3;
-	else
-		jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
+	if (ctx->ts3a227e_present) {
+		/*
+		 * The jack has already been created in the
+		 * cht_max98090_headset_init() function.
+		 */
+		snd_soc_jack_notifier_register(jack, &cht_jack_nb);
+		return 0;
+	}
+
+	jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
 
 	ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
 					jack_type, jack, NULL, 0);
-
 	if (ret) {
 		dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret);
 		return ret;
@@ -200,6 +196,27 @@
 {
 	struct snd_soc_card *card = component->card;
 	struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+	struct snd_soc_jack *jack = &ctx->jack;
+	int jack_type;
+	int ret;
+
+	/*
+	 * TI supports 4 buttons headset detection
+	 * KEY_MEDIA
+	 * KEY_VOICECOMMAND
+	 * KEY_VOLUMEUP
+	 * KEY_VOLUMEDOWN
+	 */
+	jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+		    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+		    SND_JACK_BTN_2 | SND_JACK_BTN_3;
+
+	ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type,
+				    jack, NULL, 0);
+	if (ret) {
+		dev_err(card->dev, "Headset Jack creation failed %d\n", ret);
+		return ret;
+	}
 
 	return ts3a227e_enable_jack_detect(component, &ctx->jack);
 }
diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
index 45fc06c..6b504f4 100644
--- a/sound/soc/sirf/sirf-usp.c
+++ b/sound/soc/sirf/sirf-usp.c
@@ -367,10 +367,9 @@
 	platform_set_drvdata(pdev, usp);
 
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap(&pdev->dev, mem_res->start,
-		resource_size(mem_res));
-	if (base == NULL)
-		return -ENOMEM;
+	base = devm_ioremap_resource(&pdev->dev, mem_res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 	usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
 					    &sirf_usp_regmap_config);
 	if (IS_ERR(usp->regmap))
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 682c207..b36c856 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3929,6 +3929,13 @@
 			continue;
 		}
 
+		/* let users know there is no DAI to link */
+		if (!dai_w->priv) {
+			dev_dbg(card->dev, "dai widget %s has no DAI\n",
+				dai_w->name);
+			continue;
+		}
+
 		dai = dai_w->priv;
 
 		/* ...find all widgets with the same stream and link them */
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index bd8f34a..7fb237b 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1739,6 +1739,14 @@
 		int i;
 
 		for (i = 0; i < be->num_codecs; i++) {
+			/*
+			 * Skip CODECs which don't support the current stream
+			 * type. See soc_pcm_init_runtime_hw() for more details
+			 */
+			if (!snd_soc_dai_stream_valid(be->codec_dais[i],
+						      stream))
+				continue;
+
 			codec_dai_drv = be->codec_dais[i]->driver;
 			if (stream == SNDRV_PCM_STREAM_PLAYBACK)
 				codec_stream = &codec_dai_drv->playback;
@@ -1911,8 +1919,10 @@
 			continue;
 
 		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN))
-			continue;
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) {
+			soc_pcm_hw_free(be_substream);
+			be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
+		}
 
 		dev_dbg(be->dev, "ASoC: close BE %s\n",
 			be->dai_link->name);
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index dc0a9ef..b57d1d4 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1379,7 +1379,7 @@
 		if (bytes % (runtime->sample_bits >> 3) != 0) {
 			int oldbytes = bytes;
 			bytes = frames * stride;
-			dev_warn(&subs->dev->dev,
+			dev_warn_ratelimited(&subs->dev->dev,
 				 "Corrected urb data len. %d->%d\n",
 							oldbytes, bytes);
 		}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 69bf5cf..15cbe25 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2875,7 +2875,8 @@
  */
 
 #define AU0828_DEVICE(vid, pid, vname, pname) { \
-	USB_DEVICE_VENDOR_SPEC(vid, pid), \
+	.idVendor = vid, \
+	.idProduct = pid, \
 	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
 		       USB_DEVICE_ID_MATCH_INT_CLASS | \
 		       USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index aea30af..f6d1bc9 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
 #define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU	( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
@@ -213,7 +212,7 @@
 #define X86_FEATURE_IBPB	( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP	( 7*32+27) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN		( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
-
+#define X86_FEATURE_L1TF_PTEINV	( 7*32+29) /* "" L1TF workaround PTE inversion */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
@@ -317,6 +316,7 @@
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D		(18*32+28) /* Flush L1D cache */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
 #define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
 
@@ -349,5 +349,6 @@
 #define X86_BUG_SPECTRE_V1	X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF		X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/build/Build.include b/tools/build/Build.include
index b816554..ab02f8d 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -63,8 +63,8 @@
            $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;           \
            rm -f $(depfile);                                                    \
            mv -f $(dot-target).tmp $(dot-target).cmd,                           \
-           printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
-           printf '\# using basic dep data\n\n' >> $(dot-target).cmd;           \
+           printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+           printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd;           \
            cat $(depfile) >> $(dot-target).cmd;                                 \
            printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
 
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 8332959..20d9ae1 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -42,7 +42,7 @@
 	$(Q)$(MAKE) $(build)=fixdep
 
 $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
-	$(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $<
+	$(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $<
 
 FORCE:
 
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 60a94b3..1774800 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -286,7 +286,7 @@
 		 * Found a match; just move the remaining
 		 * entries up.
 		 */
-		if (i == num_records) {
+		if (i == (num_records - 1)) {
 			kvp_file_info[pool].num_records--;
 			kvp_update_file(pool);
 			return 0;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 4e60e10..0d1acb7 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -302,19 +302,34 @@
 				continue;
 			sym->pfunc = sym->cfunc = sym;
 			coldstr = strstr(sym->name, ".cold.");
-			if (coldstr) {
-				coldstr[0] = '\0';
-				pfunc = find_symbol_by_name(elf, sym->name);
-				coldstr[0] = '.';
+			if (!coldstr)
+				continue;
 
-				if (!pfunc) {
-					WARN("%s(): can't find parent function",
-					     sym->name);
-					goto err;
-				}
+			coldstr[0] = '\0';
+			pfunc = find_symbol_by_name(elf, sym->name);
+			coldstr[0] = '.';
 
-				sym->pfunc = pfunc;
-				pfunc->cfunc = sym;
+			if (!pfunc) {
+				WARN("%s(): can't find parent function",
+				     sym->name);
+				goto err;
+			}
+
+			sym->pfunc = pfunc;
+			pfunc->cfunc = sym;
+
+			/*
+			 * Unfortunately, -fno-reorder-functions puts the child
+			 * inside the parent.  Remove the overlap so we can
+			 * have sane assumptions.
+			 *
+			 * Note that pfunc->len now no longer matches
+			 * pfunc->sym.st_size.
+			 */
+			if (sym->sec == pfunc->sec &&
+			    sym->offset >= pfunc->offset &&
+			    sym->offset + sym->len == pfunc->offset + pfunc->len) {
+				pfunc->len -= sym->len;
 			}
 		}
 	}
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 0c370f8..9a53f6e 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -58,9 +58,13 @@
 	}
 
 	/*
-	 * Check if return address is on the stack.
+	 * Check if return address is on the stack. If return address
+	 * is in a register (typically R0), it is yet to be saved on
+	 * the stack.
 	 */
-	if (nops != 0 || ops != NULL)
+	if ((nops != 0 || ops != NULL) &&
+		!(nops == 1 && ops[0].atom == DW_OP_regx &&
+			ops[0].number2 == 0 && ops[0].offset == 0))
 		return 0;
 
 	/*
@@ -243,10 +247,10 @@
 	u64 ip;
 	u64 skip_slot = -1;
 
-	if (chain->nr < 3)
+	if (!chain || chain->nr < 3)
 		return skip_slot;
 
-	ip = chain->ips[2];
+	ip = chain->ips[1];
 
 	thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
 			MAP__FUNCTION, ip, &al);
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 1030a6e..01a288c 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -21,15 +21,16 @@
 
 #endif
 
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
 int arch__choose_best_symbol(struct symbol *syma,
 			     struct symbol *symb __maybe_unused)
 {
 	char *sym = syma->name;
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 	/* Skip over any initial dot */
 	if (*sym == '.')
 		sym++;
+#endif
 
 	/* Avoid "SyS" kernel syscall aliases */
 	if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -40,6 +41,7 @@
 	return SYMBOL_A;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 /* Allow matching against dot variants */
 int arch__compare_symbol_names(const char *namea, const char *nameb)
 {
@@ -115,8 +117,10 @@
 	for (i = 0; i < ntevs; i++) {
 		tev = &pev->tevs[i];
 		map__for_each_symbol(map, sym, tmp) {
-			if (map->unmap_ip(map, sym->start) == tev->point.address)
+			if (map->unmap_ip(map, sym->start) == tev->point.address) {
 				arch__fix_tev_from_maps(pev, tev, map, sym);
+				break;
+			}
 		}
 	}
 }
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 23cce5e..ee9565a 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1093,7 +1093,7 @@
 	u8 *global_data;
 	u8 *process_data;
 	u8 *thread_data;
-	u64 bytes_done;
+	u64 bytes_done, secs;
 	long work_done;
 	u32 l;
 	struct rusage rusage;
@@ -1249,7 +1249,8 @@
 	timersub(&stop, &start0, &diff);
 	td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
 	td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
-	td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
+	secs = td->runtime_ns / NSEC_PER_SEC;
+	td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
 
 	getrusage(RUSAGE_THREAD, &rusage);
 	td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 9a0236a..8f8d895 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -22,7 +22,9 @@
 	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
 }
 
+#ifndef MAX_NR_CPUS
 #define MAX_NR_CPUS			1024
+#endif
 
 extern const char *input_name;
 extern bool perf_host, perf_guest;
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 778668a..ade7213 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -413,7 +413,7 @@
 			for (subi = 0; subi < subn; subi++) {
 				pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
 					t->subtest.get_desc(subi));
-				err = test_and_print(t, skip, subi);
+				err = test_and_print(t, skip, subi + 1);
 				if (err != TEST_OK && t->subtest.skip_if_fail)
 					skip = true;
 			}
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index 98fe69a..3e7cdef 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -42,6 +42,7 @@
 
 	perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
 	perf_header__set_feat(&session->header, HEADER_NRCPUS);
+	perf_header__set_feat(&session->header, HEADER_ARCH);
 
 	session->header.data_size += DATA_SIZE;
 
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 78bd632..29d015e 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -195,6 +195,9 @@
 	for (i = 0; i < queues->nr_queues; i++) {
 		list_splice_tail(&queues->queue_array[i].head,
 				 &queue_array[i].head);
+		queue_array[i].tid = queues->queue_array[i].tid;
+		queue_array[i].cpu = queues->queue_array[i].cpu;
+		queue_array[i].set = queues->queue_array[i].set;
 		queue_array[i].priv = queues->queue_array[i].priv;
 	}
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f55d108..3be8c48 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -241,8 +241,9 @@
 {
 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
 
-	if (evsel != NULL)
-		perf_evsel__init(evsel, attr, idx);
+	if (!evsel)
+		return NULL;
+	perf_evsel__init(evsel, attr, idx);
 
 	if (perf_evsel__is_bpf_output(evsel)) {
 		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index bf7216b..621f652 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -260,16 +260,16 @@
 "#!/usr/bin/env sh\n"
 "if ! test -d \"$KBUILD_DIR\"\n"
 "then\n"
-"	exit -1\n"
+"	exit 1\n"
 "fi\n"
 "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
 "then\n"
-"	exit -1\n"
+"	exit 1\n"
 "fi\n"
 "TMPDIR=`mktemp -d`\n"
 "if test -z \"$TMPDIR\"\n"
 "then\n"
-"    exit -1\n"
+"    exit 1\n"
 "fi\n"
 "cat << EOF > $TMPDIR/Makefile\n"
 "obj-y := dummy.o\n"
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 879115f..98a4205 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -68,6 +68,7 @@
 %type <num> value_sym
 %type <head> event_config
 %type <head> opt_event_config
+%type <head> opt_pmu_config
 %type <term> event_term
 %type <head> event_pmu
 %type <head> event_legacy_symbol
@@ -219,7 +220,7 @@
 	   event_bpf_file
 
 event_pmu:
-PE_NAME opt_event_config
+PE_NAME opt_pmu_config
 {
 	struct parse_events_evlist *data = _data;
 	struct list_head *list;
@@ -482,6 +483,17 @@
 	$$ = NULL;
 }
 
+opt_pmu_config:
+'/' event_config '/'
+{
+	$$ = $2;
+}
+|
+'/' '/'
+{
+	$$ = NULL;
+}
+
 start_terms: event_config
 {
 	struct parse_events_terms *data = _data;
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9664b1f..5ec2de8 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -733,9 +733,7 @@
 	if (!printed || !summary_only)
 		print_header();
 
-	if (topo.num_cpus > 1)
-		format_counters(&average.threads, &average.cores,
-			&average.packages);
+	format_counters(&average.threads, &average.cores, &average.packages);
 
 	printed = 1;
 
@@ -3202,7 +3200,9 @@
 	family = (fms >> 8) & 0xf;
 	model = (fms >> 4) & 0xf;
 	stepping = fms & 0xf;
-	if (family == 6 || family == 0xf)
+	if (family == 0xf)
+		family += (fms >> 20) & 0xff;
+	if (family >= 6)
 		model += ((fms >> 16) & 0xf) << 4;
 
 	if (debug) {
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644
index 0000000..3b1f45e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Snapshot and tracing setting
+# flags: instance
+
+[ ! -f snapshot ] && exit_unsupported
+
+echo "Set tracing off"
+echo 0 > tracing_on
+
+echo "Allocate and take a snapshot"
+echo 1 > snapshot
+
+# Since trace buffer is empty, snapshot is also empty, but allocated
+grep -q "Snapshot is allocated" snapshot
+
+echo "Ensure keep tracing off"
+test `cat tracing_on` -eq 0
+
+echo "Set tracing on"
+echo 1 > tracing_on
+
+echo "Take a snapshot again"
+echo 1 > snapshot
+
+echo "Ensure keep tracing on"
+test `cat tracing_on` -eq 1
+
+exit 0
diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh
index 7868c10..b62876f 100755
--- a/tools/testing/selftests/intel_pstate/run.sh
+++ b/tools/testing/selftests/intel_pstate/run.sh
@@ -48,11 +48,12 @@
 
 	echo "sleeping for 5 seconds"
 	sleep 5
-	num_freqs=$(cat /proc/cpuinfo | grep MHz | sort -u | wc -l)
-	if [ $num_freqs -le 2 ]; then
-		cat /proc/cpuinfo | grep MHz | sort -u | tail -1 > /tmp/result.$1
+	grep MHz /proc/cpuinfo | sort -u > /tmp/result.freqs
+	num_freqs=$(wc -l /tmp/result.freqs | awk ' { print $1 } ')
+	if [ $num_freqs -ge 2 ]; then
+		tail -n 1 /tmp/result.freqs > /tmp/result.$1
 	else
-		cat /proc/cpuinfo | grep MHz | sort -u > /tmp/result.$1
+		cp /tmp/result.freqs /tmp/result.$1
 	fi
 	./msr 0 >> /tmp/result.$1
 
@@ -82,21 +83,20 @@
 max_freq=$(($_max_freq / 1000))
 
 
-for freq in `seq $max_freq -100 $min_freq`
+[ $EVALUATE_ONLY -eq 0 ] && for freq in `seq $max_freq -100 $min_freq`
 do
 	echo "Setting maximum frequency to $freq"
 	cpupower frequency-set -g powersave --max=${freq}MHz >& /dev/null
-	[ $EVALUATE_ONLY -eq 0 ] && run_test $freq
+	run_test $freq
 done
 
-echo "=============================================================================="
+[ $EVALUATE_ONLY -eq 0 ] && cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null
 
+echo "=============================================================================="
 echo "The marketing frequency of the cpu is $mkt_freq MHz"
 echo "The maximum frequency of the cpu is $max_freq MHz"
 echo "The minimum frequency of the cpu is $min_freq MHz"
 
-cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null
-
 # make a pretty table
 echo "Target      Actual      Difference     MSR(0x199)     max_perf_pct"
 for freq in `seq $max_freq -100 $min_freq`
@@ -104,10 +104,6 @@
 	result_freq=$(cat /tmp/result.${freq} | grep "cpu MHz" | awk ' { print $4 } ' | awk -F "." ' { print $1 } ')
 	msr=$(cat /tmp/result.${freq} | grep "msr" | awk ' { print $3 } ')
 	max_perf_pct=$(cat /tmp/result.${freq} | grep "max_perf_pct" | awk ' { print $2 } ' )
-	if [ $result_freq -eq $freq ]; then
-		echo " $freq        $result_freq             0          $msr         $(($max_perf_pct*3300))"
-	else
-		echo " $freq        $result_freq          $(($result_freq-$freq))          $msr          $(($max_perf_pct*$max_freq))"
-	fi
+	echo " $freq        $result_freq          $(($result_freq-$freq))          $msr          $(($max_perf_pct*$max_freq))"
 done
 exit 0
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 66d31de..9d7166d 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -85,13 +85,13 @@
 	return status;
 }
 
-static void alarm_handler(int signum)
+static void sig_handler(int signum)
 {
-	/* Jut wake us up from waitpid */
+	/* Just wake us up from waitpid */
 }
 
-static struct sigaction alarm_action = {
-	.sa_handler = alarm_handler,
+static struct sigaction sig_action = {
+	.sa_handler = sig_handler,
 };
 
 void test_harness_set_timeout(uint64_t time)
@@ -106,8 +106,14 @@
 	test_start(name);
 	test_set_git_version(GIT_VERSION);
 
-	if (sigaction(SIGALRM, &alarm_action, NULL)) {
-		perror("sigaction");
+	if (sigaction(SIGINT, &sig_action, NULL)) {
+		perror("sigaction (sigint)");
+		test_error(name);
+		return 1;
+	}
+
+	if (sigaction(SIGALRM, &sig_action, NULL)) {
+		perror("sigaction (sigalrm)");
 		test_error(name);
 		return 1;
 	}
diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests
index 6ccb154..22f8df1 100755
--- a/tools/testing/selftests/pstore/pstore_post_reboot_tests
+++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests
@@ -7,13 +7,16 @@
 #
 # Released under the terms of the GPL v2.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./common_tests
 
 if [ -e $REBOOT_FLAG  ]; then
     rm $REBOOT_FLAG
 else
     prlog "pstore_crash_test has not been executed yet. we skip further tests."
-    exit 0
+    exit $ksft_skip
 fi
 
 prlog -n "Mounting pstore filesystem ... "
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
index 1261e3f..5bba779 100755
--- a/tools/testing/selftests/static_keys/test_static_keys.sh
+++ b/tools/testing/selftests/static_keys/test_static_keys.sh
@@ -1,6 +1,19 @@
 #!/bin/sh
 # Runs static keys kernel module tests
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+	echo "static_key: module test_static_key_base is not found [SKIP]"
+	exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+	echo "static_key: module test_static_keys is not found [SKIP]"
+	exit $ksft_skip
+fi
+
 if /sbin/modprobe -q test_static_key_base; then
 	if /sbin/modprobe -q test_static_keys; then
 		echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644
index 0000000..1ab7e81
--- /dev/null
+++ b/tools/testing/selftests/sync/config
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index 30906bf..0ab937a 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -146,6 +146,11 @@
 	printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
 
 	if (llabs(eppm - ppm) > 1000) {
+		if (tx1.offset || tx2.offset ||
+		    tx1.freq != tx2.freq || tx1.tick != tx2.tick) {
+			printf("	[SKIP]\n");
+			return ksft_exit_skip("The clock was adjusted externally. Shutdown NTPd or other time sync daemons\n");
+		}
 		printf("	[FAILED]\n");
 		return ksft_exit_fail();
 	}
diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh
index 350107f..0409270 100755
--- a/tools/testing/selftests/user/test_user_copy.sh
+++ b/tools/testing/selftests/user/test_user_copy.sh
@@ -1,6 +1,13 @@
 #!/bin/sh
 # Runs copy_to/from_user infrastructure using test_user_copy kernel module
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+	echo "user: module test_user_copy is not found [SKIP]"
+	exit $ksft_skip
+fi
 if /sbin/modprobe -q test_user_copy; then
 	/sbin/modprobe -q -r test_user_copy
 	echo "user_copy: ok"
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index 246145b..4d9dc3f 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -610,21 +610,41 @@
 	 */
 	for (int i = 0; i < NGREG; i++) {
 		greg_t req = requested_regs[i], res = resulting_regs[i];
+
 		if (i == REG_TRAPNO || i == REG_IP)
 			continue;	/* don't care */
-		if (i == REG_SP) {
-			printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
-			       (unsigned long long)res);
 
+		if (i == REG_SP) {
 			/*
-			 * In many circumstances, the high 32 bits of rsp
-			 * are zeroed.  For example, we could be a real
-			 * 32-bit program, or we could hit any of a number
-			 * of poorly-documented IRET or segmented ESP
-			 * oddities.  If this happens, it's okay.
+			 * If we were using a 16-bit stack segment, then
+			 * the kernel is a bit stuck: IRET only restores
+			 * the low 16 bits of ESP/RSP if SS is 16-bit.
+			 * The kernel uses a hack to restore bits 31:16,
+			 * but that hack doesn't help with bits 63:32.
+			 * On Intel CPUs, bits 63:32 end up zeroed, and, on
+			 * AMD CPUs, they leak the high bits of the kernel
+			 * espfix64 stack pointer.  There's very little that
+			 * the kernel can do about it.
+			 *
+			 * Similarly, if we are returning to a 32-bit context,
+			 * the CPU will often lose the high 32 bits of RSP.
 			 */
-			if (res == (req & 0xFFFFFFFF))
-				continue;  /* OK; not expected to work */
+
+			if (res == req)
+				continue;
+
+			if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
+				printf("[NOTE]\tSP: %llx -> %llx\n",
+				       (unsigned long long)req,
+				       (unsigned long long)res);
+				continue;
+			}
+
+			printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
+			       (unsigned long long)requested_regs[i],
+			       (unsigned long long)resulting_regs[i]);
+			nerrs++;
+			continue;
 		}
 
 		bool ignore_reg = false;
@@ -654,25 +674,18 @@
 #endif
 
 		/* Sanity check on the kernel */
-		if (i == REG_CX && requested_regs[i] != resulting_regs[i]) {
+		if (i == REG_CX && req != res) {
 			printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
-			       (unsigned long long)requested_regs[i],
-			       (unsigned long long)resulting_regs[i]);
+			       (unsigned long long)req,
+			       (unsigned long long)res);
 			nerrs++;
 			continue;
 		}
 
-		if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
-			/*
-			 * SP is particularly interesting here.  The
-			 * usual cause of failures is that we hit the
-			 * nasty IRET case of returning to a 16-bit SS,
-			 * in which case bits 16:31 of the *kernel*
-			 * stack pointer persist in ESP.
-			 */
+		if (req != res && !ignore_reg) {
 			printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
-			       i, (unsigned long long)requested_regs[i],
-			       (unsigned long long)resulting_regs[i]);
+			       i, (unsigned long long)req,
+			       (unsigned long long)res);
 			nerrs++;
 		}
 	}
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 683a292..9399c4a 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -1,6 +1,9 @@
 #!/bin/bash
 TCID="zram.sh"
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./zram_lib.sh
 
 run_zram () {
@@ -23,5 +26,5 @@
 else
 	echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
 	echo "$TCID : CONFIG_ZRAM is not set"
-	exit 1
+	exit $ksft_skip
 fi
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index f6a9c73..9e73a4f 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -18,6 +18,9 @@
 dev_makeswap=-1
 dev_mounted=-1
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 trap INT
 
 check_prereqs()
@@ -27,7 +30,7 @@
 
 	if [ $uid -ne 0 ]; then
 		echo $msg must be run as root >&2
-		exit 0
+		exit $ksft_skip
 	fi
 }
 
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 88d5e71..47dfa0b 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -44,12 +44,25 @@
 
 /******************** Little Endian Handling ********************************/
 
-#define cpu_to_le16(x)  htole16(x)
-#define cpu_to_le32(x)  htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way
+ * that allows them to be used when initializing structures.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x)  (x)
+#define cpu_to_le32(x)  (x)
+#else
+#define cpu_to_le16(x)  ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x)  \
+	((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >>  8) | \
+	(((x) & 0x0000ff00u) <<  8) | (((x) & 0x000000ffu) << 24))
+#endif
+
 #define le32_to_cpu(x)  le32toh(x)
 #define le16_to_cpu(x)  le16toh(x)
 
-
 /******************** Messages and Errors ***********************************/
 
 static const char argv0[] = "ffs-test";
diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c
index 9db9d21..6a8db85 100644
--- a/tools/usb/usbip/src/usbip_detach.c
+++ b/tools/usb/usbip/src/usbip_detach.c
@@ -43,7 +43,7 @@
 
 static int detach_port(char *port)
 {
-	int ret;
+	int ret = 0;
 	uint8_t portnum;
 	char path[PATH_MAX+1];
 
@@ -73,9 +73,12 @@
 	}
 
 	ret = usbip_vhci_detach_device(portnum);
-	if (ret < 0)
-		return -1;
+	if (ret < 0) {
+		ret = -1;
+		goto call_driver_close;
+	}
 
+call_driver_close:
 	usbip_vhci_driver_close();
 
 	return ret;
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index e92903f..6d5bcba 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -155,12 +155,6 @@
 };
 
 
-static const char * const debugfs_known_mountpoints[] = {
-	"/sys/kernel/debug",
-	"/debug",
-	0,
-};
-
 /*
  * data structures
  */
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index b9d34b3..6975ec4 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -29,8 +29,8 @@
 	int alias;
 	int refs;
 	int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-	int hwcache_align, object_size, objs_per_slab;
-	int sanity_checks, slab_size, store_user, trace;
+	unsigned int hwcache_align, object_size, objs_per_slab;
+	unsigned int sanity_checks, slab_size, store_user, trace;
 	int order, poison, reclaim_account, red_zone;
 	unsigned long partial, objects, slabs, objects_partial, objects_total;
 	unsigned long alloc_fastpath, alloc_slowpath;
diff --git a/verity_dev_keys.x509 b/verity_dev_keys.x509
new file mode 100644
index 0000000..86399c3
--- /dev/null
+++ b/verity_dev_keys.x509
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
+VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE
+AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G
+A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi
+6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o
+fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3
+T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r
+AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP
+jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w
+HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7
+oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix
+NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6
+JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N
+dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg
+UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA
+yttuAJAEAymk1mipd9+zp38=
+-----END CERTIFICATE-----
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index 9502124..3d6dbdf 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -203,7 +203,7 @@
 		return -1;
 
 	rd = kvm_vcpu_dabt_get_rd(vcpu);
-	addr  = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
+	addr  = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
 	addr += fault_ipa - vgic->vgic_cpu_base;
 
 	if (kvm_vcpu_dabt_iswrite(vcpu)) {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 539d3f5..80d8888 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -241,6 +241,10 @@
 	if (vgic_initialized(kvm))
 		return 0;
 
+	/* Are we also in the middle of creating a VCPU? */
+	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
+		return -EBUSY;
+
 	/* freeze the number of spis */
 	if (!dist->nr_spis)
 		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index f132006..c792471 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -337,11 +337,6 @@
 		pr_warn("GICV physical address 0x%llx not page aligned\n",
 			(unsigned long long)info->vcpu.start);
 		kvm_vgic_global_state.vcpu_base = 0;
-	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
-		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-			(unsigned long long)resource_size(&info->vcpu),
-			PAGE_SIZE);
-		kvm_vgic_global_state.vcpu_base = 0;
 	} else {
 		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
 		kvm_vgic_global_state.can_emulate_gicv2 = true;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 4d28a9d..16e17ea 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -119,8 +119,12 @@
 {
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(work, struct kvm_kernel_irqfd, shutdown);
+	struct kvm *kvm = irqfd->kvm;
 	u64 cnt;
 
+	/* Make sure irqfd has been initialized in assign path. */
+	synchronize_srcu(&kvm->irq_srcu);
+
 	/*
 	 * Synchronize with the wait-queue and unhook ourselves to prevent
 	 * further events.
@@ -387,7 +391,6 @@
 
 	idx = srcu_read_lock(&kvm->irq_srcu);
 	irqfd_update(kvm, irqfd);
-	srcu_read_unlock(&kvm->irq_srcu, idx);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
 
@@ -402,11 +405,6 @@
 	if (events & POLLIN)
 		schedule_work(&irqfd->inject);
 
-	/*
-	 * do not drop the file until the irqfd is fully initialized, otherwise
-	 * we might race against the POLLHUP
-	 */
-	fdput(f);
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
 	if (kvm_arch_has_irq_bypass()) {
 		irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@
 	}
 #endif
 
+	srcu_read_unlock(&kvm->irq_srcu, idx);
+
+	/*
+	 * do not drop the file until the irqfd is fully initialized, otherwise
+	 * we might race against the POLLHUP
+	 */
+	fdput(f);
 	return 0;
 
 fail: